darwin_kperf_events/lib.rs
1// Auto-generated by `darwin-kperf-codegen`. Do not edit.
2
3//! Hardware performance counter events for Apple Silicon.
4//!
5//! This crate is auto-generated by `darwin-kperf-codegen` from the PMC
6//! database plists in `/usr/share/kpep/`. **Do not edit by hand.**
7//!
8//! # Types
9//!
10//! - [`Event`]: chip-agnostic event enum covering M1 through M5. Use [`Event::on`] to resolve an
11//! event for a specific [`Cpu`], which checks availability and returns the chip-specific
12//! metadata.
13//! - [`Cpu`]: Apple Silicon chip generation, identified by the `kpep_db.name` field at runtime.
14//! - [`EventInfo`]: trait providing event metadata (name, description, counter mask, etc.),
15//! implemented by per-chip enums ([`M1Event`], [`M2Event`], ...) and by [`ResolvedEvent`].
16//! - [`ResolvedEvent`]: an [`Event`] resolved for a specific [`Cpu`], returned by [`Event::on`].
17#![no_std]
18#![expect(
19 clippy::match_same_arms,
20 clippy::too_many_lines,
21 clippy::unnecessary_wraps,
22 clippy::decimal_literal_representation,
23 clippy::unseparated_literal_suffix
24)]
25mod m1;
26mod m2;
27mod m3;
28mod m4;
29mod m5;
30pub use m1::M1Event;
31pub use m2::M2Event;
32pub use m3::M3Event;
33pub use m4::M4Event;
34pub use m5::M5Event;
/// Apple Silicon chip generation, as identified by `kpep_db.name`.
///
/// Each variant's doc comment gives the marketing name recorded in the PMC
/// database, plus the `kpep_db` name strings that resolve to it via
/// [`Cpu::from_db_name`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub enum Cpu {
    /// Apple A14/M1. Database name: `a14`.
    M1,
    /// Apple A15. Database name: `a15`.
    M2,
    /// Apple A16. Database names: `a16`, `as1`, `as2`, `as3`.
    M3,
    /// Apple silicon (the database reports no more specific marketing name).
    /// Database names: `as4`, `as4-1`, `as4-2`.
    M4,
    /// Apple silicon (the database reports no more specific marketing name).
    /// Database names: `as5`, `as5-2`.
    M5,
}
50impl Cpu {
51 /// Matches the `name` field from a `kpep_db` to a known generation.
52 ///
53 /// Returns `None` for unrecognized CPU names.
54 #[must_use]
55 pub const fn from_db_name(name: &str) -> Option<Self> {
56 match name.as_bytes() {
57 b"a14" => Some(Self::M1),
58 b"a15" => Some(Self::M2),
59 b"a16" | b"as1" | b"as2" | b"as3" => Some(Self::M3),
60 b"as4" | b"as4-1" | b"as4-2" => Some(Self::M4),
61 b"as5" | b"as5-2" => Some(Self::M5),
62 _ => None,
63 }
64 }
65
66 /// Marketing name from the PMC database (e.g. `"Apple A14/M1"`).
67 #[must_use]
68 pub const fn marketing_name(self) -> &'static str {
69 match self {
70 Self::M1 => "Apple A14/M1",
71 Self::M2 => "Apple A15",
72 Self::M3 => "Apple A16",
73 Self::M4 => "Apple silicon",
74 Self::M5 => "Apple silicon",
75 }
76 }
77
78 /// Bitmask of fixed counter registers.
79 #[must_use]
80 pub const fn fixed_counters(self) -> u32 {
81 match self {
82 Self::M1 => 3u32,
83 Self::M2 => 3u32,
84 Self::M3 => 3u32,
85 Self::M4 => 3u32,
86 Self::M5 => 3u32,
87 }
88 }
89
90 /// Bitmask of configurable counter registers.
91 #[must_use]
92 pub const fn config_counters(self) -> u32 {
93 match self {
94 Self::M1 => 1020u32,
95 Self::M2 => 1020u32,
96 Self::M3 => 1020u32,
97 Self::M4 => 1020u32,
98 Self::M5 => 1020u32,
99 }
100 }
101
102 /// Bitmask of power counter registers.
103 #[must_use]
104 pub const fn power_counters(self) -> u32 {
105 match self {
106 Self::M1 => 224u32,
107 Self::M2 => 224u32,
108 Self::M3 => 224u32,
109 Self::M4 => 224u32,
110 Self::M5 => 224u32,
111 }
112 }
113}
/// Metadata for a hardware performance counter event on a specific chip.
///
/// Implemented by the per-chip event enums ([`M1Event`] through [`M5Event`])
/// and by [`ResolvedEvent`]. All returned data is `'static`, taken from the
/// generated tables.
pub trait EventInfo {
    /// The kpep event name string (e.g. `"INST_ALL"`).
    fn name(&self) -> &'static str;
    /// The kpep event name as a NUL-terminated C string (e.g. `c"INST_ALL"`).
    fn c_name(&self) -> &'static core::ffi::CStr;
    /// Human-readable description from the PMC database.
    fn description(&self) -> &'static str;
    /// Bitmask of counters this event can be programmed on.
    ///
    /// NOTE(review): `None` presumably means the database lists no counter
    /// mask for this event — confirm against the generated per-chip tables.
    fn counters_mask(&self) -> Option<u32>;
    /// Event number (selector value written to the PMC config register).
    ///
    /// NOTE(review): `None` presumably applies to fixed-counter events with no
    /// configurable selector — confirm against the generated per-chip tables.
    fn number(&self) -> Option<u16>;
    /// Fixed counter index, or `None` for configurable events.
    fn fixed_counter(&self) -> Option<u8>;
    /// Fallback event name for fixed counters.
    fn fallback(&self) -> Option<&'static str>;
    /// Human-readable alias names (e.g. `"Cycles"`, `"Instructions"`).
    fn aliases(&self) -> &'static [&'static str];
}
/// A chip-specific event, erasing which chip it belongs to.
///
/// Each variant wraps a per-chip event enum that implements [`EventInfo`].
/// Private: callers only ever see this through [`ResolvedEvent`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
enum AnyEvent {
    /// Event on M1.
    M1(M1Event),
    /// Event on M2.
    M2(M2Event),
    /// Event on M3.
    M3(M3Event),
    /// Event on M4.
    M4(M4Event),
    /// Event on M5.
    M5(M5Event),
}
149impl EventInfo for AnyEvent {
150 #[inline]
151 fn name(&self) -> &'static str {
152 match self {
153 Self::M1(event) => event.name(),
154 Self::M2(event) => event.name(),
155 Self::M3(event) => event.name(),
156 Self::M4(event) => event.name(),
157 Self::M5(event) => event.name(),
158 }
159 }
160
161 #[inline]
162 fn c_name(&self) -> &'static core::ffi::CStr {
163 match self {
164 Self::M1(event) => event.c_name(),
165 Self::M2(event) => event.c_name(),
166 Self::M3(event) => event.c_name(),
167 Self::M4(event) => event.c_name(),
168 Self::M5(event) => event.c_name(),
169 }
170 }
171
172 #[inline]
173 fn description(&self) -> &'static str {
174 match self {
175 Self::M1(event) => event.description(),
176 Self::M2(event) => event.description(),
177 Self::M3(event) => event.description(),
178 Self::M4(event) => event.description(),
179 Self::M5(event) => event.description(),
180 }
181 }
182
183 #[inline]
184 fn counters_mask(&self) -> Option<u32> {
185 match self {
186 Self::M1(event) => event.counters_mask(),
187 Self::M2(event) => event.counters_mask(),
188 Self::M3(event) => event.counters_mask(),
189 Self::M4(event) => event.counters_mask(),
190 Self::M5(event) => event.counters_mask(),
191 }
192 }
193
194 #[inline]
195 fn number(&self) -> Option<u16> {
196 match self {
197 Self::M1(event) => event.number(),
198 Self::M2(event) => event.number(),
199 Self::M3(event) => event.number(),
200 Self::M4(event) => event.number(),
201 Self::M5(event) => event.number(),
202 }
203 }
204
205 #[inline]
206 fn fixed_counter(&self) -> Option<u8> {
207 match self {
208 Self::M1(event) => event.fixed_counter(),
209 Self::M2(event) => event.fixed_counter(),
210 Self::M3(event) => event.fixed_counter(),
211 Self::M4(event) => event.fixed_counter(),
212 Self::M5(event) => event.fixed_counter(),
213 }
214 }
215
216 #[inline]
217 fn fallback(&self) -> Option<&'static str> {
218 match self {
219 Self::M1(event) => event.fallback(),
220 Self::M2(event) => event.fallback(),
221 Self::M3(event) => event.fallback(),
222 Self::M4(event) => event.fallback(),
223 Self::M5(event) => event.fallback(),
224 }
225 }
226
227 #[inline]
228 fn aliases(&self) -> &'static [&'static str] {
229 match self {
230 Self::M1(event) => event.aliases(),
231 Self::M2(event) => event.aliases(),
232 Self::M3(event) => event.aliases(),
233 Self::M4(event) => event.aliases(),
234 Self::M5(event) => event.aliases(),
235 }
236 }
237}
/// A resolved view of an [`Event`] on a specific [`Cpu`].
///
/// Returned by [`Event::on`]. Wraps a chip-specific event and forwards
/// [`EventInfo`] to the chip-specific implementation. The inner field is
/// private; the only way to construct one is via [`Event::on`], which
/// guarantees the event exists on the chosen chip.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct ResolvedEvent(AnyEvent);
244impl EventInfo for ResolvedEvent {
245 #[inline]
246 fn name(&self) -> &'static str {
247 self.0.name()
248 }
249
250 #[inline]
251 fn c_name(&self) -> &'static core::ffi::CStr {
252 self.0.c_name()
253 }
254
255 #[inline]
256 fn description(&self) -> &'static str {
257 self.0.description()
258 }
259
260 #[inline]
261 fn counters_mask(&self) -> Option<u32> {
262 self.0.counters_mask()
263 }
264
265 #[inline]
266 fn number(&self) -> Option<u16> {
267 self.0.number()
268 }
269
270 #[inline]
271 fn fixed_counter(&self) -> Option<u8> {
272 self.0.fixed_counter()
273 }
274
275 #[inline]
276 fn fallback(&self) -> Option<&'static str> {
277 self.0.fallback()
278 }
279
280 #[inline]
281 fn aliases(&self) -> &'static [&'static str] {
282 self.0.aliases()
283 }
284}
285
/// A hardware performance counter event from Apple's kpep database.
///
/// Covers Apple Silicon generations M1 through M5.
/// Each variant maps to a named event in the PMC database; the event's
/// availability on a specific chip is noted in the variant doc comment.
/// Use [`Event::on`] to check availability and obtain chip-specific metadata
/// as a [`ResolvedEvent`].
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub enum Event {
    /// Mispredicted or not predicted branch Speculatively executed.
    ///
    /// M4, M5 only.
    ArmBrMisPred,
    /// Predictable branch Speculatively executed.
    ///
    /// M4, M5 only.
    ArmBrPred,
    /// Level 1 data cache access.
    ///
    /// M4, M5 only.
    ArmL1DCache,
    /// Level 1 data cache long-latency read miss.
    ///
    /// M4, M5 only.
    ArmL1DCacheLmissRd,
    /// Attributable Level 1 data cache access, read.
    ///
    /// M4, M5 only.
    ArmL1DCacheRd,
    /// Level 1 data cache refill.
    ///
    /// M4, M5 only.
    ArmL1DCacheRefill,
    /// No operation sent for execution.
    ///
    /// M4, M5 only.
    ArmStall,
    /// No operation issued due to the backend.
    ///
    /// M4, M5 only.
    ArmStallBackend,
    /// No operation issued due to the frontend.
    ///
    /// M4, M5 only.
    ArmStallFrontend,
    /// No operation sent for execution on a slot.
    ///
    /// M4, M5 only.
    ArmStallSlot,
    /// No operation sent for execution on a Slot due to the backend.
    ///
    /// M4, M5 only.
    ArmStallSlotBackend,
    /// No operation sent for execution on a Slot due to the frontend.
    ///
    /// M4, M5 only.
    ArmStallSlotFrontend,
    /// Atomic or exclusive instruction failed due to contention (for exclusives,
    /// incorrectly undercounts for exclusives when the cache line is initially
    /// found in shared state, however counts correctly for atomics).
    ///
    /// All generations.
    AtomicOrExclusiveFail,
    /// Atomic or exclusive instruction successfully completed (for exclusives,
    /// incorrectly undercounts for exclusives when the cache line is initially
    /// found in shared state, however counts correctly for atomics).
    ///
    /// All generations.
    AtomicOrExclusiveSucc,
    /// Retired indirect call instructions mispredicted.
    ///
    /// All generations.
    BranchCallIndirMispredNonspec,
    /// Retired conditional branch instructions that mispredicted.
    ///
    /// All generations.
    BranchCondMispredNonspec,
    /// Retired indirect branch instructions including calls and returns that
    /// mispredicted.
    ///
    /// All generations.
    BranchIndirMispredNonspec,
    /// Instruction architecturally executed, mispredicted branch.
    ///
    /// All generations.
    BranchMispredNonspec,
    /// Retired return instructions that mispredicted.
    ///
    /// All generations.
    BranchRetIndirMispredNonspec,
    /// Cycles while the core was active.
    ///
    /// All generations.
    CoreActiveCycle,
    /// Fetch Unit internal restarts for any reason. Does not include branch
    /// mispredicts.
    ///
    /// All generations.
    FetchRestart,
    /// Fixed counter.
    ///
    /// All generations.
    FixedCycles,
    /// Fixed counter (fallback: `INST_ALL`).
    ///
    /// All generations.
    FixedInstructions,
    /// Pipeline flush and restarts that were not due to branch mispredictions or
    /// memory order violations.
    ///
    /// All generations.
    FlushRestartOtherNonspec,
    /// All retired instructions.
    ///
    /// All generations.
    InstAll,
    /// Retired data barrier instructions.
    ///
    /// All generations.
    InstBarrier,
    /// Retired branch instructions including calls and returns.
    ///
    /// All generations.
    InstBranch,
    /// Retired subroutine call instructions.
    ///
    /// All generations.
    InstBranchCall,
    /// Retired conditional branch instructions (on M3 and prior, incorrectly
    /// counts only B.cond instructions, where on M4 and following, adds
    /// CBZ/CBNZ/TBZ/TBNZ instructions to form the complete set of conditional
    /// branch instructions).
    ///
    /// M4, M5 only.
    ///
    /// NOTE(review): the description covers M3-and-prior behavior even though
    /// availability is listed as M4/M5 only — verify against the per-chip
    /// tables.
    InstBranchCond,
    /// Retired indirect branch instructions including indirect calls.
    ///
    /// All generations.
    InstBranchIndir,
    /// Retired subroutine return instructions.
    ///
    /// All generations.
    InstBranchRet,
    /// Retired taken branch instructions.
    ///
    /// All generations.
    InstBranchTaken,
    /// Retired non-branch and non-load/store Integer Unit instructions.
    ///
    /// All generations.
    InstIntAlu,
    /// Retired load Integer Unit instructions.
    ///
    /// All generations.
    InstIntLd,
    /// Retired store Integer Unit instructions; does not count DC ZVA (Data Cache
    /// Zero by VA).
    ///
    /// All generations.
    InstIntSt,
    /// Retired load and store instructions; does not count DC ZVA (Data Cache Zero
    /// by VA).
    ///
    /// All generations.
    InstLdst,
    /// Retired non-load/store Advanced SIMD and FP Unit instructions.
    ///
    /// All generations.
    InstSimdAlu,
    /// Retired non-load/store vector Advanced SIMD instructions.
    ///
    /// M2, M3, M4, M5 only.
    InstSimdAluVec,
    /// Retired load Advanced SIMD and FP Unit instructions.
    ///
    /// All generations.
    InstSimdLd,
    /// Retired store Advanced SIMD and FP Unit instructions.
    ///
    /// All generations.
    InstSimdSt,
    /// Retired non-load/store SME engine instructions.
    ///
    /// M4, M5 only.
    InstSmeEngineAlu,
    /// Retired load SME engine instructions.
    ///
    /// M4, M5 only.
    InstSmeEngineLd,
    /// Retired non-load/store SME engine instructions that were packed with another
    /// to reduce instruction bandwidth to the SME engine.
    ///
    /// M4, M5 only.
    InstSmeEnginePackingFused,
    /// Retired scalar floating-point SME engine instructions.
    ///
    /// M4, M5 only.
    InstSmeEngineScalarfp,
    /// Retired store SME engine instructions.
    ///
    /// M4, M5 only.
    InstSmeEngineSt,
    /// Cycles while an interrupt was pending because it was masked.
    ///
    /// All generations.
    InterruptPending,
    /// Loads that missed the L1 Data Cache.
    ///
    /// All generations.
    L1DCacheMissLd,
    /// Retired loads that missed in the L1 Data Cache.
    ///
    /// All generations.
    L1DCacheMissLdNonspec,
    /// Stores that missed the L1 Data Cache.
    ///
    /// All generations.
    L1DCacheMissSt,
    /// Retired stores that missed in the L1 Data Cache.
    ///
    /// All generations.
    L1DCacheMissStNonspec,
    /// Dirty cache lines written back from the L1D Cache toward the Shared L2
    /// Cache.
    ///
    /// All generations.
    L1DCacheWriteback,
    /// Load and store accesses to the L1 Data TLB.
    ///
    /// All generations.
    L1DTlbAccess,
    /// Translations filled into the L1 Data TLB.
    ///
    /// All generations.
    L1DTlbFill,
    /// Load and store accesses that missed the L1 Data TLB.
    ///
    /// All generations.
    L1DTlbMiss,
    /// Retired loads and stores that missed in the L1 Data TLB.
    ///
    /// All generations.
    L1DTlbMissNonspec,
    /// Demand fetch misses that require a new cache line fill of the L1 Instruction
    /// Cache.
    ///
    /// All generations.
    L1ICacheMissDemand,
    /// Translations filled into the L1 Instruction TLB.
    ///
    /// All generations.
    L1ITlbFill,
    /// Demand instruction fetches that missed in the L1 Instruction TLB.
    ///
    /// All generations.
    L1ITlbMissDemand,
    /// Loads and stores that missed in the L2 TLB.
    ///
    /// All generations.
    L2TlbMissData,
    /// Instruction fetches that missed in the L2 TLB.
    ///
    /// All generations.
    L2TlbMissInstruction,
    /// Core load uops blocked by SME accesses to same 4KiB page.
    ///
    /// M4, M5 only.
    LdBlockedBySmeLdst,
    /// Load uops that executed with non-temporal hint; excludes SSVE/SME loads
    /// because they utilize the Store Unit.
    ///
    /// All generations.
    LdNtUop,
    /// SME engine load uops with Normal memory type.
    ///
    /// M4, M5 only.
    LdSmeNormalUop,
    /// SME engine load uops that executed with non-temporal hint.
    ///
    /// M4, M5 only.
    LdSmeNtUop,
    /// Uops that flowed through the Load Unit.
    ///
    /// All generations.
    LdUnitUop,
    /// Cycles while a younger load uop is waiting for data after an L1 Data Cache
    /// miss, and no uop was issued by the scheduler with no critical miss,
    /// prioritized.
    ///
    /// M4, M5 only.
    LdUnitWaitingYoungL1DCacheMiss,
    /// SME engine load and store uops where all lanes are inactive due to the
    /// governing predicate; for a page-crossing load or store, the event may
    /// incorrectly count when all of the elements of the low page are predicated
    /// off, even if some of the elements on the high page are active. In Apple
    /// silicon cores, where predication is recommended primarily for data
    /// structure edge control (discarding elements 'past the end of the data
    /// structure'), this scenario should not be common.
    ///
    /// M4, M5 only.
    LdstSmePredInactive,
    /// SME engine load and store accesses that crossed a 16KiB page boundary; an
    /// access is considered cross-page if any bytes are accessed in the high
    /// portion (second page), regardless if any bytes are accessed in the low
    /// portion (first page), after predication is applied. An SME operation that
    /// only touches the low portion (first page) after predication is applied is
    /// not considered cross-page.
    ///
    /// M4, M5 only.
    LdstSmeXpgUop,
    /// Cycles while an old load or store uop is waiting for data after an L1 Data
    /// Cache miss.
    ///
    /// M3, M4, M5 only.
    LdstUnitOldL1DCacheMiss,
    /// Cycles while an old load or store uop is waiting for data after an L1 Data
    /// Cache miss, and no uop was issued by the scheduler, prioritized.
    ///
    /// M3, M4, M5 only.
    LdstUnitWaitingOldL1DCacheMiss,
    /// Cycles while the instruction queue to the SME engine is full, and no uop was
    /// issued by the scheduler with no critical miss, prioritized.
    ///
    /// M4, M5 only.
    LdstUnitWaitingSmeEngineInstQueueFull,
    /// Cycles while the core is waiting for the SME engine to produce memory data,
    /// and no uop was issued by the scheduler, prioritized.
    ///
    /// M4, M5 only.
    LdstUnitWaitingSmeEngineMemData,
    /// Load and store uops that crossed a 64B boundary.
    ///
    /// All generations.
    LdstX64Uop,
    /// Load and store uops that crossed a 16KiB page boundary; an SME access is
    /// considered cross-page if any bytes are accessed in the high portion (second
    /// page), regardless if any bytes are accessed in the low portion (first page),
    /// after predication is applied. An SME operation that only touches the low
    /// portion (first page) after predication is applied is not considered
    /// cross-page.
    ///
    /// All generations.
    LdstXpgUop,
    /// Cycles while the Map Unit had no uops to process and was not stalled.
    ///
    /// All generations.
    MapDispatchBubble,
    /// Cycles while the Map Unit had no uops to process due to L1 Instruction Cache
    /// and was not stalled.
    ///
    /// M3, M4, M5 only.
    MapDispatchBubbleIc,
    /// Cycles while the Map Unit had no uops to process due to L1 Instruction TLB
    /// and was not stalled.
    ///
    /// M3, M4, M5 only.
    MapDispatchBubbleItlb,
    /// Slots where the Map Unit had no uops to process and was not stalled.
    ///
    /// M4, M5 only.
    MapDispatchBubbleSlot,
    /// Mapped core Integer Unit uops for SME engine instructions.
    ///
    /// M4, M5 only.
    MapIntSmeUop,
    /// Mapped Integer Unit uops.
    ///
    /// All generations.
    MapIntUop,
    /// Mapped Load and Store Unit uops, including GPR to vector register converts;
    /// includes all instructions sent to the SME engine because they are processed
    /// through the Store Unit.
    ///
    /// All generations.
    MapLdstUop,
    /// Cycles while the Map Unit was stalled while recovering from a flush and
    /// restart.
    ///
    /// M4, M5 only.
    MapRecovery,
    /// Cycles while the Map Unit was blocked while rewinding due to flush and
    /// restart.
    ///
    /// All generations.
    MapRewind,
    /// Mapped Advanced SIMD and FP Unit uops.
    ///
    /// All generations.
    MapSimdUop,
    /// Cycles while the Map Unit was stalled for any reason.
    ///
    /// All generations.
    MapStall,
    /// Cycles while the Map Unit was stalled because of Dispatch back pressure.
    ///
    /// All generations.
    MapStallDispatch,
    /// Cycles while the Map Unit was stalled for any reason other than recovery.
    ///
    /// M4, M5 only.
    MapStallNonrecovery,
    /// Mapped uops.
    ///
    /// M3, M4, M5 only.
    MapUop,
    /// Table walk memory requests on behalf of data accesses.
    ///
    /// All generations.
    MmuTableWalkData,
    /// Table walk memory requests on behalf of instruction fetches.
    ///
    /// All generations.
    MmuTableWalkInstruction,
    /// All retired uops.
    ///
    /// All generations.
    RetireUop,
    /// Cycles while the uop scheduler is empty.
    ///
    /// All generations.
    ScheduleEmpty,
    /// Uops issued by the scheduler to any execution unit.
    ///
    /// M1, M3, M4, M5 only.
    ScheduleUop,
    /// Cycles while the core is waiting for register, predicate, or flag data from
    /// the SME engine, and no uop was issued by the scheduler with no critical
    /// miss, prioritized.
    ///
    /// M4, M5 only.
    ScheduleWaitingSmeEngineRegData,
    /// Transitions into SME engine Streaming Mode (PSTATE.SM: 0 to 1).
    ///
    /// M4, M5 only.
    SmeEngineSmEnable,
    /// Simultaneous transitions into SME engine Streaming Mode and ZA Mode
    /// (PSTATE.SM: 0 to 1 and PSTATE.ZA: 0 to 1).
    ///
    /// M4, M5 only.
    SmeEngineSmZaEnable,
    /// Cycles while SME engine ZA Mode is enabled but Streaming Mode is not
    /// (PSTATE.ZA=1 and PSTATE.SM=0).
    ///
    /// M4, M5 only.
    SmeEngineZaEnabledSmDisabled,
    /// Core store uops blocked by SME accesses to same 4KiB page, and any barriers
    /// or store-release uops blocked by SME accesses.
    ///
    /// M4, M5 only.
    StBarrierBlockedBySmeLdst,
    /// Retired core store uops that triggered memory order violations with core
    /// load uops.
    ///
    /// All generations.
    StMemOrderViolLdNonspec,
    /// Store uops that executed with non-temporal hint; includes SSVE/SME loads
    /// because they utilize the Store Unit.
    ///
    /// All generations.
    StNtUop,
    /// SME engine store uops with Normal memory type.
    ///
    /// M4, M5 only.
    StSmeNormalUop,
    /// SME engine store uops that executed with non-temporal hint.
    ///
    /// M4, M5 only.
    StSmeNtUop,
    /// Uops that flowed through the Store Unit.
    ///
    /// All generations.
    StUnitUop,
}
758impl Event {
759 /// Resolves this event for the given CPU, returning its chip-specific
760 /// metadata, or `None` if the event is unavailable on that chip.
761 #[must_use]
762 pub fn on(self, cpu: Cpu) -> Option<ResolvedEvent> {
763 let any = match cpu {
764 Cpu::M1 => AnyEvent::M1(M1Event::from_event(self)?),
765 Cpu::M2 => AnyEvent::M2(M2Event::from_event(self)?),
766 Cpu::M3 => AnyEvent::M3(M3Event::from_event(self)?),
767 Cpu::M4 => AnyEvent::M4(M4Event::from_event(self)?),
768 Cpu::M5 => AnyEvent::M5(M5Event::from_event(self)?),
769 };
770 Some(ResolvedEvent(any))
771 }
772}