Skip to main content

symbolic_debuginfo/macho/
compact.rs

1//! Support for the "compact unwinding format" used by Apple platforms,
2//! which can be found in __unwind_info sections of binaries.
3//!
4//! The primary type of interest is [`CompactUnwindInfoIter`], which can be
5//! constructed directly from a section of memory.
6//!
7//! The [`CompactUnwindInfoIter`] lets you iterate through all of the mappings
8//! from instruction addresses to unwinding instructions, or lookup a specific
9//! mapping by instruction address (unimplemented).
10//!
11//!
12//!
13//! # Examples
14//!
15//! If you want to process all the Compact Unwind Info at once, do something like this:
16//!
17//! ```
18//! use symbolic_debuginfo::macho::{
19//!     CompactCfiOp, CompactCfiRegister, CompactUnwindOp,
20//!     CompactUnwindInfoIter, MachError, MachObject,
21//! };
22//!
23//! fn read_compact_unwind_info<'d>(mut iter: CompactUnwindInfoIter<'d>)
24//!     -> Result<(), MachError>
25//! {
26//!     // Iterate through the entries
27//!     while let Some(entry) = iter.next()? {
28//!         match entry.instructions(&iter) {
29//!             CompactUnwindOp::None => {
30//!                 // No instructions for this region, will need to use
31//!                 // stack scanning or frame-pointer techniques to unwind.
32//!             }
33//!             CompactUnwindOp::UseDwarfFde { offset_in_eh_frame } => {
34//!                 // Need to grab the CFI info from the eh_frame section
35//!
36//!                 // process_eh_frame_fde_at(offset_in_eh_frame)
37//!             }
38//!             CompactUnwindOp::CfiOps(ops) => {
39//!                 // Emit a new entry with the following operations
40//!                 let start_addr = entry.instruction_address;
41//!                 let length = entry.len;
42//!
43//!                 for instruction in ops {
44//!                     match instruction {
45//!                         CompactCfiOp::RegisterAt {
46//!                             dest_reg,
47//!                             src_reg,
48//!                             offset_from_src,
49//!                         } => {
50//!                             let dest_reg_name = dest_reg.name(&iter);
51//!                             let src_reg_name = src_reg.name(&iter);
52//!
53//!                             // Emit something to the effect of
54//!                             // $dest_reg_name = *($src_reg_name + offset_from_src);
55//!                         }
56//!                         CompactCfiOp::RegisterIs {
57//!                             dest_reg,
58//!                             src_reg,
59//!                             offset_from_src,
60//!                         } => {
61//!                             let dest_reg_name = dest_reg.name(&iter);
62//!                             let src_reg_name = src_reg.name(&iter);
63//!
64//!                             // Emit something to the effect of
65//!                             // $dest_reg_name = $src_reg_name + offset_from_src;
66//!                         }
67//!                     };
68//!                 }
69//!             }
70//!         }
71//!     }
72//!     Ok(())
73//! }
74//! ```
75//!
76//! If you want to unwind from a specific location, do something like this
77//! (API not yet implemented!):
78//!
79//! ```rust,ignore
80//! use symbolic_debuginfo::macho::{
81//!     CompactCfiOp, CompactCfiRegister, CompactUnwindOp,
82//!     CompactUnwindInfoIter, MachError, MachObject,
83//! };
84//!
85//! fn unwind_one_frame<'d>(mut iter: CompactUnwindInfoIter<'d>, current_address_in_module: u32)
86//!     -> Result<(), MachError>
87//! {
88//!     if let Some(entry) = iter.entry_for_address(current_address_in_module)? {
89//!         match entry.instructions(&iter) {
90//!             CompactUnwindOp::None => {
91//!                 // No instructions for this region, will need to use
92//!                 // stack scanning or frame-pointer techniques to unwind.
93//!             }
94//!             CompactUnwindOp::UseDwarfFde { offset_in_eh_frame } => {
95//!                 // Need to grab the CFI info from the eh_frame section
96//!
97//!                 // process_eh_frame_fde_at(offset_in_eh_frame)
98//!             }
99//!             CompactUnwindOp::CfiOps(ops) => {
100//!                 // Emit a new entry with the following operations
101//!                 let start_addr = entry.instruction_address;
102//!                 let length = entry.len;
103//!
104//!                 for instruction in ops {
105//!                     match instruction {
106//!                         CompactCfiOp::RegisterAt {
107//!                             dest_reg,
108//!                             src_reg,
109//!                             offset_from_src,
110//!                         } => {
111//!                             let dest_reg_name = dest_reg.name(&iter);
112//!                             let src_reg_name = src_reg.name(&iter);
113//!
114//!                             // Emit something to the effect of
115//!                             // $dest_reg_name = *($src_reg_name + offset_from_src);
116//!                         }
117//!                         CompactCfiOp::RegisterIs {
118//!                             dest_reg,
119//!                             src_reg,
120//!                             offset_from_src,
121//!                         } => {
122//!                             let dest_reg_name = dest_reg.name(&iter);
123//!                             let src_reg_name = src_reg.name(&iter);
124//!
125//!                             // Emit something to the effect of
126//!                             // $dest_reg_name = $src_reg_name + offset_from_src;
127//!                         }
128//!                     };
129//!                 }
130//!             }
131//!         }
132//!     }
133//!     Ok(())
134//! }
135//! ```
136//!
137//!
138//! # Unimplemented Features (TODO)
139//!
140//! * Personality/LSDA lookup (for runtime unwinders)
141//! * Entry lookup by address (for runtime unwinders)
142//! * x86/x64 Stackless-Indirect mode decoding (for stack frames > 2KB)
143//!
144//!
145//! # The Compact Unwinding Format
146//!
147//! This format is defined only by its implementation in llvm. Notably these
148//! files include lots of useful comments and definitions:
149//!
150//! * [Header describing layout of the format](https://github.com/llvm/llvm-project/blob/main/libunwind/include/mach-o/compact_unwind_encoding.h)
151//! * [Implementation that outputs the format](https://github.com/llvm/llvm-project/blob/main/lld/MachO/UnwindInfoSection.cpp)
152//! * [Implementation of lldb interpreting that format (CreateUnwindPlan_x86_64 especially useful)](https://github.com/llvm/llvm-project/blob/main/lldb/source/Symbol/CompactUnwindInfo.cpp)
153//!
154//! This implementation is based on those files at commit `d480f968ad8b56d3ee4a6b6df5532d485b0ad01e`.
155//!
156//! Unfortunately the description of the format in those files elides some important
157//! details, and it uses some naming conventions that are confusing, so this document
158//! will attempt to define this format more completely, and with more clear terms.
159//!
160//! Some notable terminology changes from llvm:
161//!
162//! * "encoding" or "encoding type" => opcode
163//! * "function offset" => instruction address
164//!
165//! Like all unwinding info formats, the goal of the compact unwinding format
166//! is to create a mapping from addresses in the binary to opcodes describing
167//! how to unwind from that location.
168//!
169//! These opcodes describe:
170//!
171//! * How to recover the return pointer for the current frame
172//! * How to recover some of the registers for the current frame
173//! * How to run destructors / catch the unwind at runtime (personality/LSDA)
174//!
175//! A user of the compact unwinding format would:
176//!
177//! 1. Get the current instruction pointer (e.g. `%rip`).
178//! 2. Lookup the corresponding opcode in the compact unwinding structure.
179//! 3. Follow the instructions of that opcode to recover the current frame.
180//! 4. Optionally perform runtime unwinding tasks for the current frame (destructors).
181//! 5. Use that information to recover the instruction pointer of the previous frame.
182//! 6. Repeat until unwinding is complete.
183//!
184//! The compact unwinding format can be understood as two separate pieces:
185//!
186//! * An architecture-agnostic "page-table" structure for finding opcode entries
187//! * Architecture-specific opcode formats (x86, x64, and ARM64)
188//!
189//! Unlike DWARF CFI, compact unwinding doesn't have facilities for incrementally
190//! updating how to recover certain registers as the function progresses.
191//!
192//! Empirical analysis suggests that there tends to only be one opcode for
193//! an entire function (which explains why llvm refers to instruction addresses
194//! as "function offsets"), although nothing in the format seems to *require*
195//! this to be the case.
196//!
197//! One consequence of only having one opcode for a whole function is that
198//! functions will generally have incorrect instructions for the function's
199//! prologue (where callee-saved registers are individually PUSHed onto the
200//! stack before the rest of the stack space is allocated), and epilogue
201//! (where callee-saved registers are individually POPed back into registers).
202//!
203//! Presumably this isn't a very big deal, since there's very few situations
204//! where unwinding would involve a function still executing its prologue/epilogue.
205//! This might matter when handling a stack overflow that occurred while
206//! saving the registers, or when processing a non-crashing thread in a minidump
207//! that happened to be in its prologue/epilogue.
208//!
209//! Similarly, the way ranges of instructions are mapped means that Compact
210//! Unwinding will generally incorrectly map the padding bytes between functions
211//! (attributing them to the previous function), while DWARF CFI tends to
212//! more carefully exclude those addresses. Presumably also not a big deal.
213//!
214//! Both of these things mean that if DWARF CFI and Compact Unwinding are
215//! available for a function, the DWARF CFI is expected to be more precise.
216//!
217//! It's possible that LSDA entries have addresses decoupled from the primary
218//! opcode so that instructions on how to run destructors can vary more
219//! granularly, but LSDA support is still TODO as it's not needed for
220//! backtraces.
221//!
222//!
223//! # Page Tables
224//!
225//! This section describes the architecture-agnostic layout of the compact
226//! unwinding format. The layout of the format is a two-level page-table
227//! with one root first-level node pointing to arbitrarily many second-level
228//! nodes, which in turn can hold several hundred opcode entries.
229//!
230//! There are two high-level concepts in this format that enable significant
231//! compression of the tables:
232//!
233//! 1. Eliding duplicate instruction addresses
234//! 2. Palettizing the opcodes
235//!
236//!
237//!
238//! Trick 1 is standard for unwinders: the table of mappings is sorted by
239//! address, and any entries that would have the same opcode as the
240//! previous one are elided. So for instance the following:
241//!
242//! ```text
243//! address: 1, opcode: 1
244//! address: 2, opcode: 1
245//! address: 3, opcode: 2
246//! ```
247//!
248//! Is just encoded like this:
249//!
250//! ```text
251//! address: 1, opcode: 1
252//! address: 3, opcode: 2
253//! ```
254//!
255//! We have found a few places with "zero-length" entries, where the same
256//! address gets repeated, such as the following in `libsystem_kernel.dylib`:
257//!
258//! ```text
259//! address: 0x000121c3, opcode: 0x00000000
260//! address: 0x000121c3, opcode: 0x04000680
261//! ```
262//!
263//! In this case you can just discard the zero-length one (the first one).
264//!
265//!
266//!
267//! Trick 2 is more novel: At the first level a global palette of up to 127 opcodes
268//! is defined. Each second-level "compressed" (leaf) page can also define up to 128 local
269//! opcodes. Then the entries mapping instruction addresses to opcodes can use 8-bit
270//! indices into those palettes instead of entire 32-bit opcodes. If an index is
271//! smaller than the number of global opcodes, it's global, otherwise it's local
272//! (subtract the global count to get the local index).
273//!
274//! > Unclear detail: If the global palette is smaller than 127, can the local
275//! > palette be larger than 128?
276//!
277//! To compress these entries into a single 32-bit value, the address is truncated
278//! to 24 bits and packed with the index. The addresses stored in these entries
279//! are also relative to a base address that each second-level page defines.
280//! (This will be made more clear below).
281//!
282//! There are also non-palettized "regular" second-level pages with absolute
283//! 32-bit addresses, but those are fairly rare. llvm seems to only want to emit
284//! them in the final page.
285//!
286//! The root page also stores the first address mapped by each second-level
287//! page, allowing for more efficient binary search for a particular function
288//! offset entry. (This is the base address the compressed pages use.)
289//!
290//! The root page always has a final sentinel entry which has a null pointer
291//! to its second-level page while still specifying a first address. This
292//! makes it easy to lookup the maximum mapped address (the sentinel will store
293//! that value +1), and just generally makes everything Work Nicer.
294//!
295//!
296//!
297//! ## Layout of the Page Table
298//!
299//! The page table starts at the very beginning of the `__unwind_info` section
300//! with the root page:
301//!
302//! ```rust,ignore
303//! struct RootPage {
304//!   /// Only version 1 is currently defined
305//!   version: u32 = 1,
306//!
307//!   /// The array of u32 global opcodes (offset relative to start of root page).
308//!   ///
309//!   /// These may be indexed by "compressed" second-level pages.
310//!   global_opcodes_offset: u32,
311//!   global_opcodes_len: u32,
312//!
313//!   /// The array of u32 global personality codes
314//!   /// (offset relative to start of root page).
315//!   ///
316//!   /// Personalities define the style of unwinding that an unwinder should
317//!   /// use, and how to interpret the LSDA entries for a function (see below).
318//!   personalities_offset: u32,
319//!   personalities_len: u32,
320//!
321//!   /// The array of FirstLevelPageEntry's describing the second-level pages
322//!   /// (offset relative to start of root page).
323//!   pages_offset: u32,
324//!   pages_len: u32,
325//!
326//!   // After this point there are several dynamically-sized arrays whose
327//!   // precise order and positioning don't matter, because they are all
328//!   // accessed using offsets like the ones above. The arrays are:
329//!
330//!   global_opcodes: [u32; global_opcodes_len],
331//!   personalities: [u32; personalities_len],
332//!   pages: [FirstLevelPageEntry; pages_len],
333//!
334//!   /// An array of LSDA pointers (Language Specific Data Areas).
335//!   ///
336//!   /// LSDAs are tables that an unwinder's personality function will use to
337//!   /// find what destructors should be run and whether unwinding should
338//!   /// be caught and normal execution resumed. We can treat them opaquely.
339//!   ///
340//!   /// Second-level pages have addresses into this array so that it can
341//!   /// be indexed, the root page doesn't need to know about them.
342//!   lsdas: [LsdaEntry; unknown_len],
343//! }
344//!
345//!
346//! struct FirstLevelPageEntry {
347//!   /// The first address mapped by this page.
348//!   ///
349//!   /// This is useful for binary-searching for the page that can map
350//!   /// a specific address in the binary (the primary kind of lookup
351//!   /// performed by an unwinder).
352//!   first_address: u32,
353//!
354//!   /// Offset to the second-level page (offset relative to start of root page).
355//!   ///
356//!   /// This may point to a RegularSecondLevelPage or a CompressedSecondLevelPage.
357//!   /// Which it is can be determined by the 32-bit "kind" value that is at
358//!   /// the start of both layouts.
359//!   second_level_page_offset: u32,
360//!
361//!   /// Base offset into the lsdas array that entries in this page will be
362//!   /// relative to (offset relative to start of root page).
363//!   lsda_index_offset: u32,
364//! }
365//!
366//!
367//! struct RegularSecondLevelPage {
368//!   /// Always 2 (use to distinguish from CompressedSecondLevelPage).
369//!   kind: u32 = 2,
370//!
371//!   /// The Array of RegularEntry's (offset relative to **start of this page**).
372//!   entries_offset: u16,
373//!   entries_len: u16,
374//! }
375//!
376//!
377//! struct RegularEntry {
378//!   /// The address in the binary for this entry (absolute).
379//!   instruction_address: u32,
380//!   /// The opcode for this address.
381//!   opcode: u32,
382//! }
383//!
384//!
385//! struct CompressedSecondLevelPage {
386//!   /// Always 3 (use to distinguish from RegularSecondLevelPage).
387//!   kind: u32 = 3,
388//!
389//!   /// The array of compressed u32 entries
390//!   /// (offset relative to **start of this page**).
391//!   ///
392//!   /// Entries are a u32 that contains two packed values (from high to low):
393//!   /// * 8 bits: opcode index
394//!   ///   * 0..global_opcodes_len => index into global palette
395//!   ///   * global_opcodes_len..255 => index into local palette
396//!   ///     (subtract global_opcodes_len to get the real local index)
397//!   /// * 24 bits: instruction address
398//!   ///   * address is relative to this page's first_address!
399//!   entries_offset: u16,
400//!   entries_len: u16,
401//!
402//!   /// The array of u32 local opcodes for this page
403//!   /// (offset relative to **start of this page**).
404//!   local_opcodes_offset: u16,
405//!   local_opcodes_len: u16,
406//! }
407//!
408//!
409//! // TODO: why do these have instruction_addresses? Are they not in sync
410//! // with the second-level entries?
411//! struct LsdaEntry {
412//!   instruction_address: u32,
413//!   lsda_address: u32,
414//! }
415//! ```
416//!
417//!
418//!
419//! # Opcode Format
420//!
421//! There are 3 architecture-specific opcode formats: x86, x64, and ARM64.
422//!
423//! All 3 formats have a "null opcode" (`0x0000_0000`) which indicates that
424//! there is no unwinding information for this range of addresses. This happens
425//! with things like hand-written assembly subroutines. This implementation
426//! will yield it as a valid opcode that converts into [`CompactUnwindOp::None`].
427//!
428//! All 3 formats share a common header in the top 8 bits (from high to low):
429//!
430//! ```rust,ignore
431//! /// Whether this instruction is the start of a function.
432//! is_start: u1,
433//!
434//! /// Whether there is an lsda entry for this instruction.
435//! has_lsda: u1,
436//!
437//! /// An index into the global personalities array
438//! /// (TODO: ignore if has_lsda == false?)
439//! personality_index: u2,
440//!
441//! /// The architecture-specific kind of opcode this is, specifying how to
442//! /// interpret the remaining 24 bits of the opcode.
443//! opcode_kind: u4,
444//! ```
445//!
446//!
447//!
448//! ## x86 and x64 Opcodes
449//!
450//! x86 and x64 use the same opcode layout, differing only in the registers
451//! being restored. Registers are numbered 0-6, with the following mappings:
452//!
453//! x86:
454//! * 0 => no register (like `Option::None`)
455//! * 1 => `ebx`
456//! * 2 => `ecx`
457//! * 3 => `edx`
458//! * 4 => `edi`
459//! * 5 => `esi`
460//! * 6 => `ebp`
461//!
462//! x64:
463//! * 0 => no register (like `Option::None`)
464//! * 1 => `rbx`
465//! * 2 => `r12`
466//! * 3 => `r13`
467//! * 4 => `r14`
468//! * 5 => `r15`
469//! * 6 => `rbp`
470//!
471//! Note also that encoded sizes/offsets are generally divided by the pointer size
472//! (since all values we are interested in are pointer-aligned), which of course differs
473//! between x86 and x64.
474//!
475//! There are 4 kinds of x86/x64 opcodes (specified by opcode_kind):
476//!
477//! (One of the llvm headers refers to a 5th "0=old" opcode. Apparently this
478//! was used for initial development of the format, and is basically just
479//! reserved to prevent the testing data from ever getting mixed with real
480//! data. Nothing should produce or handle it. It does incidentally match
481//! the "null opcode", but it's fine to regard that as an unknown opcode
482//! and do nothing.)
483//!
484//!
485//! ### x86/x64 Opcode 1: Frame-Based
486//!
487//! The function has the standard frame pointer (`bp`) prelude which:
488//!
489//! * Pushes the caller's `bp` to the stack
490//! * Sets `bp := sp` (new frame pointer is the current top of the stack)
491//!
492//! `bp` has been preserved, and any callee-saved registers that need to be restored
493//! are saved on the stack at a known offset from `bp`. The return address is
494//! stored just before the caller's `bp`. The caller's stack pointer should
495//! point before where the return address is saved.
496//!
497//! So to unwind you just need to do:
498//!
499//! ```text
500//! %sp := %bp + 2*POINTER_SIZE
501//! %ip := *(%bp + POINTER_SIZE)
502//! %bp := *(%bp)
503//!
504//! (and restore all the other callee-saved registers as described below)
505//! ```
506//!
507//! Registers are stored in increasing order (so `reg1` comes before `reg2`).
508//! If a register has the "no register" value, continue iterating the offset
509//! forward. This lets the registers be stored slightly-non-contiguously on the
510//! stack.
511//!
512//! The remaining 24 bits of the opcode are interpreted as follows (from high to low):
513//!
514//! ```rust,ignore
515//! /// The offset from bp that the registers to restore are saved at,
516//! /// divided by pointer size.
517//! stack_offset: u8,
518//!
519//! _unused: u1,
520//!
521//! /// Registers to restore (see register mapping in previous section)
522//! reg1: u3,
523//! reg2: u3,
524//! reg3: u3,
525//! reg4: u3,
526//! reg5: u3,
527//! ```
528//!
529//!
530//!
531//! ### x86/x64 Opcode 2: Frameless (Stack-Immediate)
532//!
533//!
534//! The callee's stack frame has a known size, so we can find the start
535//! of the frame by offsetting from sp (the stack pointer). The return
536//! address is saved immediately after that location. Any callee-saved
537//! registers that need to be restored are saved immediately after that.
538//!
539//! So to unwind you just need to do:
540//!
541//! ```text
542//! %sp := %sp + stack_size * POINTER_SIZE
543//! %ip := *(%sp - 8)
544//!
545//! (and restore all the other callee-saved registers as described below)
546//! ```
547//!
548//! Registers are stored in *reverse* order on the stack from the order the
549//! decoding algorithm outputs (so `reg[1]` comes before `reg[0]`).
550//!
551//! If a register has the "no register" value, *do not* continue iterating the
552//! offset forward -- registers are strictly contiguous (it's possible
553//! "no register" can only be trailing due to the encoding, but I haven't
554//! verified this).
555//!
556//! The remaining 24 bits of the opcode are interpreted as follows (from high to low):
557//!
558//! ```rust,ignore
559//! /// How big the stack frame is, divided by pointer size.
560//! stack_size: u8,
561//!
562//! _unused: u3,
563//!
564//! /// The number of registers that are saved on the stack.
565//! register_count: u3,
566//!
567//! /// The permutation encoding of the registers that are saved
568//! /// on the stack (see below).
569//! register_permutations: u10,
570//! ```
571//!
572//! The register permutation encoding is a Lehmer code sequence encoded into a
573//! single variable-base number so we can encode the ordering of up to
574//! six registers in a 10-bit space.
575//!
576//! This can't really be described well with anything but code, so
577//! just read this implementation or llvm's implementation for how to
578//! encode/decode this.
579//!
580//!
581//!
582//! ### x86/x64 Opcode 3: Frameless (Stack-Indirect)
583//!
584//! (Currently Unimplemented)
585//!
586//! Stack-Indirect is exactly the same situation as Stack-Immediate, but
587//! the stack-frame size is too large for Stack-Immediate to encode. However,
588//! the function prereserved the size of the frame in its prologue, so we can
589//! extract the size of the frame from a `sub` instruction at a known
590//! offset from the start of the function (`subl $nnnnnnnn,ESP` in x86,
591//! `subq $nnnnnnnn,RSP` in x64).
592//!
593//! This requires being able to find the first instruction of the function
594//! (TODO: presumably the first is_start entry <= this one?).
595//!
596//! TODO: describe how to extract the value from the `sub` instruction.
597//!
598//!
599//! ```rust,ignore
600//! /// Offset from the start of the function where the `sub` instruction
601//! /// we need is stored. (NOTE: not divided by anything!)
602//! instruction_offset: u8,
603//!
604//! /// An offset to add to the loaded stack size, divided by pointer size.
605//! /// This allows the stack size to differ slightly from the `sub`, to
606//! /// compensate for any function prologue that pushes a bunch of
607//! /// pointer-sized registers.
608//! stack_adjust: u3,
609//!
610//! /// The number of registers that are saved on the stack.
611//! register_count: u3,
612//!
613//! /// The permutation encoding of the registers that are saved on the stack
614//! /// (see Stack-Immediate for a description of this format).
615//! register_permutations: u10,
616//! ```
617//!
618//! **Note**: apparently binaries generated by the clang in Xcode 6 generated
619//! corrupted versions of this opcode, but this was fixed in Xcode 7
620//! (released in September 2015), so *presumably* this isn't something we're
621//! likely to encounter. But if you encounter messed up opcodes this might be why.
622//!
623//!
624//!
625//! ### x86/x64 Opcode 4: Dwarf
626//!
627//! There is no compact unwind info here, and you should instead use the
628//! DWARF CFI in `.eh_frame` for this line. The remaining 24 bits of the opcode
629//! are an offset into the `.eh_frame` section that should hold the DWARF FDE
630//! for this instruction address.
631//!
632//!
633//!
634//! ## ARM64 Opcodes
635//!
636//! ARM64 (AKA AArch64) is a lot more strict about the ABI of functions, and
637//! as such it has fairly simple opcodes. There are 3 kinds of ARM64 opcode:
638//!
639//! (Yes there's no Opcode 1, I don't know why.)
640//!
641//!
642//! ### ARM64 Opcode 2: Frameless
643//!
644//! This is a "frameless" leaf function. The caller is responsible for
645//! saving/restoring all of its general purpose registers. The frame pointer
646//! is still the caller's frame pointer and doesn't need to be touched. The
647//! return address is stored in the link register (`x30`).
648//!
649//! So to unwind you just need to do:
650//!
651//! ```text
652//! %sp := %sp + stack_size * 16
653//! %pc := %x30
654//!
655//! (no other registers to restore)
656//! ```
657//!
658//! The remaining 24 bits of the opcode are interpreted as follows (from high to low):
659//!
660//! ```rust,ignore
661//! /// How big the stack frame is, divided by 16.
662//! stack_size: u12,
663//!
664//! _unused: u12,
665//! ```
666//!
667//!
668//!
669//! ### ARM64 Opcode 3: Dwarf
670//!
671//! There is no compact unwind info here, and you should instead use the
672//! DWARF CFI in `.eh_frame` for this line. The remaining 24 bits of the opcode
673//! are an offset into the `.eh_frame` section that should hold the DWARF FDE
674//! for this instruction address.
675//!
676//!
677//!
678//! ### ARM64 Opcode 4: Frame-Based
679//!
680//! This is a function with the standard prologue. The return address (`pc`) and the
681//! frame pointer (`x29`) were pushed onto the stack in a pair and in that order
682//! (ARM64 registers are saved/restored in pairs), and then the frame pointer was updated
683//! to the current stack pointer.
684//!
685//! So to unwind you just need to do:
686//!
687//! ```text
688//! %sp := %x29 + 16
689//! %pc := *(%x29 + 8)
690//! %x29 := *(%x29)
691//!
692//! (and restore all the other callee-saved registers as described below)
693//! ```
694//!
695//! Any callee-saved registers that need to be restored were then pushed
696//! onto the stack in pairs in the following order (if they were pushed at
697//! all, see below):
698//!
699//! 1. `x19`, `x20`
700//! 2. `x21`, `x22`
701//! 3. `x23`, `x24`
702//! 4. `x25`, `x26`
703//! 5. `x27`, `x28`
704//! 6. `d8`, `d9`
705//! 7. `d10`, `d11`
706//! 8. `d12`, `d13`
707//! 9. `d14`, `d15`
708//!
709//! The remaining 24 bits of the opcode are interpreted as follows (from high to low):
710//!
711//! ```rust,ignore
712//! _unused: u15,
713//!
714//! // Whether each register pair was pushed
715//! d14_and_d15_saved: u1,
716//! d12_and_d13_saved: u1,
717//! d10_and_d11_saved: u1,
718//! d8_and_d9_saved: u1,
719//!
720//! x27_and_x28_saved: u1,
721//! x25_and_x26_saved: u1,
722//! x23_and_x24_saved: u1,
723//! x21_and_x22_saved: u1,
724//! x19_and_x20_saved: u1,
725//! ```
726//!
727//!
728//!
729//! # Notable Corners
730//!
731//! Here's some notable corner cases and esoterica of the format. Behaviour in
732//! these situations is not strictly guaranteed (as in we may decide to
733//! make the implementation more strict or liberal if it is deemed necessary
734//! or desirable). But current behaviour *is* documented here for the sake of
735//! maintenance/debugging. Hopefully it also helps highlight all the ways things
736//! can go wrong for anyone using this documentation to write their own tooling.
737//!
738//! For all these cases, if an Error is reported during iteration/search, the
739//! [`CompactUnwindInfoIter`] will be in an unspecified state for future queries.
740//! It will never violate memory safety but it may start yielding chaotic
741//! values.
742//!
743//! If this implementation ever panics, that should be regarded as an
744//! implementation bug.
745//!
746//!
747//! Things we allow:
748//!
749//! * The personalities array has a 32-bit length, but all indices into
750//!   it are only 2 bits. As such, it is theoretically possible for there
751//!   to be unindexable personalities. In practice that Shouldn't Happen,
752//!   and this implementation won't report an error if it does, because it
753//!   can be benign (although we have no way to tell if indices were truncated).
754//!
755//! * The llvm headers say that at most there should be 127 global opcodes
756//!   and 128 local opcodes, but since local index translation is based on
757//!   the actual number of global opcodes and *not* 127/128, there's no
758//!   reason why each palette should be individually limited like this.
759//!   This implementation doesn't report an error if this happens, and should
760//!   work fine if it does.
761//!
762//! * The llvm headers say that second-level pages are *actual* pages at
763//!   a fixed size of 4096 bytes. It's unclear what advantage this provides,
764//!   perhaps there's a situation where you're mapping in the pages on demand?
765//!   This puts a practical limit on the number of entries each second-level
766//!   page can hold -- regular pages can fit 511 entries, while compressed
767//!   pages can hold 1021 entries+local_opcodes (they have to share). This
768//!   implementation does not report an error if a second-level page has more
769//!   values than that, and should work fine if it does.
770//!
771//! * If a [`CompactUnwindInfoIter`] is created for an architecture it wasn't
772//!   designed for, it is assumed that the layout of the page tables will
773//!   remain the same, and entry iteration/lookup should still work and
774//!   produce results. However [`CompactUnwindInfoEntry::instructions`]
775//!   will always return [`CompactUnwindOp::None`].
776//!
777//! * If an opcode kind is encountered that this implementation wasn't
778//!   designed for, `Opcode::instructions` will return [`CompactUnwindOp::None`].
779//!
780//! * If two entries have the same address (making the first have zero-length),
781//!   we silently discard the first one in favour of the second.
782//!
783//! * Only 7 register mappings are provided for x86/x64 opcodes, but the
784//!   3-bit encoding allows for 8. This implementation will just map the
785//!   8th encoding to "no register" as well.
786//!
787//! * Only 6 registers can be restored by the x86/x64 stackless modes, but
788//!   the 3-bit encoding of the register count allows for 7. This implementation
789//!   clamps the value to 6.
790//!
791//!
792//! Things we produce errors for:
793//!
794//! * If the root page has a version this implementation wasn't designed for,
795//!   [`CompactUnwindInfoIter::new`] will return an Error.
796//!
797//! * A corrupt unwind_info section may have its entries out of order. Since
798//!   the next entry's instruction_address is always needed to compute the
799//!   number of bytes the current entry covers, the implementation will report
800//!   an error if it encounters this. However it does not attempt to fully
801//!   validate the ordering during an `entry_for_address` query, as this would
802//!   significantly slow down the binary search. In this situation
803//!   you may get chaotic results (same guarantees as `BTreeMap` with an
804//!   inconsistent `Ord` implementation).
805//!
806//! * A corrupt unwind_info section may attempt to index out of bounds either
807//!   with out-of-bounds offset values (e.g. personalities_offset) or with out
808//!   of bounds indices (e.g. a local opcode index). When an array length is
//!   provided, this implementation will return an error if an index is
//!   out of bounds. Offsets are only restricted to the unwind_info
811//!   section itself, as this implementation does not assume arrays are
812//!   placed in any particular place, and does not try to prevent aliasing.
813//!   Trying to access outside the `.unwind_info` section will return an error.
814//!
815//! * If an unknown second-level page type is encountered, iteration/lookup will
816//!   return an error.
817//!
818//!
819//! Things that cause chaos:
820//!
821//! * If the null page was missing, there would be no way to identify the
822//!   number of instruction bytes the last entry in the table covers. As such,
823//!   this implementation assumes that it exists, and currently does not validate
824//!   it ahead of time. If the null page *is* missing, the last entry or page
825//!   may be treated as the null page, and won't be emitted. (Perhaps we should
826//!   provide more reliable behaviour here?)
827//!
828//! * If there are multiple null pages, or if there is a page with a defined
829//!   second-level page but no entries of its own, behaviour is unspecified.
830//!
831
832use crate::macho::MachError;
833use goblin::error::Error;
834use goblin::mach::segment::SectionData;
835use scroll::{Endian, Pread};
836use std::mem;
837
// Hacky glue types to keep exposure of the containing library minimal.
// This will help with transplanting this code into goblin.
/// Local result alias: every parse failure in this module surfaces as a [`MachError`].
type Result<T> = std::result::Result<T, MachError>;
841
/// The CPU family the `.unwind_info` section applies to.
///
/// Selects how opcodes are decoded; for unrecognized architectures entries
/// can still be iterated, but their opcodes are not interpreted.
#[derive(Debug, Clone)]
enum Arch {
    /// 32-bit x86.
    X86,
    /// 64-bit x86-64.
    X64,
    /// 64-bit ARM.
    Arm64,
    /// Any other architecture; opcodes decode to no instructions.
    Other,
}
849
850// Types marked with repr(C) indicate their layout precisely matches the
851// layout of the format. In theory we could point directly into the binary
852// of the unwind_info section with these types, but we avoid doing so for
853// endianness/safety.
854
/// The root ("first-level") page header of an `.unwind_info` section.
///
/// Parsed right after the leading 32-bit version field. The offsets stored
/// here are used to index directly into the section data, so "start of root
/// page" coincides with the start of the section.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct FirstLevelPage {
    // Only version 1 is currently defined
    // version: u32 = 1,
    /// The array of u32 global opcodes (offset relative to start of root page).
    ///
    /// These may be indexed by "compressed" second-level pages.
    global_opcodes_offset: u32,
    /// Number of u32 values in the global opcodes array.
    global_opcodes_len: u32,

    /// The array of u32 global personality codes (offset relative to start of root page).
    ///
    /// Personalities define the style of unwinding that an unwinder should use,
    /// and how to interpret the LSDA entries for a function (see below).
    personalities_offset: u32,
    /// Number of u32 values in the personalities array.
    personalities_len: u32,

    /// The array of [`FirstLevelPageEntry`]'s describing the second-level pages
    /// (offset relative to start of root page).
    pages_offset: u32,
    /// Number of entries in the pages array (including the sentinel page).
    pages_len: u32,
    // After this point there are several dynamically-sized arrays whose precise
    // order and positioning don't matter, because they are all accessed using
    // offsets like the ones above. The arrays are:

    // global_opcodes: [u32; global_opcodes_len],
    // personalities: [u32; personalities_len],
    // pages: [FirstLevelPageEntry; pages_len],
    // lsdas: [LsdaEntry; unknown_len],
}
886
/// One entry of the root page's `pages` array, describing a single
/// second-level page and the address range it begins at.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct FirstLevelPageEntry {
    /// The first address mapped by this page.
    ///
    /// This is useful for binary-searching for the page that can map
    /// a specific address in the binary (the primary kind of lookup
    /// performed by an unwinder).
    first_address: u32,

    /// Offset to the second-level page (offset relative to start of root page).
    ///
    /// This may point to either a [`RegularSecondLevelPage`] or a [`CompressedSecondLevelPage`].
    /// Which it is can be determined by the 32-bit "kind" value that is at
    /// the start of both layouts.
    second_level_page_offset: u32,

    /// Base offset into the lsdas array that entries in this page will be relative
    /// to (offset relative to start of root page).
    lsda_index_offset: u32,
}
908
/// Header of a "regular" second-level page, whose entries are full
/// uncompressed [`RegularEntry`] values (kind tag = 2, consumed before parsing).
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct RegularSecondLevelPage {
    // Always 2 (use to distinguish from CompressedSecondLevelPage).
    // kind: u32 = 2,
    /// The Array of [`RegularEntry`]'s (offset relative to **start of this page**).
    entries_offset: u16,
    /// Number of entries in this page.
    entries_len: u16,
}
918
/// Header of a "compressed" second-level page, whose entries pack an opcode
/// palette index and a page-relative address into a single u32
/// (kind tag = 3, consumed before parsing).
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct CompressedSecondLevelPage {
    // Always 3 (use to distinguish from RegularSecondLevelPage).
    // kind: u32 = 3,
    /// The array of compressed u32 entries (offset relative to **start of this page**).
    ///
    /// Entries are a u32 that contains two packed values (from highest to lowest bits):
    /// * 8 bits: opcode index
    ///   * 0..global_opcodes_len => index into global palette
    ///   * global_opcodes_len..255 => index into local palette (subtract global_opcodes_len)
    /// * 24 bits: instruction address
    ///   * address is relative to this page's first_address!
    entries_offset: u16,
    /// Number of compressed entries in this page.
    entries_len: u16,

    /// The array of u32 local opcodes for this page (offset relative to **start of this page**).
    local_opcodes_offset: u16,
    /// Number of u32 values in the local opcode palette.
    local_opcodes_len: u16,
}
939
/// An uncompressed second-level entry: an absolute instruction address
/// paired with its complete 32-bit opcode.
#[repr(C)]
#[derive(Debug, Clone, Pread)]
struct RegularEntry {
    /// The address in the binary for this entry (absolute).
    instruction_address: u32,
    /// The opcode for this address.
    opcode: u32,
}
948
/// An entry of the root page's `lsdas` array, pairing an instruction address
/// with the address of its LSDA.
///
/// Currently unused (see the `TODO: print LSDA info` in `dump`); kept to
/// document the on-disk format, hence the `dead_code` allowance.
#[allow(dead_code)]
#[derive(Debug, Clone)]
#[repr(C)]
struct LsdaEntry {
    instruction_address: u32,
    lsda_address: u32,
}
956
/// Either a fully resolved opcode, or a palette index that still needs to be
/// looked up (resolved later by `complete_entry`).
#[derive(Debug, Clone)]
enum OpcodeOrIndex {
    /// A complete opcode, as found in regular second-level pages.
    Opcode(u32),
    /// An index into the global/local opcode palettes, as found in
    /// compressed second-level pages.
    Index(u32),
}
962
/// A minimally parsed entry: its covered byte length is not yet known,
/// because that requires reading the *next* entry's instruction address.
#[derive(Debug, Clone)]
struct RawCompactUnwindInfoEntry {
    /// The address of the first instruction this entry applies to
    /// (may apply to later instructions as well).
    instruction_address: u32,
    /// Either an opcode or the index into an opcode palette
    opcode_or_index: OpcodeOrIndex,
}
971
/// An iterator over the [`CompactUnwindInfoEntry`]'s of a `.unwind_info` section.
#[derive(Debug, Clone)]
pub struct CompactUnwindInfoIter<'a> {
    /// Parent .unwind_info metadata.
    arch: Arch,
    /// Byte order of the section data.
    endian: Endian,
    /// Raw bytes of the `.unwind_info` section; all offsets index into this.
    section: SectionData<'a>,
    /// Parsed root page.
    root: FirstLevelPage,

    // Iterator state
    /// Current index in the root node.
    first_idx: u32,
    /// Current index in the second-level node.
    second_idx: u32,
    /// Parsed version of the current pages.
    page_of_next_entry: Option<(FirstLevelPageEntry, SecondLevelPage)>,
    /// Minimally parsed version of the next entry, which we need to have
    /// already loaded to know how many instructions the previous entry covered.
    next_entry: Option<RawCompactUnwindInfoEntry>,
    /// Whether the current second-level page is exhausted and the next call
    /// to `next_raw` must load a new page (starts `true` to load page 0).
    done_page: bool,
}
994
impl<'a> CompactUnwindInfoIter<'a> {
    /// Creates a new [`CompactUnwindInfoIter`] for the given section.
    ///
    /// Reads and validates the section's leading version field (only
    /// version 1 is supported) and parses the root page header; returns a
    /// [`MachError`] for unknown versions or truncated data.
    pub fn new(
        section: SectionData<'a>,
        little_endian: bool,
        arch: symbolic_common::Arch,
    ) -> Result<Self> {
        const UNWIND_SECTION_VERSION: u32 = 1;

        use symbolic_common::CpuFamily;
        let arch = match arch.cpu_family() {
            CpuFamily::Intel32 => Arch::X86,
            CpuFamily::Amd64 => Arch::X64,
            CpuFamily::Arm64 => Arch::Arm64,
            _ => Arch::Other,
        };

        let endian = if little_endian {
            Endian::Little
        } else {
            Endian::Big
        };

        let offset = &mut 0;

        // Grab all the fields from the header
        let version: u32 = section.gread_with(offset, endian)?;
        if version != UNWIND_SECTION_VERSION {
            return Err(MachError::from(Error::Malformed(format!(
                "Unknown Compact Unwinding Info version {version}"
            ))));
        }

        let root = section.gread_with(offset, endian)?;

        let iter = CompactUnwindInfoIter {
            arch,
            endian,
            section,
            root,

            first_idx: 0,
            second_idx: 0,
            page_of_next_entry: None,
            next_entry: None,
            // Start "done" so the first call to next_raw loads page 0.
            done_page: true,
        };

        Ok(iter)
    }
    /// Gets the next entry in the iterator.
    #[allow(clippy::should_implement_trait)]
    pub fn next(&mut self) -> Result<Option<CompactUnwindInfoEntry>> {
        // Iteration is slightly more complex here because we want to be able to
        // report how many instructions an entry covers, and knowing this requires us
        // to parse the *next* entry's instruction_address value. Also, there's
        // a sentinel page at the end of the listing with a null second_level_page_offset
        // which requires some special handling.
        //
        // To handle this, we split iteration into two phases:
        //
        // * next_raw minimally parses the next entry so we can extract the opcode,
        //   while also ensuring page_of_next_entry is set to match it.
        //
        // * next uses next_raw to "peek" the instruction_address of the next entry,
        //   and then saves the result as `next_entry`, to avoid doing a bunch of
        //   repeated work.

        // If this is our first iteration next_entry will be empty, try to get it.
        if self.next_entry.is_none() {
            self.next_entry = self.next_raw()?;
        }

        if let Some(cur_entry) = self.next_entry.take() {
            // Copy the first and second page data, as it may get overwritten
            // by next_raw, then peek the next entry.
            let (first_page, second_page) = self.page_of_next_entry.clone().unwrap();
            self.next_entry = self.next_raw()?;
            if let Some(next_entry) = self.next_entry.as_ref() {
                let result = self.complete_entry(
                    &cur_entry,
                    next_entry.instruction_address,
                    &first_page,
                    &second_page,
                )?;
                Ok(Some(result))
            } else {
                // If there's no next_entry, then cur_entry is the sentinel, which
                // we shouldn't yield.
                Ok(None)
            }
        } else {
            // next_raw still yielded nothing, we're done.
            Ok(None)
        }
    }

    // Yields a minimally parsed version of the next entry, and sets
    // page_of_next_entry to the page matching it (so it can be further
    // parsed when needed).
    fn next_raw(&mut self) -> Result<Option<RawCompactUnwindInfoEntry>> {
        // First, load up the page for this value if needed
        if self.done_page {
            // Only advance the indices if we've already loaded up a page
            // (so it's not the first iteration) and we have pages left.
            if self.page_of_next_entry.is_some() && self.first_idx != self.root.pages_len {
                self.first_idx += 1;
                self.second_idx = 0;
            }
            if let Some(entry) = self.first_level_entry(self.first_idx)? {
                if entry.second_level_page_offset == 0 {
                    // sentinel page at the end of the list, create a dummy entry
                    // and advance past this page (don't reset done_page).
                    return Ok(Some(RawCompactUnwindInfoEntry {
                        instruction_address: entry.first_address,
                        opcode_or_index: OpcodeOrIndex::Opcode(0),
                    }));
                }
                let second_level_page = self.second_level_page(entry.second_level_page_offset)?;
                self.page_of_next_entry = Some((entry, second_level_page));
                self.done_page = false;
            } else {
                // Couldn't load a page, so we're at the end of our iteration.
                return Ok(None);
            }
        }

        // If we get here, we must have loaded a page
        let (first_level_entry, second_level_page) = self.page_of_next_entry.as_ref().unwrap();
        let entry =
            self.second_level_entry(first_level_entry, second_level_page, self.second_idx)?;

        // Advance to the next entry
        self.second_idx += 1;

        // If we reach the end of the page, setup for the next page
        if self.second_idx == second_level_page.len() {
            self.done_page = true;
        }

        Ok(Some(entry))
    }

    /*
    /// Gets the entry associated with a particular address.
    pub fn entry_for_address(&mut self, _address: u32) -> Result<Option<CompactUnwindInfoEntry>> {
        // TODO: this would be nice for an actual unwinding implementation, but
        // dumping all of the entries doesn't need this.
    }
    */

    // Reads the `idx`-th [`FirstLevelPageEntry`] of the root page's `pages`
    // array, or `None` when `idx` is past the end of the array.
    fn first_level_entry(&self, idx: u32) -> Result<Option<FirstLevelPageEntry>> {
        if idx < self.root.pages_len {
            let idx_offset = mem::size_of::<FirstLevelPageEntry>() * idx as usize;
            let offset = self.root.pages_offset as usize + idx_offset;

            Ok(Some(self.section.pread_with(offset, self.endian)?))
        } else {
            Ok(None)
        }
    }

    // Parses the second-level page header at `offset` (relative to the start
    // of the section), dispatching on the leading 32-bit "kind" tag.
    // Unknown kinds are reported as an error.
    fn second_level_page(&self, offset: u32) -> Result<SecondLevelPage> {
        const SECOND_LEVEL_REGULAR: u32 = 2;
        const SECOND_LEVEL_COMPRESSED: u32 = 3;

        let mut offset = offset as usize;

        let kind: u32 = self.section.gread_with(&mut offset, self.endian)?;
        if kind == SECOND_LEVEL_REGULAR {
            Ok(SecondLevelPage::Regular(
                self.section.gread_with(&mut offset, self.endian)?,
            ))
        } else if kind == SECOND_LEVEL_COMPRESSED {
            Ok(SecondLevelPage::Compressed(
                self.section.gread_with(&mut offset, self.endian)?,
            ))
        } else {
            Err(MachError::from(Error::Malformed(format!(
                "Unknown second-level page kind: {kind}"
            ))))
        }
    }

    // Minimally parses the entry at `second_level_idx` within the given
    // second-level page. Compressed entries only yield a palette index;
    // regular entries carry their full opcode.
    fn second_level_entry(
        &self,
        first_level_entry: &FirstLevelPageEntry,
        second_level_page: &SecondLevelPage,
        second_level_idx: u32,
    ) -> Result<RawCompactUnwindInfoEntry> {
        match *second_level_page {
            SecondLevelPage::Compressed(ref page) => {
                // Compressed entries are 4 bytes each.
                let offset = first_level_entry.second_level_page_offset as usize
                    + page.entries_offset as usize
                    + second_level_idx as usize * 4;
                let compressed_entry: u32 = self.section.pread_with(offset, self.endian)?;

                // Unpack (opcode_idx << 24) | 24-bit page-relative address.
                let instruction_address =
                    (compressed_entry & 0x00FFFFFF) + first_level_entry.first_address;
                let opcode_idx = (compressed_entry >> 24) & 0xFF;
                Ok(RawCompactUnwindInfoEntry {
                    instruction_address,
                    opcode_or_index: OpcodeOrIndex::Index(opcode_idx),
                })
            }
            SecondLevelPage::Regular(ref page) => {
                // Regular entries are 8 bytes each (address + opcode).
                let offset = first_level_entry.second_level_page_offset as usize
                    + page.entries_offset as usize
                    + second_level_idx as usize * 8;

                let entry: RegularEntry = self.section.pread_with(offset, self.endian)?;

                Ok(RawCompactUnwindInfoEntry {
                    instruction_address: entry.instruction_address,
                    opcode_or_index: OpcodeOrIndex::Opcode(entry.opcode),
                })
            }
        }
    }

    // Fully resolves a raw entry: looks up palette indices (for compressed
    // pages) and computes the entry's byte length from the next entry's
    // instruction address. Errors if the addresses are not monotonic or a
    // local opcode index is out of bounds.
    fn complete_entry(
        &self,
        entry: &RawCompactUnwindInfoEntry,
        next_entry_instruction_address: u32,
        first_level_entry: &FirstLevelPageEntry,
        second_level_page: &SecondLevelPage,
    ) -> Result<CompactUnwindInfoEntry> {
        if entry.instruction_address > next_entry_instruction_address {
            return Err(MachError::from(Error::Malformed(format!(
                "Entry addresses are not monotonic! ({} > {})",
                entry.instruction_address, next_entry_instruction_address
            ))));
        }
        let opcode = match entry.opcode_or_index {
            OpcodeOrIndex::Opcode(opcode) => opcode,
            OpcodeOrIndex::Index(opcode_idx) => {
                // Indices only come from compressed pages (see second_level_entry).
                if let SecondLevelPage::Compressed(ref page) = second_level_page {
                    if opcode_idx < self.root.global_opcodes_len {
                        self.global_opcode(opcode_idx)?
                    } else {
                        // Local palette indices start after the global ones.
                        let opcode_idx = opcode_idx - self.root.global_opcodes_len;
                        if opcode_idx >= page.local_opcodes_len as u32 {
                            return Err(MachError::from(Error::Malformed(format!(
                                "Local opcode index too large ({} >= {})",
                                opcode_idx, page.local_opcodes_len
                            ))));
                        }
                        let offset = first_level_entry.second_level_page_offset as usize
                            + page.local_opcodes_offset as usize
                            + opcode_idx as usize * 4;
                        let opcode: u32 = self.section.pread_with(offset, self.endian)?;
                        opcode
                    }
                } else {
                    unreachable!()
                }
            }
        };
        let opcode = Opcode(opcode);

        Ok(CompactUnwindInfoEntry {
            instruction_address: entry.instruction_address,
            len: next_entry_instruction_address - entry.instruction_address,
            opcode,
        })
    }

    // Reads the `opcode_idx`-th u32 from the root page's global opcode
    // palette, with a bounds check against global_opcodes_len.
    fn global_opcode(&self, opcode_idx: u32) -> Result<u32> {
        if opcode_idx >= self.root.global_opcodes_len {
            return Err(MachError::from(Error::Malformed(format!(
                "Global opcode index too large ({} >= {})",
                opcode_idx, self.root.global_opcodes_len
            ))));
        }
        let offset = self.root.global_opcodes_offset as usize + opcode_idx as usize * 4;
        let opcode: u32 = self.section.pread_with(offset, self.endian)?;
        Ok(opcode)
    }

    // Reads the `personality_idx`-th u32 from the root page's personalities
    // array, with a bounds check against personalities_len.
    fn personality(&self, personality_idx: u32) -> Result<u32> {
        if personality_idx >= self.root.personalities_len {
            return Err(MachError::from(Error::Malformed(format!(
                "Personality index too large ({} >= {})",
                personality_idx, self.root.personalities_len
            ))));
        }
        let offset = self.root.personalities_offset as usize + personality_idx as usize * 4;
        let personality: u32 = self.section.pread_with(offset, self.endian)?;
        Ok(personality)
    }

    /// Dumps similar output to `llvm-objdump --unwind-info`, for debugging.
    pub fn dump(&self) -> Result<()> {
        println!("Contents of __unwind_info section:");
        println!("  Version:                                   0x1");
        println!(
            "  Common encodings array section offset:     0x{:x}",
            self.root.global_opcodes_offset
        );
        println!(
            "  Number of common encodings in array:       0x{:x}",
            self.root.global_opcodes_len
        );
        println!(
            "  Personality function array section offset: 0x{:x}",
            self.root.personalities_offset
        );
        println!(
            "  Number of personality functions in array:  0x{:x}",
            self.root.personalities_len
        );
        println!(
            "  Index array section offset:                0x{:x}",
            self.root.pages_offset
        );
        println!(
            "  Number of indices in array:                0x{:x}",
            self.root.pages_len
        );

        println!(
            "  Common encodings: (count = {})",
            self.root.global_opcodes_len
        );
        for i in 0..self.root.global_opcodes_len {
            let opcode = self.global_opcode(i)?;
            println!("    encoding[{i}]: 0x{opcode:08x}");
        }

        println!(
            "  Personality functions: (count = {})",
            self.root.personalities_len
        );
        for i in 0..self.root.personalities_len {
            let personality = self.personality(i)?;
            println!("    personality[{i}]: 0x{personality:08x}");
        }

        println!("  Top level indices: (count = {})", self.root.pages_len);
        for i in 0..self.root.pages_len {
            // i < pages_len, so first_level_entry always returns Some here.
            let entry = self.first_level_entry(i)?.unwrap();
            println!("    [{}]: function offset=0x{:08x}, 2nd level page offset=0x{:08x}, LSDA offset=0x{:08x}",
                    i,
                    entry.first_address,
                    entry.second_level_page_offset,
                    entry.lsda_index_offset);
        }

        // TODO: print LSDA info
        println!("  LSDA descriptors:");
        println!("  Second level indices:");

        let mut iter = (*self).clone();
        while let Some(raw_entry) = iter.next_raw()? {
            // NOTE(review): if the table consisted solely of the sentinel page,
            // page_of_next_entry would still be None here and this unwrap would
            // panic — presumed unreachable for well-formed sections; confirm.
            let (first, second) = iter.page_of_next_entry.clone().unwrap();
            // Always observing the index after the step, so subtract 1
            let second_idx = iter.second_idx - 1;

            // If this is the first entry of this page, dump the page
            if second_idx == 0 {
                println!("    Second level index[{}]: offset in section=0x{:08x}, base function=0x{:08x}",
                iter.first_idx,
                first.second_level_page_offset,
                first.first_address);
            }

            // Dump the entry

            // Feed in own instruction_address as a dummy value (we don't need it for this format)
            let entry =
                iter.complete_entry(&raw_entry, raw_entry.instruction_address, &first, &second)?;
            if let OpcodeOrIndex::Index(opcode_idx) = raw_entry.opcode_or_index {
                println!(
                    "      [{}]: function offset=0x{:08x}, encoding[{}]=0x{:08x}",
                    second_idx, entry.instruction_address, opcode_idx, entry.opcode.0
                );
            } else {
                println!(
                    "      [{}]: function offset=0x{:08x}, encoding=0x{:08x}",
                    second_idx, entry.instruction_address, entry.opcode.0
                );
            }
        }

        Ok(())
    }
}
1382
/// A parsed second-level page header, in either of the two on-disk layouts.
#[derive(Debug, Clone)]
enum SecondLevelPage {
    /// A page of packed u32 entries referencing opcode palettes.
    Compressed(CompressedSecondLevelPage),
    /// A page of full (address, opcode) entries.
    Regular(RegularSecondLevelPage),
}
1388
1389impl SecondLevelPage {
1390    fn len(&self) -> u32 {
1391        match *self {
1392            SecondLevelPage::Regular(ref page) => page.entries_len as u32,
1393            SecondLevelPage::Compressed(ref page) => page.entries_len as u32,
1394        }
1395    }
1396}
1397
/// A Compact Unwind Info entry.
///
/// Yielded by [`CompactUnwindInfoIter::next`]; covers the half-open address
/// range `[instruction_address, instruction_address + len)`.
#[derive(Debug, Clone)]
pub struct CompactUnwindInfoEntry {
    /// The first instruction this entry covers.
    pub instruction_address: u32,
    /// How many addresses this entry covers.
    pub len: u32,
    /// The opcode for this entry.
    opcode: Opcode,
}
1408
1409impl CompactUnwindInfoEntry {
1410    /// Gets cfi instructions associated with this entry.
1411    pub fn instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
1412        self.opcode.instructions(iter)
1413    }
1414}
1415
/// A Compact Unwinding Operation
///
/// Produced by [`CompactUnwindInfoEntry::instructions`].
#[derive(Debug)]
pub enum CompactUnwindOp {
    /// The instructions can be described with simple CFI operations.
    CfiOps(CompactCfiOpIter),
    /// Instructions can't be encoded by Compact Unwinding, but an FDE
    /// with real DWARF CFI instructions is stored in the eh_frame section.
    UseDwarfFde {
        /// The offset in the eh_frame section where the FDE is.
        offset_in_eh_frame: u32,
    },
    /// Nothing to do (may be unimplemented features or an unknown encoding)
    None,
}
1430
/// Minimal set of CFI ops needed to express Compact Unwinding semantics:
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CompactCfiOp {
    /// The value of `dest_reg` is *stored at* `src_reg + offset_from_src`.
    RegisterAt {
        /// Destination
        dest_reg: CompactCfiRegister,
        /// Source
        src_reg: CompactCfiRegister,
        /// Offset added to the source register's value (in bytes).
        offset_from_src: i32,
    },
    /// The value of `dest_reg` *is* `src_reg + offset_from_src`.
    RegisterIs {
        /// Destination
        dest_reg: CompactCfiRegister,
        /// Source
        src_reg: CompactCfiRegister,
        /// Offset added to the source register's value (in bytes).
        offset_from_src: i32,
    },
}
1453
/// The unwinding strategy encoded in an x86/x64 compact unwind opcode.
#[derive(Debug, Clone)]
enum X86UnwindingMode {
    /// Standard frame: rbp preserved by the usual prelude.
    RbpFrame,
    /// Frameless function; stack size encoded in the opcode itself.
    StackImmediate,
    /// Frameless function; stack size stored outside the opcode
    /// (presumably read from the function text — confirm in decoder).
    StackIndirect,
    /// Unwind info lives in a DWARF FDE in the eh_frame section.
    Dwarf,
}
1461
/// The unwinding strategy encoded in an arm64 compact unwind opcode.
#[derive(Debug, Clone)]
enum Arm64UnwindingMode {
    /// Function has no frame record.
    Frameless,
    /// Unwind info lives in a DWARF FDE in the eh_frame section.
    Dwarf,
    /// Function sets up a standard frame record.
    Frame,
}
1468
/// A raw 32-bit compact unwind encoding, decoded per-architecture
/// by the `instructions` family of methods.
#[derive(Debug, Clone)]
struct Opcode(u32);
1471
1472// Arch-generic stuff
1473impl Opcode {
1474    fn instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
1475        match iter.arch {
1476            Arch::X86 | Arch::X64 => self.x86_instructions(iter),
1477            Arch::Arm64 => self.arm64_instructions(iter),
1478            _ => CompactUnwindOp::None,
1479        }
1480    }
1481
1482    fn pointer_size(&self, iter: &CompactUnwindInfoIter) -> u32 {
1483        match iter.arch {
1484            Arch::X86 => 4,
1485            Arch::X64 => 8,
1486            Arch::Arm64 => 8,
1487            _ => unimplemented!(),
1488        }
1489    }
1490
1491    /*
1492    // potentially needed for future work:
1493
1494    fn is_start(&self) -> bool {
1495        let offset = 32 - 1;
1496        (self.0 & (1 << offset)) != 0
1497    }
1498    fn has_lsda(&self) -> bool{
1499        let offset = 32 - 2;
1500        (self.0 & (1 << offset)) != 0
1501    }
1502    fn personality_index(&self) -> u32 {
1503        let offset = 32 - 4;
1504        (self.0 >> offset) & 0b11
1505    }
1506    */
1507}
1508
1509// x86/x64 implementation
1510impl Opcode {
1511    fn x86_instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
1512        let pointer_size = self.pointer_size(iter) as i32;
1513        match self.x86_mode() {
1514            Some(X86UnwindingMode::RbpFrame) => {
1515                // This function has the standard function prelude and rbp
1516                // has been preserved. Additionally, any callee-saved registers
1517                // that haven't been preserved (x86_rbp_registers) are saved on
1518                // the stack at x86_rbp_stack_offset.
1519                let mut ops = CompactCfiOpIter::new();
1520
1521                ops.push(CompactCfiOp::RegisterIs {
1522                    dest_reg: CompactCfiRegister::cfa(),
1523                    src_reg: CompactCfiRegister::frame_pointer(),
1524                    offset_from_src: 2 * pointer_size,
1525                });
1526                ops.push(CompactCfiOp::RegisterAt {
1527                    dest_reg: CompactCfiRegister::frame_pointer(),
1528                    src_reg: CompactCfiRegister::cfa(),
1529                    offset_from_src: -2 * pointer_size,
1530                });
1531                ops.push(CompactCfiOp::RegisterAt {
1532                    dest_reg: CompactCfiRegister::instruction_pointer(),
1533                    src_reg: CompactCfiRegister::cfa(),
1534                    offset_from_src: -pointer_size,
1535                });
1536
1537                // This implementation here is in line with whatever llvm does here:
1538                // https://github.com/llvm/llvm-project/blob/d21a35ac0a958fd4cff0b8f424a2706b8785b89d/lldb/source/Symbol/CompactUnwindInfo.cpp#L766-L788
1539
1540                // These offsets are relative to the frame pointer, but
1541                // cfi prefers things to be relative to the cfa, so apply
1542                // the same offset here too.
1543                let offset = self.x86_rbp_stack_offset() as i32 + 2;
1544                // Offset advances even if there's no register here
1545                for (i, reg) in self.x86_rbp_registers().iter().enumerate() {
1546                    if let Some(reg) = *reg {
1547                        ops.push(CompactCfiOp::RegisterAt {
1548                            dest_reg: reg,
1549                            src_reg: CompactCfiRegister::cfa(),
1550                            offset_from_src: -(offset - i as i32) * pointer_size,
1551                        });
1552                    }
1553                }
1554                CompactUnwindOp::CfiOps(ops.into_iter())
1555            }
1556            Some(X86UnwindingMode::StackImmediate) => {
1557                // This function doesn't have the standard rbp-based prelude,
1558                // but we know how large its stack frame is (x86_frameless_stack_size),
1559                // and any callee-saved registers that haven't been preserved are
1560                // saved *immediately* after the location at rip.
1561
1562                let mut ops = CompactCfiOpIter::new();
1563
1564                let stack_size = self.x86_frameless_stack_size();
1565                ops.push(CompactCfiOp::RegisterIs {
1566                    dest_reg: CompactCfiRegister::cfa(),
1567                    src_reg: CompactCfiRegister::stack_pointer(),
1568                    offset_from_src: stack_size as i32 * pointer_size,
1569                });
1570                ops.push(CompactCfiOp::RegisterAt {
1571                    dest_reg: CompactCfiRegister::instruction_pointer(),
1572                    src_reg: CompactCfiRegister::cfa(),
1573                    offset_from_src: -pointer_size,
1574                });
1575
1576                let mut offset = 2;
1577                // offset only advances if there's a register here.
1578                // also note registers are in reverse order.
1579                for reg in self.x86_frameless_registers().iter().rev() {
1580                    if let Some(reg) = *reg {
1581                        ops.push(CompactCfiOp::RegisterAt {
1582                            dest_reg: reg,
1583                            src_reg: CompactCfiRegister::cfa(),
1584                            offset_from_src: -offset * pointer_size,
1585                        });
1586                        offset += 1;
1587                    }
1588                }
1589                CompactUnwindOp::CfiOps(ops.into_iter())
1590            }
1591            Some(X86UnwindingMode::StackIndirect) => {
1592                // TODO: implement this? Perhaps there is no reasonable implementation
1593                // since this involves parsing a value out of a machine instruction
1594                // in the binary? Or can we just do that work here and it just
1595                // becomes a constant in the CFI output?
1596                //
1597                // Either way it's not urgent, since this mode is only needed for
1598                // stack frames that are bigger than ~2KB.
1599                CompactUnwindOp::None
1600            }
1601            Some(X86UnwindingMode::Dwarf) => {
1602                // Oops! It was in the eh_frame all along.
1603
1604                let offset_in_eh_frame = self.x86_dwarf_fde();
1605                CompactUnwindOp::UseDwarfFde { offset_in_eh_frame }
1606            }
1607            None => CompactUnwindOp::None,
1608        }
1609    }
1610
1611    fn x86_mode(&self) -> Option<X86UnwindingMode> {
1612        const X86_MODE_MASK: u32 = 0x0F00_0000;
1613        const X86_MODE_RBP_FRAME: u32 = 0x0100_0000;
1614        const X86_MODE_STACK_IMMD: u32 = 0x0200_0000;
1615        const X86_MODE_STACK_IND: u32 = 0x0300_0000;
1616        const X86_MODE_DWARF: u32 = 0x0400_0000;
1617
1618        let masked = self.0 & X86_MODE_MASK;
1619
1620        match masked {
1621            X86_MODE_RBP_FRAME => Some(X86UnwindingMode::RbpFrame),
1622            X86_MODE_STACK_IMMD => Some(X86UnwindingMode::StackImmediate),
1623            X86_MODE_STACK_IND => Some(X86UnwindingMode::StackIndirect),
1624            X86_MODE_DWARF => Some(X86UnwindingMode::Dwarf),
1625            _ => None,
1626        }
1627    }
1628
1629    fn x86_rbp_registers(&self) -> [Option<CompactCfiRegister>; 5] {
1630        let mask = 0b111;
1631        [
1632            CompactCfiRegister::from_x86_encoded(self.0 & mask),
1633            CompactCfiRegister::from_x86_encoded((self.0 >> 3) & mask),
1634            CompactCfiRegister::from_x86_encoded((self.0 >> 6) & mask),
1635            CompactCfiRegister::from_x86_encoded((self.0 >> 9) & mask),
1636            CompactCfiRegister::from_x86_encoded((self.0 >> 12) & mask),
1637        ]
1638    }
1639
1640    fn x86_rbp_stack_offset(&self) -> u32 {
1641        let offset = 32 - 8 - 8;
1642        (self.0 >> offset) & 0b1111_1111
1643    }
1644
1645    fn x86_frameless_stack_size(&self) -> u32 {
1646        let offset = 32 - 8 - 8;
1647        (self.0 >> offset) & 0b1111_1111
1648    }
1649
1650    fn x86_frameless_register_count(&self) -> u32 {
1651        let offset = 32 - 8 - 8 - 3 - 3;
1652        let register_count = (self.0 >> offset) & 0b111;
1653        if register_count > 6 {
1654            6
1655        } else {
1656            register_count
1657        }
1658    }
1659
    /// Decodes the saved registers of a frameless opcode.
    ///
    /// The low 10 bits hold a permutation encoding of up to 6 register
    /// numbers (see `x86_frameless_register_count` for how many are valid).
    /// Slots beyond the count decode to `None` (encoded 0).
    fn x86_frameless_registers(&self) -> [Option<CompactCfiRegister>; 6] {
        let mut permutation = self.0 & 0b11_1111_1111;
        let mut permunreg = [0; 6];
        let register_count = self.x86_frameless_register_count();

        // I honestly haven't looked into what the heck this is doing, I
        // just copied this implementation from llvm since it honestly doesn't
        // matter much. Magically unpack 6 values from 10 bits!
        //
        // NOTE(review): the divisions by 120/24/6/2 (i.e. 5!, 4!, 3!, 2!)
        // peel off successive digits of a factorial-base permutation index.
        match register_count {
            6 => {
                permunreg[0] = permutation / 120; // 120 == 5!
                permutation -= permunreg[0] * 120;
                permunreg[1] = permutation / 24; // 24 == 4!
                permutation -= permunreg[1] * 24;
                permunreg[2] = permutation / 6; // 6 == 3!
                permutation -= permunreg[2] * 6;
                permunreg[3] = permutation / 2; // 2 == 2!
                permutation -= permunreg[3] * 2;
                permunreg[4] = permutation; // 1 == 1!
                permunreg[5] = 0;
            }
            5 => {
                permunreg[0] = permutation / 120;
                permutation -= permunreg[0] * 120;
                permunreg[1] = permutation / 24;
                permutation -= permunreg[1] * 24;
                permunreg[2] = permutation / 6;
                permutation -= permunreg[2] * 6;
                permunreg[3] = permutation / 2;
                permutation -= permunreg[3] * 2;
                permunreg[4] = permutation;
            }
            4 => {
                permunreg[0] = permutation / 60;
                permutation -= permunreg[0] * 60;
                permunreg[1] = permutation / 12;
                permutation -= permunreg[1] * 12;
                permunreg[2] = permutation / 3;
                permutation -= permunreg[2] * 3;
                permunreg[3] = permutation;
            }
            3 => {
                permunreg[0] = permutation / 20;
                permutation -= permunreg[0] * 20;
                permunreg[1] = permutation / 4;
                permutation -= permunreg[1] * 4;
                permunreg[2] = permutation;
            }
            2 => {
                permunreg[0] = permutation / 5;
                permutation -= permunreg[0] * 5;
                permunreg[1] = permutation;
            }
            1 => {
                permunreg[0] = permutation;
            }
            _ => {
                // Do nothing
            }
        }

        // Convert the permutation digits back into distinct register numbers
        // (1..=6): each digit selects the n-th not-yet-used register.
        let mut registers = [0u32; 6];
        let mut used = [false; 7];
        for i in 0..register_count {
            let mut renum = 0;
            for j in 1u32..7 {
                if !used[j as usize] {
                    if renum == permunreg[i as usize] {
                        registers[i as usize] = j;
                        used[j as usize] = true;
                        break;
                    }
                    renum += 1;
                }
            }
        }
        [
            CompactCfiRegister::from_x86_encoded(registers[0]),
            CompactCfiRegister::from_x86_encoded(registers[1]),
            CompactCfiRegister::from_x86_encoded(registers[2]),
            CompactCfiRegister::from_x86_encoded(registers[3]),
            CompactCfiRegister::from_x86_encoded(registers[4]),
            CompactCfiRegister::from_x86_encoded(registers[5]),
        ]
    }
1745
    /// The offset of this function's FDE in the eh_frame section, stored in
    /// the low 24 bits of a dwarf-mode opcode.
    fn x86_dwarf_fde(&self) -> u32 {
        self.0 & 0x00FF_FFFF
    }
1749    /*
1750    // potentially needed for future work:
1751
1752    fn x86_frameless_stack_adjust(&self) -> u32 {
1753        let offset = 32 - 8 - 8 - 3;
1754        (self.0 >> offset) & 0b111
1755    }
1756    */
1757}
1758
// ARM64 implementation
impl Opcode {
    /// Decodes the opcode's "mode" field (bits 24..28) for ARM64.
    ///
    /// Returns `None` for unknown/unused mode values.
    fn arm64_mode(&self) -> Option<Arm64UnwindingMode> {
        const ARM64_MODE_MASK: u32 = 0x0F000000;
        const ARM64_MODE_FRAMELESS: u32 = 0x02000000;
        const ARM64_MODE_DWARF: u32 = 0x03000000;
        const ARM64_MODE_FRAME: u32 = 0x04000000;

        let masked = self.0 & ARM64_MODE_MASK;

        match masked {
            ARM64_MODE_FRAMELESS => Some(Arm64UnwindingMode::Frameless),
            ARM64_MODE_DWARF => Some(Arm64UnwindingMode::Dwarf),
            ARM64_MODE_FRAME => Some(Arm64UnwindingMode::Frame),
            _ => None,
        }
    }

    /// Translates this ARM64 opcode into a [`CompactUnwindOp`].
    ///
    /// `iter` is only used to determine the target's pointer size.
    fn arm64_instructions(&self, iter: &CompactUnwindInfoIter) -> CompactUnwindOp {
        let pointer_size = self.pointer_size(iter) as i32;
        match self.arm64_mode() {
            Some(Arm64UnwindingMode::Frameless) => {
                // This is a "frameless" leaf function. All there is to
                // do is pop the stack and move the return address from
                // the link register to the instruction pointer.

                // Stack size is divided by 16.
                let stack_size = self.arm64_frameless_stack_size() * 16;
                let mut ops = CompactCfiOpIter::new();

                // cfa = sp + stack_size; return address comes from lr.
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::stack_pointer(),
                    offset_from_src: stack_size as i32,
                });
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::link_register(),
                    offset_from_src: 0,
                });

                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            Some(Arm64UnwindingMode::Dwarf) => {
                // Oops! It was in the eh_frame all along.
                let offset_in_eh_frame = self.arm64_dwarf_fde();
                CompactUnwindOp::UseDwarfFde { offset_in_eh_frame }
            }
            Some(Arm64UnwindingMode::Frame) => {
                let mut ops = CompactCfiOpIter::new();

                // This function has the standard ARM64 prologue, where
                // the frame pointer and instruction pointer are immediately
                // pushed as a pair onto the stack, and then the frame
                // pointer is updated to be the current stack pointer.
                ops.push(CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::frame_pointer(),
                    offset_from_src: 2 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::frame_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                });
                ops.push(CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                });

                // Then the X19-X28 registers that need to be restored
                // are pushed onto the stack in pairs in ascending order.
                // This is followed by the D8-D15 registers that need
                // to be restored.

                // The registers that were pushed are just represented
                // by a simple bit set covering bits 0-8 (9 bits, **low-to-high**):
                let num_reg_pairs = 9;
                let mut pairs_saved = 0;
                for pair_num in 0..num_reg_pairs {
                    let has_pair = (self.0 & (1 << pair_num)) != 0;
                    if has_pair {
                        // Although ARM64 wants to restore these registers in pairs,
                        // we specify them individually since CFI likes it that way.
                        let first_reg = ARM64_REG_BASE + pair_num * 2;
                        let second_reg = ARM64_REG_BASE + pair_num * 2 + 1;

                        // Each saved pair sits one pointer-pair below the
                        // previously saved one, starting just below fp/lr
                        // (which occupy cfa-1 and cfa-2 pointer slots).
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: CompactCfiRegister::from_arm64_encoded(first_reg),
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: (-2 * pairs_saved - 3) * pointer_size,
                        });
                        ops.push(CompactCfiOp::RegisterAt {
                            dest_reg: CompactCfiRegister::from_arm64_encoded(second_reg),
                            src_reg: CompactCfiRegister::cfa(),
                            offset_from_src: (-2 * pairs_saved - 4) * pointer_size,
                        });
                        pairs_saved += 1;
                    }
                }

                CompactUnwindOp::CfiOps(ops.into_iter())
            }
            None => CompactUnwindOp::None,
        }
    }

    /// The encoded stack size of a frameless opcode: the 12 bits at 12..24.
    /// The actual byte size is this value times 16 (see above).
    fn arm64_frameless_stack_size(&self) -> u32 {
        let offset = 32 - 8 - 12;
        (self.0 >> offset) & 0xFFF
    }

    /// The offset of this function's FDE in the eh_frame section, stored in
    /// the low 24 bits of a dwarf-mode opcode.
    fn arm64_dwarf_fde(&self) -> u32 {
        self.0 & 0x00FF_FFFF
    }
}
1875
// The x86 encoding includes the frame pointer as value 6, while
// the ARM64 encoding doesn't encode it (but needs it for output).
// To avoid the register number of the frame pointer being dependent
// on the target architecture, we start ARM64 register numbers
// *after* 6, so that value can still be used. This is potentially
// needlessly cute, but it makes usage a bit cleaner.
const REG_FRAME: u8 = 6;
// First internal number used for the ARM64 x19..x28/d8..d15 registers.
const ARM64_REG_BASE: u32 = REG_FRAME as u32 + 1;
// These registers aren't ever encoded explicitly, so we make
// up some arbitrary values for reporting them in our outputs.
// (Chosen near the top of the u8 range so they can't collide with
// the encoded register numbers above.)
const REG_LINK: u8 = 252;
const REG_INSTRUCTION: u8 = 253;
const REG_STACK: u8 = 254;
const REG_CFA: u8 = 255;
1890
/// A register for a [`CompactCfiOp`], as used by Compact Unwinding.
///
/// You should just treat this opaquely and use its methods to make sense of it.
///
/// Internally this wraps one of the architecture-specific register numbers
/// (or one of the synthetic `REG_*` values defined above).
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct CompactCfiRegister(u8);
1896
1897impl CompactCfiRegister {
1898    fn from_x86_encoded(val: u32) -> Option<Self> {
1899        if (1..=6).contains(&val) {
1900            Some(CompactCfiRegister(val as u8))
1901        } else {
1902            None
1903        }
1904    }
1905
1906    fn from_arm64_encoded(val: u32) -> Self {
1907        // Assert shouldn't happen as we're processing trusted input here, but
1908        // good to validate this in tests.
1909        debug_assert!((ARM64_REG_BASE..ARM64_REG_BASE + 18).contains(&val));
1910        CompactCfiRegister(val as u8)
1911    }
1912
1913    /// Whether this register is the cfa register.
1914    pub fn is_cfa(&self) -> bool {
1915        self.0 == REG_CFA
1916    }
1917
1918    /// The name of this register that cfi wants.
1919    pub fn name(&self, iter: &CompactUnwindInfoIter) -> Option<&'static str> {
1920        match self.0 {
1921            REG_CFA => Some("cfa"),
1922            other => name_of_other_reg(other, iter),
1923        }
1924    }
1925
1926    /// Gets the CFA register (Canonical Frame Address) -- the frame pointer (e.g. rbp)
1927    pub fn cfa() -> Self {
1928        Self(REG_CFA)
1929    }
1930
1931    /// Gets the register for the frame pointer (e.g. rbp).
1932    pub fn frame_pointer() -> Self {
1933        CompactCfiRegister(REG_FRAME)
1934    }
1935
1936    /// Gets the register for the instruction pointer (e.g. rip).
1937    pub fn instruction_pointer() -> Self {
1938        CompactCfiRegister(REG_INSTRUCTION)
1939    }
1940
1941    /// Gets the register for the stack pointer (e.g. rsp).
1942    pub fn stack_pointer() -> Self {
1943        CompactCfiRegister(REG_STACK)
1944    }
1945
1946    /// Get the ARM64 link register (x30).
1947    pub fn link_register() -> Self {
1948        CompactCfiRegister(REG_LINK)
1949    }
1950}
1951
1952fn name_of_other_reg(reg: u8, iter: &CompactUnwindInfoIter) -> Option<&'static str> {
1953    match iter.arch {
1954        Arch::X86 => match reg {
1955            0 => None,
1956            1 => Some("ebx"),
1957            2 => Some("ecx"),
1958            3 => Some("edx"),
1959            4 => Some("edi"),
1960            5 => Some("esi"),
1961            6 => Some("ebp"),
1962
1963            // Not part of the compact format, but needed to describe opcode behaviours
1964            REG_INSTRUCTION => Some("eip"),
1965            REG_STACK => Some("esp"),
1966
1967            _ => None,
1968        },
1969        Arch::X64 => match reg {
1970            0 => None,
1971            1 => Some("rbx"),
1972            2 => Some("r12"),
1973            3 => Some("r13"),
1974            4 => Some("r14"),
1975            5 => Some("r15"),
1976            6 => Some("rbp"),
1977
1978            // Not part of the compact format, but needed to describe opcode behaviours
1979            REG_INSTRUCTION => Some("rip"),
1980            REG_STACK => Some("rsp"),
1981            _ => None,
1982        },
1983        Arch::Arm64 => {
1984            match reg {
1985                7 => Some("x19"),
1986                8 => Some("x20"),
1987                9 => Some("x21"),
1988                10 => Some("x22"),
1989                11 => Some("x23"),
1990                12 => Some("x24"),
1991                13 => Some("x25"),
1992                14 => Some("x26"),
1993                15 => Some("x27"),
1994                16 => Some("x28"),
1995
1996                17 => Some("d8"),
1997                18 => Some("d9"),
1998                19 => Some("d10"),
1999                20 => Some("d11"),
2000                21 => Some("d12"),
2001                22 => Some("d13"),
2002                23 => Some("d14"),
2003                24 => Some("d15"),
2004
2005                // Not part of the compact format, but needed to describe opcode behaviours
2006                REG_FRAME => Some("x29"),
2007                REG_LINK => Some("x30"),
2008                REG_INSTRUCTION => Some("pc"),
2009                REG_STACK => Some("sp"),
2010                _ => None,
2011            }
2012        }
2013        _ => None,
2014    }
2015}
2016
/// An iterator over the [`CompactCfiOp`]s yielded by [`CompactUnwindOp::CfiOps`].
#[derive(Debug, Clone)]
pub struct CompactCfiOpIter {
    // This is just a hacky impl of an ArrayVec to avoid depending on it, and
    // avoid allocating. This ends up storing 20 u64's if enum optimizations
    // work the way I expect.
    //
    // `cur_idx` doubles as the write cursor while building the buffer and
    // as the read cursor after `into_iter()` resets it to 0.
    items: [Option<CompactCfiOp>; 21],
    cur_idx: usize,
}
2026
2027impl CompactCfiOpIter {
2028    fn new() -> Self {
2029        Self {
2030            items: [
2031                None, None, None, None, None, None, None, None, None, None, None, None, None, None,
2032                None, None, None, None, None, None, None,
2033            ],
2034            cur_idx: 0,
2035        }
2036    }
2037
2038    fn push(&mut self, item: CompactCfiOp) {
2039        // Will panic if we overflow, but that's fine, the buffer should be
2040        // sized to fit any payload we need, since that's bounded.
2041        self.items[self.cur_idx] = Some(item);
2042        self.cur_idx += 1;
2043    }
2044
2045    /// Resets cur_idx for this to be used as an iterator,
2046    /// because I'm too lazy to make *another* type for this.
2047    fn into_iter(mut self) -> Self {
2048        self.cur_idx = 0;
2049        self
2050    }
2051}
2052
2053impl Iterator for CompactCfiOpIter {
2054    type Item = CompactCfiOp;
2055    fn next(&mut self) -> Option<Self::Item> {
2056        if self.cur_idx < self.items.len() {
2057            let old_idx = self.cur_idx;
2058            self.cur_idx += 1;
2059            self.items[old_idx].take()
2060        } else {
2061            None
2062        }
2063    }
2064}
2065
2066#[cfg(test)]
2067mod test {
2068
2069    use super::{
2070        CompactCfiOp, CompactCfiRegister, CompactUnwindInfoIter, CompactUnwindOp, Opcode,
2071        ARM64_REG_BASE,
2072    };
2073    use crate::macho::MachError;
2074    use scroll::Pwrite;
2075    use symbolic_common::Arch;
2076
    // All Second-level pages have this much memory to work with, let's stick to that
    const PAGE_SIZE: usize = 4096;
    // Fixed header sizes (in bytes) of the two second-level page layouts.
    const REGULAR_PAGE_HEADER_LEN: usize = 8;
    const COMPRESSED_PAGE_HEADER_LEN: usize = 12;
    // Regular entries are 8 bytes (u32 address + u32 opcode); compressed
    // entries pack both into a single u32.
    const MAX_REGULAR_SECOND_LEVEL_ENTRIES: usize = (PAGE_SIZE - REGULAR_PAGE_HEADER_LEN) / 8;
    const MAX_COMPRESSED_SECOND_LEVEL_ENTRIES: usize = (PAGE_SIZE - COMPRESSED_PAGE_HEADER_LEN) / 4;
    // Worst case: the page also carries a full local-opcode palette (4 bytes each).
    const MAX_COMPRESSED_SECOND_LEVEL_ENTRIES_WITH_MAX_LOCALS: usize =
        (PAGE_SIZE - COMPRESSED_PAGE_HEADER_LEN - MAX_LOCAL_OPCODES_LEN as usize * 4) / 4;

    // Mentioned by headers, but seems to have no real significance
    const MAX_GLOBAL_OPCODES_LEN: u32 = 127;
    const MAX_LOCAL_OPCODES_LEN: u32 = 128;

    // Only 2 bits are allocated to this index
    const MAX_PERSONALITIES_LEN: u32 = 4;

    // x86/x64 opcode "mode" values (stored in bits 24..28 of the opcode).
    const X86_MODE_RBP_FRAME: u32 = 0x0100_0000;
    const X86_MODE_STACK_IMMD: u32 = 0x0200_0000;
    const X86_MODE_STACK_IND: u32 = 0x0300_0000;
    const X86_MODE_DWARF: u32 = 0x0400_0000;

    // ARM64 opcode "mode" values (same bit position).
    const ARM64_MODE_FRAMELESS: u32 = 0x02000000;
    const ARM64_MODE_DWARF: u32 = 0x03000000;
    const ARM64_MODE_FRAME: u32 = 0x04000000;

    // Second-level page "kind" discriminants.
    const REGULAR_PAGE_KIND: u32 = 2;
    const COMPRESSED_PAGE_KIND: u32 = 3;
2104
2105    fn align(offset: u32, align: u32) -> u32 {
2106        offset.div_ceil(align) * align
2107    }
2108
2109    fn pack_x86_rbp_registers(regs: [u8; 5]) -> u32 {
2110        let mut result: u32 = 0;
2111        let base_offset = 0;
2112        for (idx, &reg) in regs.iter().enumerate() {
2113            assert!(reg <= 6);
2114            result |= (reg as u32 & 0b111) << (base_offset + idx * 3);
2115        }
2116
2117        result
2118    }
2119    fn pack_x86_stackless_registers(num_regs: u32, registers: [u8; 6]) -> u32 {
2120        for &reg in &registers {
2121            assert!(reg <= 6);
2122        }
2123
2124        // Also copied from llvm implementation
2125        let mut renumregs = [0u32; 6];
2126        for i in 6 - num_regs..6 {
2127            let mut countless = 0;
2128            for j in 6 - num_regs..i {
2129                if registers[j as usize] < registers[i as usize] {
2130                    countless += 1;
2131                }
2132            }
2133            renumregs[i as usize] = registers[i as usize] as u32 - countless - 1;
2134        }
2135        let mut permutation_encoding: u32 = 0;
2136        match num_regs {
2137            6 => {
2138                permutation_encoding |= 120 * renumregs[0]
2139                    + 24 * renumregs[1]
2140                    + 6 * renumregs[2]
2141                    + 2 * renumregs[3]
2142                    + renumregs[4];
2143            }
2144            5 => {
2145                permutation_encoding |= 120 * renumregs[1]
2146                    + 24 * renumregs[2]
2147                    + 6 * renumregs[3]
2148                    + 2 * renumregs[4]
2149                    + renumregs[5];
2150            }
2151            4 => {
2152                permutation_encoding |=
2153                    60 * renumregs[2] + 12 * renumregs[3] + 3 * renumregs[4] + renumregs[5];
2154            }
2155            3 => {
2156                permutation_encoding |= 20 * renumregs[3] + 4 * renumregs[4] + renumregs[5];
2157            }
2158            2 => {
2159                permutation_encoding |= 5 * renumregs[4] + renumregs[5];
2160            }
2161            1 => {
2162                permutation_encoding |= renumregs[5];
2163            }
2164            0 => {
2165                // do nothing
2166            }
2167            _ => unreachable!(),
2168        }
2169        permutation_encoding
2170    }
2171    fn assert_opcodes_match<A, B>(mut a: A, mut b: B)
2172    where
2173        A: Iterator<Item = CompactCfiOp>,
2174        B: Iterator<Item = CompactCfiOp>,
2175    {
2176        while let (Some(a_op), Some(b_op)) = (a.next(), b.next()) {
2177            assert_eq!(a_op, b_op);
2178        }
2179        assert!(b.next().is_none());
2180        assert!(a.next().is_none());
2181    }
2182
2183    #[test]
2184    // Make sure we error out for an unknown version of this section
2185    fn test_compact_unknown_version() -> Result<(), MachError> {
2186        {
2187            let offset = &mut 0;
2188            let mut section = vec![0u8; 1024];
2189
2190            // Version 0 doesn't exist
2191            section.gwrite(0u32, offset)?;
2192
2193            assert!(CompactUnwindInfoIter::new(&section, true, Arch::Amd64).is_err());
2194        }
2195
2196        {
2197            let offset = &mut 0;
2198            let mut section = vec![0; 1024];
2199
2200            // Version 2 doesn't exist
2201            section.gwrite(2u32, offset)?;
2202            assert!(CompactUnwindInfoIter::new(&section, true, Arch::X86).is_err());
2203        }
2204        Ok(())
2205    }
2206
2207    #[test]
2208    // Make sure we handle a section with no entries reasonably
2209    fn test_compact_empty() -> Result<(), MachError> {
2210        let offset = &mut 0;
2211        let mut section = vec![0u8; 1024];
2212
2213        // Just set the version, everything else is 0
2214        section.gwrite(1u32, offset)?;
2215
2216        let mut iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;
2217        assert!(iter.next()?.is_none());
2218        assert!(iter.next()?.is_none());
2219
2220        Ok(())
2221    }
2222
2223    #[test]
2224    // Create a reasonable structure that has both kinds of second-level pages
2225    // and poke at some corner cases. opcode values are handled opaquely, just
2226    // checking that they roundtrip correctly.
2227    fn test_compact_structure() -> Result<(), MachError> {
2228        let global_opcodes: Vec<u32> = vec![0, 2, 4, 7];
2229        assert!(global_opcodes.len() <= MAX_GLOBAL_OPCODES_LEN as usize);
2230        let personalities: Vec<u32> = vec![7, 12, 3];
2231        assert!(personalities.len() <= MAX_PERSONALITIES_LEN as usize);
2232
2233        // instruction_address, lsda_address
2234        let lsdas: Vec<(u32, u32)> = vec![(0, 1), (7, 3), (18, 5)];
2235
2236        // first_instruction_address, second_page_offset, lsda_offset
2237        let mut first_entries: Vec<(u32, u32, u32)> = vec![];
2238
2239        /////////////////////////////////////////////////
2240        //          Values we will be testing          //
2241        /////////////////////////////////////////////////
2242
2243        // page entries are instruction_address, opcode
2244        let mut regular_entries: Vec<Vec<(u32, u32)>> = vec![
2245            // Some data
2246            vec![(1, 7), (3, 8), (6, 10), (10, 4)],
2247            vec![(20, 5), (21, 2), (24, 7), (25, 0)],
2248            // Page len 1
2249            vec![(29, 8)],
2250        ];
2251        let mut compressed_entries: Vec<Vec<(u32, u32)>> = vec![
2252            // Some data
2253            vec![(10001, 7), (10003, 8), (10006, 10), (10010, 4)],
2254            vec![(10020, 5), (10021, 2), (10024, 7), (10025, 0)],
2255            // Page len 1
2256            vec![(10029, 8)],
2257        ];
2258
2259        // max-len regular page
2260        let mut temp = vec![];
2261        let base_instruction = 100;
2262        for i in 0..MAX_REGULAR_SECOND_LEVEL_ENTRIES {
2263            temp.push((base_instruction + i as u32, i as u32))
2264        }
2265        regular_entries.push(temp);
2266
2267        // max-len compact page (only global entries)
2268        let mut temp = vec![];
2269        let base_instruction = 10100;
2270        for i in 0..MAX_COMPRESSED_SECOND_LEVEL_ENTRIES {
2271            temp.push((base_instruction + i as u32, 2))
2272        }
2273        compressed_entries.push(temp);
2274
2275        // max-len compact page (max local entries)
2276        let mut temp = vec![];
2277        let base_instruction = 14100;
2278        for i in 0..MAX_COMPRESSED_SECOND_LEVEL_ENTRIES_WITH_MAX_LOCALS {
2279            temp.push((
2280                base_instruction + i as u32,
2281                100 + (i as u32 % MAX_LOCAL_OPCODES_LEN),
2282            ))
2283        }
2284        compressed_entries.push(temp);
2285
2286        ///////////////////////////////////////////////////////
2287        //               Compute the format                  //
2288        ///////////////////////////////////////////////////////
2289
2290        // First temporarily write the second level pages into other buffers
2291        let mut second_level_pages: Vec<[u8; PAGE_SIZE]> = vec![];
2292        for page in &regular_entries {
2293            second_level_pages.push([0; PAGE_SIZE]);
2294            let buf = second_level_pages.last_mut().unwrap();
2295            let buf_offset = &mut 0;
2296
2297            // kind
2298            buf.gwrite(REGULAR_PAGE_KIND, buf_offset)?;
2299
2300            // entry array offset + len
2301            buf.gwrite(REGULAR_PAGE_HEADER_LEN as u16, buf_offset)?;
2302            buf.gwrite(page.len() as u16, buf_offset)?;
2303
2304            for &(insruction_address, opcode) in page {
2305                buf.gwrite(insruction_address, buf_offset)?;
2306                buf.gwrite(opcode, buf_offset)?;
2307            }
2308        }
2309
2310        for page in &compressed_entries {
2311            second_level_pages.push([0; PAGE_SIZE]);
2312            let buf = second_level_pages.last_mut().unwrap();
2313            let buf_offset = &mut 0;
2314
2315            // Compute a palete for local opcodes
2316            // (this is semi-quadratic in that it can do 255 * 1000 iterations, it's fine)
2317            let mut local_opcodes = vec![];
2318            let mut indices = vec![];
2319            for &(_, opcode) in page {
2320                if let Some((idx, _)) = global_opcodes
2321                    .iter()
2322                    .enumerate()
2323                    .find(|&(_, &global_opcode)| global_opcode == opcode)
2324                {
2325                    indices.push(idx);
2326                } else if let Some((idx, _)) = local_opcodes
2327                    .iter()
2328                    .enumerate()
2329                    .find(|&(_, &global_opcode)| global_opcode == opcode)
2330                {
2331                    indices.push(global_opcodes.len() + idx);
2332                } else {
2333                    local_opcodes.push(opcode);
2334                    indices.push(global_opcodes.len() + local_opcodes.len() - 1);
2335                }
2336            }
2337            assert!(local_opcodes.len() <= MAX_LOCAL_OPCODES_LEN as usize);
2338
2339            let entries_offset = COMPRESSED_PAGE_HEADER_LEN + local_opcodes.len() * 4;
2340            let first_address = page.first().unwrap().0;
2341            // kind
2342            buf.gwrite(COMPRESSED_PAGE_KIND, buf_offset)?;
2343
2344            // entry array offset + len
2345            buf.gwrite(entries_offset as u16, buf_offset)?;
2346            buf.gwrite(page.len() as u16, buf_offset)?;
2347
2348            // local opcodes array + len
2349            buf.gwrite(COMPRESSED_PAGE_HEADER_LEN as u16, buf_offset)?;
2350            buf.gwrite(local_opcodes.len() as u16, buf_offset)?;
2351
2352            for opcode in local_opcodes {
2353                buf.gwrite(opcode, buf_offset)?;
2354            }
2355            for (&(instruction_address, _opcode), idx) in page.iter().zip(indices) {
2356                let compressed_address = (instruction_address - first_address) & 0x00FF_FFFF;
2357                let compressed_idx = (idx as u32) << 24;
2358                assert_eq!(compressed_address + first_address, instruction_address);
2359                assert_eq!(idx & 0xFFFF_FF00, 0);
2360
2361                let compressed_opcode: u32 = compressed_address | compressed_idx;
2362                buf.gwrite(compressed_opcode, buf_offset)?;
2363            }
2364        }
2365
2366        let header_size: u32 = 4 * 7;
2367        let global_opcodes_offset: u32 = header_size;
2368        let personalities_offset: u32 = global_opcodes_offset + global_opcodes.len() as u32 * 4;
2369        let first_entries_offset: u32 = personalities_offset + personalities.len() as u32 * 4;
2370        let lsdas_offset: u32 = first_entries_offset + (second_level_pages.len() + 1) as u32 * 12;
2371        let second_level_pages_offset: u32 =
2372            align(lsdas_offset + lsdas.len() as u32 * 8, PAGE_SIZE as u32);
2373        let final_size: u32 =
2374            second_level_pages_offset + second_level_pages.len() as u32 * PAGE_SIZE as u32;
2375
2376        // Validate that we have strictly monotonically increasing addresses,
2377        // and build the first-level entries.
2378        let mut cur_address = 0;
2379        for (idx, page) in regular_entries
2380            .iter()
2381            .chain(compressed_entries.iter())
2382            .enumerate()
2383        {
2384            let first_address = page.first().unwrap().0;
2385            let page_offset = second_level_pages_offset + PAGE_SIZE as u32 * idx as u32;
2386            first_entries.push((first_address, page_offset, lsdas_offset));
2387
2388            for &(address, _) in page {
2389                assert!(address > cur_address);
2390                cur_address = address;
2391            }
2392        }
2393        assert_eq!(second_level_pages.len(), first_entries.len());
2394        // Push the null page into our first_entries
2395        first_entries.push((cur_address + 1, 0, 0));
2396
2397        ///////////////////////////////////////////////////////
2398        //                  Emit the binary                  //
2399        ///////////////////////////////////////////////////////
2400
2401        let offset = &mut 0;
2402        let mut section = vec![0u8; final_size as usize];
2403
2404        // Write the header
2405        section.gwrite(1u32, offset)?;
2406
2407        section.gwrite(global_opcodes_offset, offset)?;
2408        section.gwrite(global_opcodes.len() as u32, offset)?;
2409
2410        section.gwrite(personalities_offset, offset)?;
2411        section.gwrite(personalities.len() as u32, offset)?;
2412
2413        section.gwrite(first_entries_offset, offset)?;
2414        section.gwrite(first_entries.len() as u32, offset)?;
2415
2416        // Write the arrays
2417        assert_eq!(*offset as u32, global_opcodes_offset);
2418        for &opcode in &global_opcodes {
2419            section.gwrite(opcode, offset)?;
2420        }
2421        assert_eq!(*offset as u32, personalities_offset);
2422        for &personality in &personalities {
2423            section.gwrite(personality, offset)?;
2424        }
2425        assert_eq!(*offset as u32, first_entries_offset);
2426        for &entry in &first_entries {
2427            section.gwrite(entry.0, offset)?;
2428            section.gwrite(entry.1, offset)?;
2429            section.gwrite(entry.2, offset)?;
2430        }
2431        assert_eq!(*offset as u32, lsdas_offset);
2432        for &lsda in &lsdas {
2433            section.gwrite(lsda.0, offset)?;
2434            section.gwrite(lsda.1, offset)?;
2435        }
2436
2437        // Write the pages
2438        *offset = second_level_pages_offset as usize;
2439        for second_level_page in &second_level_pages {
2440            for byte in second_level_page {
2441                section.gwrite(byte, offset)?;
2442            }
2443        }
2444
2445        ///////////////////////////////////////////////////////
2446        //         Test that everything roundtrips           //
2447        ///////////////////////////////////////////////////////
2448
2449        let mut iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;
2450        let mut orig_entries = regular_entries
2451            .iter()
2452            .chain(compressed_entries.iter())
2453            .flatten();
2454
2455        while let (Some(entry), Some((orig_address, orig_opcode))) =
2456            (iter.next()?, orig_entries.next())
2457        {
2458            assert_eq!(entry.instruction_address, *orig_address);
2459            assert_eq!(entry.opcode.0, *orig_opcode);
2460        }
2461
2462        // Confirm both were completely exhausted at the same time
2463        assert!(iter.next()?.is_none());
2464        assert_eq!(orig_entries.next(), None);
2465
2466        Ok(())
2467    }
2468
    /// Checks x86 (32-bit) compact-unwind opcode decoding.
    ///
    /// An empty-but-valid `__unwind_info` section is built only so that a
    /// `CompactUnwindInfoIter` exists; the opcodes under test are then packed
    /// by hand and decoded through `Opcode::instructions`.
    #[test]
    fn test_compact_opcodes_x86() -> Result<(), MachError> {
        // Make an empty but valid section to initialize the CompactUnwindInfoIter
        let pointer_size = 4;
        // Bit positions used when hand-packing opcodes below: the stack size
        // sits below the top 8 mode bits + 8 flag bits, and for frameless
        // opcodes the register count sits another two 3-bit fields lower.
        // NOTE(review): layout inferred from how values are packed in this
        // test — confirm against the opcode-format constants in this file.
        let frameless_reg_count_offset = 32 - 8 - 8 - 3 - 3;
        let stack_size_offset = 32 - 8 - 8;
        let offset = &mut 0;
        let mut section = vec![0u8; 1024];
        // Just set the version, everything else is 0
        section.gwrite(1u32, offset)?;

        let iter = CompactUnwindInfoIter::new(&section, true, Arch::X86)?;

        // Check that the null opcode is handled reasonably
        {
            let opcode = Opcode(0);
            assert!(matches!(opcode.instructions(&iter), CompactUnwindOp::None));
        }

        // Check that dwarf opcodes work
        {
            // The low 24 bits of a dwarf-mode opcode are expected to come
            // back verbatim as the offset into __eh_frame.
            let opcode = Opcode(X86_MODE_DWARF | 0x00123456);
            assert!(matches!(
                opcode.instructions(&iter),
                CompactUnwindOp::UseDwarfFde {
                    offset_in_eh_frame: 0x00123456
                }
            ));
        }

        // Check that rbp opcodes work
        //
        // Every rbp-frame case expects the same frame layout:
        //   cfa = frame_pointer + 2 * pointer_size
        //   saved frame pointer at cfa - 2 * pointer_size
        //   return address at cfa - pointer_size
        //   callee-saved register packed in slot i at
        //     cfa - (stack_size + 2 - i) * pointer_size
        {
            // Simple, no general registers to restore
            let stack_size: i32 = 0xa1;
            let registers = [0, 0, 0, 0, 0];
            let opcode = Opcode(
                X86_MODE_RBP_FRAME
                    | pack_x86_rbp_registers(registers)
                    | ((stack_size as u32) << stack_size_offset),
            );
            let expected = vec![
                CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::frame_pointer(),
                    offset_from_src: 2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::frame_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                },
            ];

            match opcode.instructions(&iter) {
                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
                _ => unreachable!(),
            }
        }
        {
            // One general register to restore
            let stack_size: i32 = 0x13;
            let registers = [1, 0, 0, 0, 0];
            let opcode = Opcode(
                X86_MODE_RBP_FRAME
                    | pack_x86_rbp_registers(registers)
                    | ((stack_size as u32) << stack_size_offset),
            );
            let expected = vec![
                CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::frame_pointer(),
                    offset_from_src: 2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::frame_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                },
                // Slot 0 register: cfa - (stack_size + 2) * pointer_size.
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -(stack_size + 2) * pointer_size,
                },
            ];

            match opcode.instructions(&iter) {
                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
                _ => unreachable!(),
            }
        }
        {
            // All general register slots used
            let stack_size: i32 = 0xc2;
            let registers = [2, 3, 4, 5, 6];
            let opcode = Opcode(
                X86_MODE_RBP_FRAME
                    | pack_x86_rbp_registers(registers)
                    | ((stack_size as u32) << stack_size_offset),
            );
            let expected = vec![
                CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::frame_pointer(),
                    offset_from_src: 2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::frame_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                },
                // Slots 0..=4: each slot i lands at
                // cfa - (stack_size + 2 - i) * pointer_size.
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -(stack_size + 2) * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -(stack_size + 2 - 1) * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -(stack_size + 2 - 2) * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -(stack_size + 2 - 3) * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -(stack_size + 2 - 4) * pointer_size,
                },
            ];

            match opcode.instructions(&iter) {
                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
                _ => unreachable!(),
            }
        }
        {
            // Holes in the general registers
            //
            // Zeroed slots are skipped, but the surviving slots keep their
            // slot-indexed offsets (i = 0, 2, 4 below).
            let stack_size: i32 = 0xa7;
            let registers = [2, 0, 4, 0, 6];
            let opcode = Opcode(
                X86_MODE_RBP_FRAME
                    | pack_x86_rbp_registers(registers)
                    | ((stack_size as u32) << stack_size_offset),
            );
            let expected = vec![
                CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::frame_pointer(),
                    offset_from_src: 2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::frame_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -(stack_size + 2) * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -(stack_size + 2 - 2) * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -(stack_size + 2 - 4) * pointer_size,
                },
            ];

            match opcode.instructions(&iter) {
                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
                _ => unreachable!(),
            }
        }

        // Check that stack-immediate opcodes work
        //
        // Every frameless (stack-immediate) case expects:
        //   cfa = stack_pointer + stack_size * pointer_size
        //   return address at cfa - pointer_size
        //   saved registers listed last-slot-first, starting one pointer
        //   below the return address and descending one pointer per register.
        {
            // Simple, no general registers to restore
            let stack_size: i32 = 0xa1;
            let packed_stack_size = (stack_size as u32) << stack_size_offset;
            let num_regs = 0;
            let packed_num_regs = num_regs << frameless_reg_count_offset;
            let registers = [0, 0, 0, 0, 0, 0];
            let opcode = Opcode(
                X86_MODE_STACK_IMMD
                    | pack_x86_stackless_registers(num_regs, registers)
                    | packed_num_regs
                    | packed_stack_size,
            );
            let expected = vec![
                CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::stack_pointer(),
                    offset_from_src: stack_size * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                },
            ];

            match opcode.instructions(&iter) {
                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
                _ => unreachable!(),
            }
        }
        {
            // One general register to restore
            let stack_size: i32 = 0x13;
            let packed_stack_size = (stack_size as u32) << stack_size_offset;
            let num_regs = 1;
            let packed_num_regs = num_regs << frameless_reg_count_offset;
            let registers = [0, 0, 0, 0, 0, 1];
            let opcode = Opcode(
                X86_MODE_STACK_IMMD
                    | pack_x86_stackless_registers(num_regs, registers)
                    | packed_num_regs
                    | packed_stack_size,
            );
            let expected = vec![
                CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::stack_pointer(),
                    offset_from_src: stack_size * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                },
            ];

            match opcode.instructions(&iter) {
                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
                _ => unreachable!(),
            }
        }
        {
            // All general register slots used
            //
            // Note the expected restore order is the reverse of the packed
            // array: register 6 (last slot) sits closest to the return
            // address, register 1 (first slot) deepest.
            let stack_size: i32 = 0xc1;
            let packed_stack_size = (stack_size as u32) << stack_size_offset;
            let num_regs = 6;
            let packed_num_regs = num_regs << frameless_reg_count_offset;
            let registers = [1, 2, 3, 4, 5, 6];
            let opcode = Opcode(
                X86_MODE_STACK_IMMD
                    | pack_x86_stackless_registers(num_regs, registers)
                    | packed_num_regs
                    | packed_stack_size,
            );
            let expected = vec![
                CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::stack_pointer(),
                    offset_from_src: stack_size * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -3 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -4 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -5 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -6 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -7 * pointer_size,
                },
            ];

            match opcode.instructions(&iter) {
                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
                _ => unreachable!(),
            }
        }
        {
            // Some general registers
            let stack_size: i32 = 0xf1;
            let packed_stack_size = (stack_size as u32) << stack_size_offset;
            let num_regs = 3;
            let packed_num_regs = num_regs << frameless_reg_count_offset;
            let registers = [0, 0, 0, 2, 4, 6];
            let opcode = Opcode(
                X86_MODE_STACK_IMMD
                    | pack_x86_stackless_registers(num_regs, registers)
                    | packed_num_regs
                    | packed_stack_size,
            );
            let expected = vec![
                CompactCfiOp::RegisterIs {
                    dest_reg: CompactCfiRegister::cfa(),
                    src_reg: CompactCfiRegister::stack_pointer(),
                    offset_from_src: stack_size * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::instruction_pointer(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -2 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -3 * pointer_size,
                },
                CompactCfiOp::RegisterAt {
                    dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
                    src_reg: CompactCfiRegister::cfa(),
                    offset_from_src: -4 * pointer_size,
                },
            ];

            match opcode.instructions(&iter) {
                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
                _ => unreachable!(),
            }
        }

        // Check that stack-indirect opcodes work (feature unimplemented)
        {
            let _opcode = Opcode(X86_MODE_STACK_IND);
            // ... tests
        }

        Ok(())
    }
2858
2859    #[test]
2860    fn test_compact_opcodes_x64() -> Result<(), MachError> {
2861        // Make an empty but valid section to initialize the CompactUnwindInfoIter
2862        let pointer_size = 8;
2863        let frameless_reg_count_offset = 32 - 8 - 8 - 3 - 3;
2864        let stack_size_offset = 32 - 8 - 8;
2865        let offset = &mut 0;
2866        let mut section = vec![0u8; 1024];
2867        // Just set the version, everything else is 0
2868        section.gwrite(1u32, offset)?;
2869
2870        let iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;
2871
2872        // Check that the null opcode is handled reasonably
2873        {
2874            let opcode = Opcode(0);
2875            assert!(matches!(opcode.instructions(&iter), CompactUnwindOp::None));
2876        }
2877
2878        // Check that dwarf opcodes work
2879        {
2880            let opcode = Opcode(X86_MODE_DWARF | 0x00123456);
2881            assert!(matches!(
2882                opcode.instructions(&iter),
2883                CompactUnwindOp::UseDwarfFde {
2884                    offset_in_eh_frame: 0x00123456
2885                }
2886            ));
2887        }
2888
2889        // Check that rbp opcodes work
2890        {
2891            // Simple, no general registers to restore
2892            let stack_size: i32 = 0xa1;
2893            let registers = [0, 0, 0, 0, 0];
2894            let opcode = Opcode(
2895                X86_MODE_RBP_FRAME
2896                    | pack_x86_rbp_registers(registers)
2897                    | ((stack_size as u32) << stack_size_offset),
2898            );
2899            let expected = vec![
2900                CompactCfiOp::RegisterIs {
2901                    dest_reg: CompactCfiRegister::cfa(),
2902                    src_reg: CompactCfiRegister::frame_pointer(),
2903                    offset_from_src: 2 * pointer_size,
2904                },
2905                CompactCfiOp::RegisterAt {
2906                    dest_reg: CompactCfiRegister::frame_pointer(),
2907                    src_reg: CompactCfiRegister::cfa(),
2908                    offset_from_src: -2 * pointer_size,
2909                },
2910                CompactCfiOp::RegisterAt {
2911                    dest_reg: CompactCfiRegister::instruction_pointer(),
2912                    src_reg: CompactCfiRegister::cfa(),
2913                    offset_from_src: -pointer_size,
2914                },
2915            ];
2916
2917            match opcode.instructions(&iter) {
2918                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
2919                _ => unreachable!(),
2920            }
2921        }
2922        {
2923            // One general register to restore
2924            let stack_size: i32 = 0x13;
2925            let registers = [1, 0, 0, 0, 0];
2926            let opcode = Opcode(
2927                X86_MODE_RBP_FRAME
2928                    | pack_x86_rbp_registers(registers)
2929                    | ((stack_size as u32) << stack_size_offset),
2930            );
2931            let expected = vec![
2932                CompactCfiOp::RegisterIs {
2933                    dest_reg: CompactCfiRegister::cfa(),
2934                    src_reg: CompactCfiRegister::frame_pointer(),
2935                    offset_from_src: 2 * pointer_size,
2936                },
2937                CompactCfiOp::RegisterAt {
2938                    dest_reg: CompactCfiRegister::frame_pointer(),
2939                    src_reg: CompactCfiRegister::cfa(),
2940                    offset_from_src: -2 * pointer_size,
2941                },
2942                CompactCfiOp::RegisterAt {
2943                    dest_reg: CompactCfiRegister::instruction_pointer(),
2944                    src_reg: CompactCfiRegister::cfa(),
2945                    offset_from_src: -pointer_size,
2946                },
2947                CompactCfiOp::RegisterAt {
2948                    dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
2949                    src_reg: CompactCfiRegister::cfa(),
2950                    offset_from_src: -(stack_size + 2) * pointer_size,
2951                },
2952            ];
2953
2954            match opcode.instructions(&iter) {
2955                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
2956                _ => unreachable!(),
2957            }
2958        }
2959        {
2960            // All general register slots used
2961            let stack_size: i32 = 0xc2;
2962            let registers = [2, 3, 4, 5, 6];
2963            let opcode = Opcode(
2964                X86_MODE_RBP_FRAME
2965                    | pack_x86_rbp_registers(registers)
2966                    | ((stack_size as u32) << stack_size_offset),
2967            );
2968            let expected = vec![
2969                CompactCfiOp::RegisterIs {
2970                    dest_reg: CompactCfiRegister::cfa(),
2971                    src_reg: CompactCfiRegister::frame_pointer(),
2972                    offset_from_src: 2 * pointer_size,
2973                },
2974                CompactCfiOp::RegisterAt {
2975                    dest_reg: CompactCfiRegister::frame_pointer(),
2976                    src_reg: CompactCfiRegister::cfa(),
2977                    offset_from_src: -2 * pointer_size,
2978                },
2979                CompactCfiOp::RegisterAt {
2980                    dest_reg: CompactCfiRegister::instruction_pointer(),
2981                    src_reg: CompactCfiRegister::cfa(),
2982                    offset_from_src: -pointer_size,
2983                },
2984                CompactCfiOp::RegisterAt {
2985                    dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
2986                    src_reg: CompactCfiRegister::cfa(),
2987                    offset_from_src: -(stack_size + 2) * pointer_size,
2988                },
2989                CompactCfiOp::RegisterAt {
2990                    dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
2991                    src_reg: CompactCfiRegister::cfa(),
2992                    offset_from_src: -(stack_size + 2 - 1) * pointer_size,
2993                },
2994                CompactCfiOp::RegisterAt {
2995                    dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
2996                    src_reg: CompactCfiRegister::cfa(),
2997                    offset_from_src: -(stack_size + 2 - 2) * pointer_size,
2998                },
2999                CompactCfiOp::RegisterAt {
3000                    dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
3001                    src_reg: CompactCfiRegister::cfa(),
3002                    offset_from_src: -(stack_size + 2 - 3) * pointer_size,
3003                },
3004                CompactCfiOp::RegisterAt {
3005                    dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
3006                    src_reg: CompactCfiRegister::cfa(),
3007                    offset_from_src: -(stack_size + 2 - 4) * pointer_size,
3008                },
3009            ];
3010
3011            match opcode.instructions(&iter) {
3012                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3013                _ => unreachable!(),
3014            }
3015        }
3016        {
3017            // Holes in the general registers
3018            let stack_size: i32 = 0xa7;
3019            let registers = [2, 0, 4, 0, 6];
3020            let opcode = Opcode(
3021                X86_MODE_RBP_FRAME
3022                    | pack_x86_rbp_registers(registers)
3023                    | ((stack_size as u32) << stack_size_offset),
3024            );
3025            let expected = vec![
3026                CompactCfiOp::RegisterIs {
3027                    dest_reg: CompactCfiRegister::cfa(),
3028                    src_reg: CompactCfiRegister::frame_pointer(),
3029                    offset_from_src: 2 * pointer_size,
3030                },
3031                CompactCfiOp::RegisterAt {
3032                    dest_reg: CompactCfiRegister::frame_pointer(),
3033                    src_reg: CompactCfiRegister::cfa(),
3034                    offset_from_src: -2 * pointer_size,
3035                },
3036                CompactCfiOp::RegisterAt {
3037                    dest_reg: CompactCfiRegister::instruction_pointer(),
3038                    src_reg: CompactCfiRegister::cfa(),
3039                    offset_from_src: -pointer_size,
3040                },
3041                CompactCfiOp::RegisterAt {
3042                    dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
3043                    src_reg: CompactCfiRegister::cfa(),
3044                    offset_from_src: -(stack_size + 2) * pointer_size,
3045                },
3046                CompactCfiOp::RegisterAt {
3047                    dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
3048                    src_reg: CompactCfiRegister::cfa(),
3049                    offset_from_src: -(stack_size + 2 - 2) * pointer_size,
3050                },
3051                CompactCfiOp::RegisterAt {
3052                    dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
3053                    src_reg: CompactCfiRegister::cfa(),
3054                    offset_from_src: -(stack_size + 2 - 4) * pointer_size,
3055                },
3056            ];
3057
3058            match opcode.instructions(&iter) {
3059                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3060                _ => unreachable!(),
3061            }
3062        }
3063
3064        // Check that stack-immediate opcodes work
3065        {
3066            // Simple, no general registers to restore
3067            let stack_size: i32 = 0xa1;
3068            let packed_stack_size = (stack_size as u32) << stack_size_offset;
3069            let num_regs = 0;
3070            let packed_num_regs = num_regs << frameless_reg_count_offset;
3071            let registers = [0, 0, 0, 0, 0, 0];
3072            let opcode = Opcode(
3073                X86_MODE_STACK_IMMD
3074                    | pack_x86_stackless_registers(num_regs, registers)
3075                    | packed_num_regs
3076                    | packed_stack_size,
3077            );
3078            let expected = vec![
3079                CompactCfiOp::RegisterIs {
3080                    dest_reg: CompactCfiRegister::cfa(),
3081                    src_reg: CompactCfiRegister::stack_pointer(),
3082                    offset_from_src: stack_size * pointer_size,
3083                },
3084                CompactCfiOp::RegisterAt {
3085                    dest_reg: CompactCfiRegister::instruction_pointer(),
3086                    src_reg: CompactCfiRegister::cfa(),
3087                    offset_from_src: -pointer_size,
3088                },
3089            ];
3090
3091            match opcode.instructions(&iter) {
3092                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3093                _ => unreachable!(),
3094            }
3095        }
3096        {
3097            // One general register to restore
3098            let stack_size: i32 = 0x13;
3099            let packed_stack_size = (stack_size as u32) << stack_size_offset;
3100            let num_regs = 1;
3101            let packed_num_regs = num_regs << frameless_reg_count_offset;
3102            let registers = [0, 0, 0, 0, 0, 1];
3103            let opcode = Opcode(
3104                X86_MODE_STACK_IMMD
3105                    | pack_x86_stackless_registers(num_regs, registers)
3106                    | packed_num_regs
3107                    | packed_stack_size,
3108            );
3109            let expected = vec![
3110                CompactCfiOp::RegisterIs {
3111                    dest_reg: CompactCfiRegister::cfa(),
3112                    src_reg: CompactCfiRegister::stack_pointer(),
3113                    offset_from_src: stack_size * pointer_size,
3114                },
3115                CompactCfiOp::RegisterAt {
3116                    dest_reg: CompactCfiRegister::instruction_pointer(),
3117                    src_reg: CompactCfiRegister::cfa(),
3118                    offset_from_src: -pointer_size,
3119                },
3120                CompactCfiOp::RegisterAt {
3121                    dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
3122                    src_reg: CompactCfiRegister::cfa(),
3123                    offset_from_src: -2 * pointer_size,
3124                },
3125            ];
3126
3127            match opcode.instructions(&iter) {
3128                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3129                _ => unreachable!(),
3130            }
3131        }
3132        {
3133            // All general register slots used
3134            let stack_size: i32 = 0xc1;
3135            let packed_stack_size = (stack_size as u32) << stack_size_offset;
3136            let num_regs = 6;
3137            let packed_num_regs = num_regs << frameless_reg_count_offset;
3138            let registers = [1, 2, 3, 4, 5, 6];
3139            let opcode = Opcode(
3140                X86_MODE_STACK_IMMD
3141                    | pack_x86_stackless_registers(num_regs, registers)
3142                    | packed_num_regs
3143                    | packed_stack_size,
3144            );
3145            let expected = vec![
3146                CompactCfiOp::RegisterIs {
3147                    dest_reg: CompactCfiRegister::cfa(),
3148                    src_reg: CompactCfiRegister::stack_pointer(),
3149                    offset_from_src: stack_size * pointer_size,
3150                },
3151                CompactCfiOp::RegisterAt {
3152                    dest_reg: CompactCfiRegister::instruction_pointer(),
3153                    src_reg: CompactCfiRegister::cfa(),
3154                    offset_from_src: -pointer_size,
3155                },
3156                CompactCfiOp::RegisterAt {
3157                    dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
3158                    src_reg: CompactCfiRegister::cfa(),
3159                    offset_from_src: -2 * pointer_size,
3160                },
3161                CompactCfiOp::RegisterAt {
3162                    dest_reg: CompactCfiRegister::from_x86_encoded(5).unwrap(),
3163                    src_reg: CompactCfiRegister::cfa(),
3164                    offset_from_src: -3 * pointer_size,
3165                },
3166                CompactCfiOp::RegisterAt {
3167                    dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
3168                    src_reg: CompactCfiRegister::cfa(),
3169                    offset_from_src: -4 * pointer_size,
3170                },
3171                CompactCfiOp::RegisterAt {
3172                    dest_reg: CompactCfiRegister::from_x86_encoded(3).unwrap(),
3173                    src_reg: CompactCfiRegister::cfa(),
3174                    offset_from_src: -5 * pointer_size,
3175                },
3176                CompactCfiOp::RegisterAt {
3177                    dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
3178                    src_reg: CompactCfiRegister::cfa(),
3179                    offset_from_src: -6 * pointer_size,
3180                },
3181                CompactCfiOp::RegisterAt {
3182                    dest_reg: CompactCfiRegister::from_x86_encoded(1).unwrap(),
3183                    src_reg: CompactCfiRegister::cfa(),
3184                    offset_from_src: -7 * pointer_size,
3185                },
3186            ];
3187
3188            match opcode.instructions(&iter) {
3189                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3190                _ => unreachable!(),
3191            }
3192        }
3193        {
3194            // Some general registers
3195            let stack_size: i32 = 0xf1;
3196            let packed_stack_size = (stack_size as u32) << stack_size_offset;
3197            let num_regs = 3;
3198            let packed_num_regs = num_regs << frameless_reg_count_offset;
3199            let registers = [0, 0, 0, 2, 4, 6];
3200            let opcode = Opcode(
3201                X86_MODE_STACK_IMMD
3202                    | pack_x86_stackless_registers(num_regs, registers)
3203                    | packed_num_regs
3204                    | packed_stack_size,
3205            );
3206            let expected = vec![
3207                CompactCfiOp::RegisterIs {
3208                    dest_reg: CompactCfiRegister::cfa(),
3209                    src_reg: CompactCfiRegister::stack_pointer(),
3210                    offset_from_src: stack_size * pointer_size,
3211                },
3212                CompactCfiOp::RegisterAt {
3213                    dest_reg: CompactCfiRegister::instruction_pointer(),
3214                    src_reg: CompactCfiRegister::cfa(),
3215                    offset_from_src: -pointer_size,
3216                },
3217                CompactCfiOp::RegisterAt {
3218                    dest_reg: CompactCfiRegister::from_x86_encoded(6).unwrap(),
3219                    src_reg: CompactCfiRegister::cfa(),
3220                    offset_from_src: -2 * pointer_size,
3221                },
3222                CompactCfiOp::RegisterAt {
3223                    dest_reg: CompactCfiRegister::from_x86_encoded(4).unwrap(),
3224                    src_reg: CompactCfiRegister::cfa(),
3225                    offset_from_src: -3 * pointer_size,
3226                },
3227                CompactCfiOp::RegisterAt {
3228                    dest_reg: CompactCfiRegister::from_x86_encoded(2).unwrap(),
3229                    src_reg: CompactCfiRegister::cfa(),
3230                    offset_from_src: -4 * pointer_size,
3231                },
3232            ];
3233
3234            match opcode.instructions(&iter) {
3235                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3236                _ => unreachable!(),
3237            }
3238        }
3239
3240        // Check that stack-indirect opcodes work (feature unimplemented)
3241        {
3242            let _opcode = Opcode(X86_MODE_STACK_IND);
3243            // ... tests
3244        }
3245
3246        Ok(())
3247    }
3248
3249    #[test]
3250    fn test_compact_opcodes_arm64() -> Result<(), MachError> {
3251        // Make an empty but valid section to initialize the CompactUnwindInfoIter
3252        let pointer_size = 8;
3253        let frameless_stack_size_offset = 32 - 8 - 12;
3254        let offset = &mut 0;
3255        let mut section = vec![0u8; 1024];
3256        // Just set the version, everything else is 0
3257        section.gwrite(1u32, offset)?;
3258
3259        let iter = CompactUnwindInfoIter::new(&section, true, Arch::Arm64)?;
3260
3261        // Check that the null opcode is handled reasonably
3262        {
3263            let opcode = Opcode(0);
3264            assert!(matches!(opcode.instructions(&iter), CompactUnwindOp::None));
3265        }
3266
3267        // Check that dwarf opcodes work
3268        {
3269            let opcode = Opcode(ARM64_MODE_DWARF | 0x00123456);
3270            assert!(matches!(
3271                opcode.instructions(&iter),
3272                CompactUnwindOp::UseDwarfFde {
3273                    offset_in_eh_frame: 0x00123456
3274                }
3275            ));
3276        }
3277
3278        // Check that frame opcodes work
3279        {
3280            // Simple, no general registers to restore
3281            let registers = 0b0_0000_0000;
3282            let opcode = Opcode(ARM64_MODE_FRAME | registers);
3283            let expected = vec![
3284                CompactCfiOp::RegisterIs {
3285                    dest_reg: CompactCfiRegister::cfa(),
3286                    src_reg: CompactCfiRegister::frame_pointer(),
3287                    offset_from_src: 2 * pointer_size,
3288                },
3289                CompactCfiOp::RegisterAt {
3290                    dest_reg: CompactCfiRegister::frame_pointer(),
3291                    src_reg: CompactCfiRegister::cfa(),
3292                    offset_from_src: -2 * pointer_size,
3293                },
3294                CompactCfiOp::RegisterAt {
3295                    dest_reg: CompactCfiRegister::instruction_pointer(),
3296                    src_reg: CompactCfiRegister::cfa(),
3297                    offset_from_src: -pointer_size,
3298                },
3299            ];
3300
3301            match opcode.instructions(&iter) {
3302                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3303                _ => unreachable!(),
3304            }
3305        }
3306        {
3307            // One general register pair to restore
3308            let registers = 0b0_0100_0000;
3309            let opcode = Opcode(ARM64_MODE_FRAME | registers);
3310            let expected = vec![
3311                CompactCfiOp::RegisterIs {
3312                    dest_reg: CompactCfiRegister::cfa(),
3313                    src_reg: CompactCfiRegister::frame_pointer(),
3314                    offset_from_src: 2 * pointer_size,
3315                },
3316                CompactCfiOp::RegisterAt {
3317                    dest_reg: CompactCfiRegister::frame_pointer(),
3318                    src_reg: CompactCfiRegister::cfa(),
3319                    offset_from_src: -2 * pointer_size,
3320                },
3321                CompactCfiOp::RegisterAt {
3322                    dest_reg: CompactCfiRegister::instruction_pointer(),
3323                    src_reg: CompactCfiRegister::cfa(),
3324                    offset_from_src: -pointer_size,
3325                },
3326                CompactCfiOp::RegisterAt {
3327                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 12),
3328                    src_reg: CompactCfiRegister::cfa(),
3329                    offset_from_src: -3 * pointer_size,
3330                },
3331                CompactCfiOp::RegisterAt {
3332                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 13),
3333                    src_reg: CompactCfiRegister::cfa(),
3334                    offset_from_src: -4 * pointer_size,
3335                },
3336            ];
3337
3338            match opcode.instructions(&iter) {
3339                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3340                _ => unreachable!(),
3341            }
3342        }
3343        {
3344            // All general purpose registers restored
3345            let registers = 0b1_1111_1111;
3346            let opcode = Opcode(ARM64_MODE_FRAME | registers);
3347            let expected = vec![
3348                CompactCfiOp::RegisterIs {
3349                    dest_reg: CompactCfiRegister::cfa(),
3350                    src_reg: CompactCfiRegister::frame_pointer(),
3351                    offset_from_src: 2 * pointer_size,
3352                },
3353                CompactCfiOp::RegisterAt {
3354                    dest_reg: CompactCfiRegister::frame_pointer(),
3355                    src_reg: CompactCfiRegister::cfa(),
3356                    offset_from_src: -2 * pointer_size,
3357                },
3358                CompactCfiOp::RegisterAt {
3359                    dest_reg: CompactCfiRegister::instruction_pointer(),
3360                    src_reg: CompactCfiRegister::cfa(),
3361                    offset_from_src: -pointer_size,
3362                },
3363                CompactCfiOp::RegisterAt {
3364                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE),
3365                    src_reg: CompactCfiRegister::cfa(),
3366                    offset_from_src: -3 * pointer_size,
3367                },
3368                CompactCfiOp::RegisterAt {
3369                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 1),
3370                    src_reg: CompactCfiRegister::cfa(),
3371                    offset_from_src: -4 * pointer_size,
3372                },
3373                CompactCfiOp::RegisterAt {
3374                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 2),
3375                    src_reg: CompactCfiRegister::cfa(),
3376                    offset_from_src: -5 * pointer_size,
3377                },
3378                CompactCfiOp::RegisterAt {
3379                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 3),
3380                    src_reg: CompactCfiRegister::cfa(),
3381                    offset_from_src: -6 * pointer_size,
3382                },
3383                CompactCfiOp::RegisterAt {
3384                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 4),
3385                    src_reg: CompactCfiRegister::cfa(),
3386                    offset_from_src: -7 * pointer_size,
3387                },
3388                CompactCfiOp::RegisterAt {
3389                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 5),
3390                    src_reg: CompactCfiRegister::cfa(),
3391                    offset_from_src: -8 * pointer_size,
3392                },
3393                CompactCfiOp::RegisterAt {
3394                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 6),
3395                    src_reg: CompactCfiRegister::cfa(),
3396                    offset_from_src: -9 * pointer_size,
3397                },
3398                CompactCfiOp::RegisterAt {
3399                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 7),
3400                    src_reg: CompactCfiRegister::cfa(),
3401                    offset_from_src: -10 * pointer_size,
3402                },
3403                CompactCfiOp::RegisterAt {
3404                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 8),
3405                    src_reg: CompactCfiRegister::cfa(),
3406                    offset_from_src: -11 * pointer_size,
3407                },
3408                CompactCfiOp::RegisterAt {
3409                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 9),
3410                    src_reg: CompactCfiRegister::cfa(),
3411                    offset_from_src: -12 * pointer_size,
3412                },
3413                CompactCfiOp::RegisterAt {
3414                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 10),
3415                    src_reg: CompactCfiRegister::cfa(),
3416                    offset_from_src: -13 * pointer_size,
3417                },
3418                CompactCfiOp::RegisterAt {
3419                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 11),
3420                    src_reg: CompactCfiRegister::cfa(),
3421                    offset_from_src: -14 * pointer_size,
3422                },
3423                CompactCfiOp::RegisterAt {
3424                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 12),
3425                    src_reg: CompactCfiRegister::cfa(),
3426                    offset_from_src: -15 * pointer_size,
3427                },
3428                CompactCfiOp::RegisterAt {
3429                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 13),
3430                    src_reg: CompactCfiRegister::cfa(),
3431                    offset_from_src: -16 * pointer_size,
3432                },
3433                CompactCfiOp::RegisterAt {
3434                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 14),
3435                    src_reg: CompactCfiRegister::cfa(),
3436                    offset_from_src: -17 * pointer_size,
3437                },
3438                CompactCfiOp::RegisterAt {
3439                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 15),
3440                    src_reg: CompactCfiRegister::cfa(),
3441                    offset_from_src: -18 * pointer_size,
3442                },
3443                CompactCfiOp::RegisterAt {
3444                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 16),
3445                    src_reg: CompactCfiRegister::cfa(),
3446                    offset_from_src: -19 * pointer_size,
3447                },
3448                CompactCfiOp::RegisterAt {
3449                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 17),
3450                    src_reg: CompactCfiRegister::cfa(),
3451                    offset_from_src: -20 * pointer_size,
3452                },
3453            ];
3454
3455            match opcode.instructions(&iter) {
3456                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3457                _ => unreachable!(),
3458            }
3459        }
3460        {
3461            // Holes between the registers
3462            let registers = 0b1_0101_0101;
3463            let opcode = Opcode(ARM64_MODE_FRAME | registers);
3464            let expected = vec![
3465                CompactCfiOp::RegisterIs {
3466                    dest_reg: CompactCfiRegister::cfa(),
3467                    src_reg: CompactCfiRegister::frame_pointer(),
3468                    offset_from_src: 2 * pointer_size,
3469                },
3470                CompactCfiOp::RegisterAt {
3471                    dest_reg: CompactCfiRegister::frame_pointer(),
3472                    src_reg: CompactCfiRegister::cfa(),
3473                    offset_from_src: -2 * pointer_size,
3474                },
3475                CompactCfiOp::RegisterAt {
3476                    dest_reg: CompactCfiRegister::instruction_pointer(),
3477                    src_reg: CompactCfiRegister::cfa(),
3478                    offset_from_src: -pointer_size,
3479                },
3480                CompactCfiOp::RegisterAt {
3481                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE),
3482                    src_reg: CompactCfiRegister::cfa(),
3483                    offset_from_src: -3 * pointer_size,
3484                },
3485                CompactCfiOp::RegisterAt {
3486                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 1),
3487                    src_reg: CompactCfiRegister::cfa(),
3488                    offset_from_src: -4 * pointer_size,
3489                },
3490                CompactCfiOp::RegisterAt {
3491                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 4),
3492                    src_reg: CompactCfiRegister::cfa(),
3493                    offset_from_src: -5 * pointer_size,
3494                },
3495                CompactCfiOp::RegisterAt {
3496                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 5),
3497                    src_reg: CompactCfiRegister::cfa(),
3498                    offset_from_src: -6 * pointer_size,
3499                },
3500                CompactCfiOp::RegisterAt {
3501                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 8),
3502                    src_reg: CompactCfiRegister::cfa(),
3503                    offset_from_src: -7 * pointer_size,
3504                },
3505                CompactCfiOp::RegisterAt {
3506                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 9),
3507                    src_reg: CompactCfiRegister::cfa(),
3508                    offset_from_src: -8 * pointer_size,
3509                },
3510                CompactCfiOp::RegisterAt {
3511                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 12),
3512                    src_reg: CompactCfiRegister::cfa(),
3513                    offset_from_src: -9 * pointer_size,
3514                },
3515                CompactCfiOp::RegisterAt {
3516                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 13),
3517                    src_reg: CompactCfiRegister::cfa(),
3518                    offset_from_src: -10 * pointer_size,
3519                },
3520                CompactCfiOp::RegisterAt {
3521                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 16),
3522                    src_reg: CompactCfiRegister::cfa(),
3523                    offset_from_src: -11 * pointer_size,
3524                },
3525                CompactCfiOp::RegisterAt {
3526                    dest_reg: CompactCfiRegister::from_arm64_encoded(ARM64_REG_BASE + 17),
3527                    src_reg: CompactCfiRegister::cfa(),
3528                    offset_from_src: -12 * pointer_size,
3529                },
3530            ];
3531
3532            match opcode.instructions(&iter) {
3533                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3534                _ => unreachable!(),
3535            }
3536        }
3537
3538        // Check that the frameless opcode works
3539        {
3540            let stack_size = 0xae1;
3541            let packed_stack_size = stack_size << frameless_stack_size_offset;
3542            let opcode = Opcode(ARM64_MODE_FRAMELESS | packed_stack_size);
3543            let expected = vec![
3544                CompactCfiOp::RegisterIs {
3545                    dest_reg: CompactCfiRegister::cfa(),
3546                    src_reg: CompactCfiRegister::stack_pointer(),
3547                    offset_from_src: stack_size as i32 * 16,
3548                },
3549                CompactCfiOp::RegisterIs {
3550                    dest_reg: CompactCfiRegister::instruction_pointer(),
3551                    src_reg: CompactCfiRegister::link_register(),
3552                    offset_from_src: 0,
3553                },
3554            ];
3555
3556            match opcode.instructions(&iter) {
3557                CompactUnwindOp::CfiOps(ops) => assert_opcodes_match(ops, expected.into_iter()),
3558                _ => unreachable!(),
3559            }
3560        }
3561
3562        Ok(())
3563    }
3564
3565    #[test]
3566    fn test_compact_register_naming() -> Result<(), MachError> {
3567        // Just guarding against special register names breaking
3568
3569        let offset = &mut 0;
3570        let mut section = vec![0u8; 1024];
3571        // Just set the version, everything else is 0
3572        section.gwrite(1u32, offset)?;
3573
3574        {
3575            // ARM64 register names
3576            let iter = CompactUnwindInfoIter::new(&section, true, Arch::Arm64)?;
3577
3578            assert_eq!(CompactCfiRegister::cfa().name(&iter), Some("cfa"));
3579            assert_eq!(CompactCfiRegister::stack_pointer().name(&iter), Some("sp"));
3580            assert_eq!(
3581                CompactCfiRegister::instruction_pointer().name(&iter),
3582                Some("pc")
3583            );
3584            assert_eq!(CompactCfiRegister::frame_pointer().name(&iter), Some("x29"));
3585            assert_eq!(CompactCfiRegister::link_register().name(&iter), Some("x30"));
3586        }
3587
3588        {
3589            // x86 register names
3590            let iter = CompactUnwindInfoIter::new(&section, true, Arch::X86)?;
3591
3592            assert_eq!(CompactCfiRegister::cfa().name(&iter), Some("cfa"));
3593            assert_eq!(CompactCfiRegister::stack_pointer().name(&iter), Some("esp"));
3594            assert_eq!(
3595                CompactCfiRegister::instruction_pointer().name(&iter),
3596                Some("eip")
3597            );
3598            assert_eq!(CompactCfiRegister::frame_pointer().name(&iter), Some("ebp"));
3599        }
3600
3601        {
3602            // x64 register names
3603            let iter = CompactUnwindInfoIter::new(&section, true, Arch::Amd64)?;
3604
3605            assert_eq!(CompactCfiRegister::cfa().name(&iter), Some("cfa"));
3606            assert_eq!(CompactCfiRegister::stack_pointer().name(&iter), Some("rsp"));
3607            assert_eq!(
3608                CompactCfiRegister::instruction_pointer().name(&iter),
3609                Some("rip")
3610            );
3611            assert_eq!(CompactCfiRegister::frame_pointer().name(&iter), Some("rbp"));
3612        }
3613
3614        Ok(())
3615    }
3616}