// llvm_bitcode/schema/values.rs
//! From the LLVM Project, under the [Apache License v2.0 with LLVM Exceptions](https://llvm.org/LICENSE.txt)

use num_enum::{FromPrimitive, IntoPrimitive, TryFromPrimitive, TryFromPrimitiveError};
use std::num::NonZero;
/// Attribute kinds as encoded in bitcode attribute records
/// (numbering follows LLVM's bitcode attribute-kind codes).
#[derive(Debug, Copy, Clone, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum AttrKind {
    /// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias.
    /// 0 means unaligned (different from align(1)).
    Alignment = 1,
    /// inline=always.
    AlwaysInline = 2,
    /// Pass structure by value.
    ByVal = 3,
    /// Source said inlining was desirable.
    InlineHint = 4,
    /// Force argument to be passed in register.
    InReg = 5,
    /// Function must be optimized for size first.
    MinSize = 6,
    /// Naked function.
    Naked = 7,
    /// Nested function static chain.
    Nest = 8,
    /// Considered to not alias after call.
    NoAlias = 9,
    /// Callee isn't recognized as a builtin.
    NoBuiltin = 10,
    /// Pointer argument is not captured by the callee
    /// (superseded by `Captures` in newer LLVM).
    NoCapture = 11,
    /// Call cannot be duplicated.
    NoDuplicate = 12,
    /// Disable implicit floating point insts.
    NoImplicitFloat = 13,
    /// inline=never.
    NoInline = 14,
    /// Function is called early and/or often, so lazy binding isn't worthwhile.
    NonLazyBind = 15,
    /// Disable redzone.
    NoRedZone = 16,
    /// Mark the function as not returning.
    NoReturn = 17,
    /// Function doesn't unwind stack.
    NoUnwind = 18,
    /// opt_size.
    OptimizeForSize = 19,
    /// Function does not access memory.
    ReadNone = 20,
    /// Function only reads from memory.
    ReadOnly = 21,
    /// Return value is always equal to this argument.
    Returned = 22,
    /// Function can return twice.
    ReturnsTwice = 23,
    /// Sign extended before/after call.
    SExt = 24,
    /// Alignment of stack for function (3 bits) stored as log2 of alignment with
    /// +1 bias. 0 means unaligned (different from alignstack=(1)).
    StackAlignment = 25,
    /// Stack protection.
    StackProtect = 26,
    /// Stack protection required.
    StackProtectReq = 27,
    /// Strong Stack protection.
    StackProtectStrong = 28,
    /// Hidden pointer to structure to return.
    StructRet = 29,
    /// AddressSanitizer is on.
    SanitizeAddress = 30,
    /// ThreadSanitizer is on.
    SanitizeThread = 31,
    /// MemorySanitizer is on.
    SanitizeMemory = 32,
    /// Function must be in an unwind table.
    UwTable = 33,
    /// Zero extended before/after call.
    ZExt = 34,
    /// Callee is recognized as a builtin, despite nobuiltin attribute on its
    /// declaration.
    Builtin = 35,
    /// Marks function as being in a cold path.
    Cold = 36,
    /// Function must not be optimized.
    OptimizeNone = 37,
    /// Pass structure in an alloca.
    InAlloca = 38,
    /// Pointer is known to be not null.
    NonNull = 39,
    /// Build jump-instruction tables and replace refs.
    JumpTable = 40,
    /// Pointer is known to be dereferenceable.
    Dereferenceable = 41,
    /// Pointer is either null or dereferenceable.
    DereferenceableOrNull = 42,
    /// Can only be moved to control-equivalent blocks.
    /// NB: Could be IntersectCustom with "or" handling.
    Convergent = 43,
    /// Safe Stack protection.
    Safestack = 44,
    /// Unused
    ArgMemOnly = 45,
    /// Argument is swift self/context.
    SwiftSelf = 46,
    /// Argument is swift error.
    SwiftError = 47,
    /// The function does not recurse.
    NoRecurse = 48,
    /// Unused
    InaccessibleMemOnly = 49,
    /// Unused
    InaccessibleMemOrArgMemOnly = 50,
    /// The result of the function is guaranteed to point to a number of bytes that
    /// we can determine if we know the value of the function's arguments.
    AllocSize = 51,
    /// Function only writes to memory.
    WriteOnly = 52,
    /// Function can be speculated.
    Speculatable = 53,
    /// Function was called in a scope requiring strict floating point semantics.
    StrictFp = 54,
    /// HWAddressSanitizer is on.
    SanitizeHwAddress = 55,
    /// Disable Indirect Branch Tracking.
    NoCfCheck = 56,
    /// Select optimizations for best fuzzing signal.
    OptForFuzzing = 57,
    /// Shadow Call Stack protection.
    ShadowCallStack = 58,
    /// Speculative Load Hardening is enabled.
    ///
    /// Note that this uses the default compatibility (always compatible during
    /// inlining) and a conservative merge strategy where inlining an attributed
    /// body will add the attribute to the caller. This ensures that code carrying
    /// this attribute will always be lowered with hardening enabled.
    SpeculativeLoadHardening = 59,
    /// Parameter is required to be a trivial constant.
    ImmArg = 60,
    /// Function always comes back to callsite.
    WillReturn = 61,
    /// Function does not deallocate memory.
    Nofree = 62,
    /// Function does not synchronize.
    Nosync = 63,
    /// MemTagSanitizer is on.
    SanitizeMemtag = 64,
    /// Similar to byval but without a copy.
    Preallocated = 65,
    /// Disable merging for specified functions or call sites.
    NoMerge = 66,
    /// Null pointer in address space zero is valid.
    NullPointerIsValid = 67,
    /// Parameter or return value may not contain uninitialized or poison bits.
    NoUndef = 68,
    /// Mark in-memory ABI type.
    ByRef = 69,
    /// Function is required to make Forward Progress.
    MustProgress = 70,
    /// Function cannot enter into caller's translation unit.
    NoCallback = 71,
    /// Marks function as being in a hot path and frequently called.
    Hot = 72,
    /// Function should not be instrumented.
    NoProfile = 73,
    /// Minimum/Maximum vscale value for function.
    VscaleRange = 74,
    /// Argument is swift async context.
    SwiftAsync = 75,
    /// No SanitizeCoverage instrumentation.
    NoSanitizeCoverage = 76,
    /// Provide pointer element type to intrinsic.
    Elementtype = 77,
    /// Do not instrument function with sanitizers.
    DisableSanitizerInstrumentation = 78,
    /// No SanitizeBounds instrumentation.
    NoSanitizeBounds = 79,
    /// Parameter of a function that tells us the alignment of an allocation, as in
    /// aligned_alloc and aligned ::operator::new.
    AllocAlign = 80,
    /// Parameter is the pointer to be manipulated by the allocator function.
    AllocatedPointer = 81,
    /// Describes behavior of an allocator function in terms of known properties.
    AllocKind = 82,
    /// Function is a presplit coroutine.
    PresplitCoroutine = 83,
    /// Whether to keep return instructions, or replace with a jump to an external
    /// symbol.
    FnRetThunkExtern = 84,
    /// Function is excluded from profiling instrumentation (`skipprofile`).
    SkipProfile = 85,
    /// Memory effects of the function.
    Memory = 86,
    /// Forbidden floating-point classes.
    NoFpClass = 87,
    /// Select optimizations that give decent debug info.
    OptimizeForDebugging = 88,
    /// Pointer argument is writable.
    Writable = 89,
    /// The coroutine is only destroyed when it is complete.
    CoroOnlyDestroyWhenComplete = 90,
    /// Argument is dead if the call unwinds.
    DeadOnUnwind = 91,
    /// Parameter or return value is within the specified range.
    Range = 92,
    /// NumericalStabilitySanitizer is on.
    SanitizeNumericalStability = 93,
    /// Pointer argument memory is initialized.
    Initializes = 94,
    /// Function has a hybrid patchable thunk.
    HybridPatchable = 95,
    /// RealtimeSanitizer is on.
    SanitizeRealtime = 96,
    /// RealtimeSanitizer should error if a real-time unsafe function is invoked
    /// during a real-time sanitized function (see `sanitize_realtime`).
    SanitizeRealtimeBlocking = 97,
    /// The coroutine call meets the elide requirement. Hint the optimization
    /// pipeline to perform elide on the call or invoke instruction.
    CoroElideSafe = 98,
    /// No extension needed before/after call (high bits are undefined).
    NoExt = 99,
    /// Function is not a source of divergence.
    NoDivergenceSource = 100,
    /// TypeSanitizer is on.
    SanitizeType = 101,
    /// Specify how the pointer may be captured.
    Captures = 102,
    /// Argument is dead upon function return.
    DeadOnReturn = 103,
    /// Allocation token instrumentation is on.
    SanitizeAllocToken = 104,
    /// Result will not be undef or poison if all arguments are not undef and not
    /// poison.
    NoCreateUndefOrPoison = 105,
    /// Indicate the denormal handling of the default floating-point
    /// environment.
    DenormalFpEnv = 106,
    /// Do not outline this function.
    NoOutline = 107,
    /// Flatten function by recursively inlining all calls.
    Flatten = 108,

    /// llvm-bitcode-rs extension for storing string key/value attributes
    /// (not an LLVM attribute kind; `!0` is 255 in `repr(u8)`).
    StringAttribute = !0,
}
242
/// These are values used in the bitcode files to encode which
/// cast a `CST_CODE_CE_CAST` refers to.
#[derive(Debug, Copy, Clone, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum CastOpcode {
    /// Truncate an integer to a narrower integer type.
    Trunc = 0,
    /// Zero-extend an integer to a wider integer type.
    ZExt = 1,
    /// Sign-extend an integer to a wider integer type.
    SExt = 2,
    /// Floating point to unsigned integer.
    FpToUi = 3,
    /// Floating point to signed integer.
    FpToSi = 4,
    /// Unsigned integer to floating point.
    UiToFp = 5,
    /// Signed integer to floating point.
    SiToFp = 6,
    /// Truncate a floating-point value to a narrower FP type.
    FpTrunc = 7,
    /// Extend a floating-point value to a wider FP type.
    FpExt = 8,
    /// Pointer to integer.
    PtrToInt = 9,
    /// Integer to pointer.
    IntToPtr = 10,
    /// Reinterpret bits without changing them.
    Bitcast = 11,
    /// `addrspacecast`: convert a pointer between address spaces.
    Addrspace = 12,
}
262
/// These are bitcode-specific values, different from C++ enum
/// (`GlobalValue::LinkageTypes`); the obsolete/implicit-comdat encodings
/// below only exist in the bitcode format.
#[derive(Debug, Copy, Clone, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum Linkage {
    /// Externally visible function
    External = 0,
    /// Keep one copy of named function when linking (weak)
    /// Old value with implicit comdat.
    #[deprecated]
    WeakAnyOld = 1,
    /// Special purpose, only applies to global arrays
    Appending = 2,
    /// Rename collisions when linking (static functions).
    Internal = 3,
    /// Keep one copy of function when linking (inline)
    /// Old value with implicit comdat.
    #[deprecated]
    LinkOnceAnyOld = 4,
    /// Externally visible function
    /// Obsolete DLLImportLinkage
    #[deprecated]
    DllImport = 5,
    /// Externally visible function
    /// Obsolete DLLExportLinkage
    #[deprecated]
    DllExport = 6,
    /// ExternalWeak linkage
    ExternWeak = 7,
    /// Tentative definitions.
    Common = 8,
    /// Like Internal, but omit from symbol table.
    Private = 9,
    /// Same, but only replaced by something equivalent.
    /// Old value with implicit comdat.
    #[deprecated]
    WeakOdrOld = 10,
    /// Same, but only replaced by something equivalent.
    /// Old value with implicit comdat.
    #[deprecated]
    LinkOnceOdrOld = 11,
    /// Available for inspection, not emission.
    AvailableExternally = 12,
    /// Like Internal, but omit from symbol table.
    /// Obsolete LinkerPrivateLinkage
    #[deprecated]
    LinkerPrivate = 13,
    /// Like Internal, but omit from symbol table.
    /// Obsolete LinkerPrivateWeakLinkage
    #[deprecated]
    LinkerPrivateWeak = 14,
    /// Externally visible function
    /// Obsolete LinkOnceODRAutoHideLinkage
    #[deprecated]
    LinkOnceOdrAutoHide = 15,
    /// Keep one copy of named function when linking (weak)
    WeakAny = 16,
    /// Same, but only replaced by something equivalent.
    WeakOdr = 17,
    /// Keep one copy of function when linking (inline)
    LinkOnceAny = 18,
    /// Same, but only replaced by something equivalent.
    LinkOnceOdr = 19,
}
327
328impl Linkage {
329    /// `Private`/`Internal`/`LinkerPrivate`/`LinkerPrivateWeak`
330    #[allow(deprecated)]
331    #[must_use]
332    pub fn is_private(self) -> bool {
333        matches!(
334            self,
335            Self::Private | Self::Internal | Self::LinkerPrivate | Self::LinkerPrivateWeak
336        )
337    }
338}
339
/// DLL storage class as stored in bitcode global records.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum DllStorageClass {
    /// No DLL storage class.
    #[default]
    Default = 0,
    /// `dllimport`: the definition comes from a DLL.
    Import = 1,
    /// `dllexport`: the definition is exported from a DLL.
    Export = 2,
}
348
349#[derive(Debug, Copy, Clone, Eq, PartialEq, TryFromPrimitive)]
350#[repr(u8)]
351#[non_exhaustive]
352pub enum CallConv {
353    /// The default llvm calling convention, compatible with C. This convention
354    /// is the only one that supports varargs calls. As with typical C calling
355    /// conventions, the callee/caller have to tolerate certain amounts of
356    /// prototype mismatch.
357    C = 0,
358    /// Attempts to make calls as fast as possible (e.g. by passing things in
359    /// registers).
360    Fast = 8,
361    /// Attempts to make code in the caller as efficient as possible under the
362    /// assumption that the call is not commonly executed. As such, these calls
363    /// often preserve all registers so that the call does not break any live
364    /// ranges in the caller side.
365    Cold = 9,
366    /// Used by the Glasgow Haskell Compiler (GHC).
367    Ghc = 10,
368    /// Used by the High-Performance Erlang Compiler (HiPE).
369    HiPE = 11,
370    /// Used for dynamic register based calls (e.g. stackmap and patchpoint
371    /// intrinsics).
372    AnyReg = 13,
373    /// Used for runtime calls that preserves most registers.
374    PreserveMost = 14,
375    /// Used for runtime calls that preserves (almost) all registers.
376    PreserveAll = 15,
377    /// Calling convention for Swift.
378    Swift = 16,
379    /// Used for access functions.
380    CxxFastTls = 17,
381    /// Attemps to make calls as fast as possible while guaranteeing that tail
382    /// call optimization can always be performed.
383    Tail = 18,
384    /// Special calling convention on Windows for calling the Control Guard
385    /// Check ICall funtion. The function takes exactly one argument (address of
386    /// the target function) passed in the first argument register, and has no
387    /// return value. All register values are preserved.
388    CfGuardCheck = 19,
389    /// This follows the Swift calling convention in how arguments are passed
390    /// but guarantees tail calls will be made by making the callee clean up
391    /// their stack.
392    SwiftTail = 20,
393    /// Used for runtime calls that preserves none general registers.
394    PreserveNone = 21,
395    /// stdcall is mostly used by the Win32 API. It is basically the same as the
396    /// C convention with the difference in that the callee is responsible for
397    /// popping the arguments from the stack.
398    X86StdCall = 64,
399    /// 'fast' analog of X86_StdCall. Passes first two arguments in ECX:EDX
400    /// registers, others - via stack. Callee is responsible for stack cleaning.
401    X86FastCall = 65,
402    /// ARM Procedure Calling Standard (obsolete, but still used on some
403    /// targets).
404    ArmApcs = 66,
405    /// ARM Architecture Procedure Calling Standard calling convention (aka
406    /// EABI). Soft float variant.
407    ArmAapcs = 67,
408    /// Same as ARM_AAPCS, but uses hard floating point ABI.
409    ArmAapcsVfp = 68,
410    /// Used for MSP430 interrupt routines.
411    Msp430Intr = 69,
412    /// Similar to X86_StdCall. Passes first argument in ECX, others via stack.
413    /// Callee is responsible for stack cleaning. MSVC uses this by default for
414    /// methods in its ABI.
415    X86ThisCall = 70,
416    /// Call to a PTX kernel. Passes all arguments in parameter space.
417    PtxKernel = 71,
418    /// Call to a PTX device function. Passes all arguments in register or
419    /// parameter space.
420    PtxDevice = 72,
421    /// Used for SPIR non-kernel device functions. No lowering or expansion of
422    /// arguments. Structures are passed as a pointer to a struct with the
423    /// byval attribute. Functions can only call SPIR_FUNC and SPIR_KERNEL
424    /// functions. Functions can only have zero or one return values. Variable
425    /// arguments are not allowed, except for printf. How arguments/return
426    /// values are lowered are not specified. Functions are only visible to the
427    /// devices.
428    SpirFunc = 75,
429    /// Used for SPIR kernel functions. Inherits the restrictions of SPIR_FUNC,
430    /// except it cannot have non-void return values, it cannot have variable
431    /// arguments, it can also be called by the host or it is externally
432    /// visible.
433    SpirKernel = 76,
434    /// Used for Intel OpenCL built-ins.
435    IntelOclBi = 77,
436    /// The C convention as specified in the x86-64 supplement to the System V
437    /// ABI, used on most non-Windows systems.
438    X8664SysV = 78,
439    /// The C convention as implemented on Windows/x86-64 and AArch64. It
440    /// differs from the more common \c X86_64_SysV convention in a number of
441    /// ways, most notably in that XMM registers used to pass arguments are
442    /// shadowed by GPRs, and vice versa. On AArch64, this is identical to the
443    /// normal C (AAPCS) calling convention for normal functions, but floats are
444    /// passed in integer registers to variadic functions.
445    Win64 = 79,
446    /// MSVC calling convention that passes vectors and vector aggregates in SSE
447    /// registers.
448    X86VectorCall = 80,
449    /// Placeholders for HHVM calling conventions (deprecated, removed).
450    #[deprecated]
451    DummyHhvm = 81,
452    DummyHhvmC = 82,
453    /// x86 hardware interrupt context. Callee may take one or two parameters,
454    /// where the 1st represents a pointer to hardware context frame and the 2nd
455    /// represents hardware error code, the presence of the later depends on the
456    /// interrupt vector taken. Valid for both 32- and 64-bit subtargets.
457    X86Intr = 83,
458    /// Used for AVR interrupt routines.
459    AvrIntr = 84,
460    /// Used for AVR signal routines.
461    AvrSignal = 85,
462    /// Used for special AVR rtlib functions which have an "optimized"
463    /// convention to preserve registers.
464    AvrBuiltin = 86,
465    /// Used for Mesa vertex shaders, or AMDPAL last shader stage before
466    /// rasterization (vertex shader if tessellation and geometry are not in
467    /// use, or otherwise copy shader if one is needed).
468    AmdGpuVs = 87,
469    /// Used for Mesa/AMDPAL geometry shaders.
470    AmdGpuGs = 88,
471    /// Used for Mesa/AMDPAL pixel shaders.
472    AmdGpuPs = 89,
473    /// Used for Mesa/AMDPAL compute shaders.
474    AmdGpuCs = 90,
475    /// Used for AMDGPU code object kernels.
476    AmdGpuKernel = 91,
477    /// Register calling convention used for parameters transfer optimization
478    X86RegCall = 92,
479    /// Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
480    AmdGpuHs = 93,
481    /// Used for special MSP430 rtlib functions which have an "optimized"
482    /// convention using additional registers.
483    Msp430Builtin = 94,
484    /// Used for AMDPAL vertex shader if tessellation is in use.
485    AmdGpuLs = 95,
486    /// Used for AMDPAL shader stage before geometry shader if geometry is in
487    /// use. So either the domain (= tessellation evaluation) shader if
488    /// tessellation is in use, or otherwise the vertex shader.
489    AmdGpuEs = 96,
490    /// Used between AArch64 Advanced SIMD functions
491    AArch64VectorCall = 97,
492    /// Used between AArch64 SVE functions
493    AArch64SveVectorCall = 98,
494    /// For emscripten __invoke_* functions. The first argument is required to
495    /// be the function ptr being indirectly called. The remainder matches the
496    /// regular calling convention.
497    WasmEmscriptenInvoke = 99,
498    /// Used for AMD graphics targets.
499    AmdGpuGfx = 100,
500    /// Used for M68k interrupt routines.
501    M68kIntr = 101,
502    /// Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
503    AArch64SmeAbiSupportRoutinesPreserveMostFromX0 = 102,
504    /// Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
505    AArch64SmeAbiSupportRoutinesPreserveMostFromX2 = 103,
506    /// Used on AMDGPUs to give the middle-end more control over argument
507    /// placement.
508    AmdGpuCsChain = 104,
509    /// Used on AMDGPUs to give the middle-end more control over argument
510    /// placement. Preserves active lane values for input VGPRs.
511    AmdGpuCsChainPreserve = 105,
512    /// Used for M68k rtd-based CC (similar to X86's stdcall).
513    M68kRtd = 106,
514    /// Used by GraalVM. Two additional registers are reserved.
515    Graal = 107,
516    /// Calling convention used in the ARM64EC ABI to implement calls between
517    /// x64 code and thunks. This is basically the x64 calling convention using
518    /// ARM64 register names. The first parameter is mapped to x9.
519    Arm64ecThunkX64 = 108,
520    /// Calling convention used in the ARM64EC ABI to implement calls between
521    /// ARM64 code and thunks. This is just the ARM64 calling convention,
522    /// except that the first parameter is mapped to x9.
523    Arm64ecThunkNative = 109,
524    /// Calling convention used for RISC-V V-extension.
525    RiscVVectorCall = 110,
526    /// Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
527    AArch64SmeAbiSupportRoutinesPreserveMostFromX1 = 111,
528    /// Calling convention used for RISC-V V-extension fixed vectors.
529    RiscVVlsCall32 = 112,
530    RiscVVlsCall64 = 113,
531    RiscVVlsCall128 = 114,
532    RiscVVlsCall256 = 115,
533    RiscVVlsCall512 = 116,
534    RiscVVlsCall1024 = 117,
535    RiscVVlsCall2048 = 118,
536    RiscVVlsCall4096 = 119,
537    RiscVVlsCall8192 = 120,
538    RiscVVlsCall16384 = 121,
539    RiscVVlsCall32768 = 122,
540    RiscVVlsCall65536 = 123,
541    AmdGpuGfxWholeWave = 124,
542    /// Calling convention used for CHERIoT when crossing a protection boundary.
543    CHERIoTCompartmentCall = 125,
544    /// Calling convention used for the callee of CHERIoT_CompartmentCall.
545    /// Ignores the first two capability arguments and the first integer
546    /// argument, zeroes all unused return registers on return.
547    CHERIoTCompartmentCallee = 126,
548    /// Calling convention used for CHERIoT for cross-library calls to a
549    /// stateless compartment.
550    CHERIoTLibraryCall = 127,
551}
552
/// call conv field in bitcode is often mixed with flags
impl CallConv {
    /// Extract calling convention from CALL/CALLBR CCInfo flags.
    ///
    /// Mirrors the LLVM bitcode reader, where bit 0 of CCInfo is the `tail`
    /// marker and bits 1..=10 hold the convention id (`CALL_CCONV == 1`).
    /// Returns `None` when the id does not fit in `u8` or is not a known
    /// [`CallConv`] variant.
    #[must_use]
    pub fn from_call_flags(ccinfo_flags: u64) -> Option<Self> {
        // static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV));
        let id = u8::try_from((ccinfo_flags & 0x7ff) >> 1).ok()?;
        Self::try_from_primitive(id).ok()
    }

    /// Extract calling convention from INVOKE CCInfo flags.
    ///
    /// Takes the low 10 bits as the convention id.
    /// NOTE(review): assumes all INVOKE flag bits (e.g. explicit-type) sit
    /// above bit 9 — confirm against the LLVM bitcode reader.
    #[must_use]
    pub fn from_invoke_flags(ccinfo_flags: u64) -> Option<Self> {
        let id = u8::try_from(ccinfo_flags & 0x3ff).ok()?;
        Self::try_from_primitive(id).ok()
    }
}
570
/// These are values used in the bitcode files to encode which
/// binop a `CST_CODE_CE_BINOP` refers to.
#[derive(Debug, Copy, Clone, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum BinOpcode {
    /// Addition.
    Add = 0,
    /// Subtraction.
    Sub = 1,
    /// Multiplication.
    Mul = 2,
    /// Unsigned integer division.
    UDiv = 3,
    /// Signed division; overloaded for FP
    SDiv = 4,
    /// Unsigned remainder.
    URem = 5,
    /// Signed remainder; overloaded for FP
    SRem = 6,
    /// Shift left.
    Shl = 7,
    /// Logical shift right (zero fill).
    LShr = 8,
    /// Arithmetic shift right (sign fill).
    AShr = 9,
    /// Bitwise and.
    And = 10,
    /// Bitwise or.
    Or = 11,
    /// Bitwise xor.
    Xor = 12,
}
592
/// Combines the opcode with its appropriate flags.
/// Produced by [`BinOpcode::with_flags`] when decoding a record's
/// optional-flags operand.
#[derive(Debug, Clone, Copy)]
pub enum BinOpcodeFlags {
    /// Addition with overflow flags (nuw, nsw)
    Add(OverflowFlags),
    /// Subtraction with overflow flags (nuw, nsw)
    Sub(OverflowFlags),
    /// Multiplication with overflow flags (nuw, nsw)
    Mul(OverflowFlags),
    /// Unsigned division with exact flag
    UDiv { exact: bool },
    /// Signed division with exact flag
    SDiv { exact: bool },
    /// Unsigned remainder
    URem,
    /// Signed remainder
    SRem,
    /// Shift left with overflow flags (nuw, nsw)
    Shl(OverflowFlags),
    /// Logical shift right with exact flag
    LShr { exact: bool },
    /// Arithmetic shift right with exact flag
    AShr { exact: bool },
    /// Bitwise and
    And,
    /// Bitwise or
    Or,
    /// Bitwise xor
    Xor,
}
623
624impl BinOpcode {
625    #[must_use]
626    pub fn with_flags(self, flags: u8) -> BinOpcodeFlags {
627        match self {
628            Self::Add => BinOpcodeFlags::Add(OverflowFlags::from_bits_truncate(flags)),
629            Self::Sub => BinOpcodeFlags::Sub(OverflowFlags::from_bits_truncate(flags)),
630            Self::Mul => BinOpcodeFlags::Mul(OverflowFlags::from_bits_truncate(flags)),
631            Self::UDiv => BinOpcodeFlags::UDiv { exact: flags != 0 },
632            Self::SDiv => BinOpcodeFlags::SDiv { exact: flags != 0 },
633            Self::URem => BinOpcodeFlags::URem,
634            Self::SRem => BinOpcodeFlags::SRem,
635            Self::Shl => BinOpcodeFlags::Shl(OverflowFlags::from_bits_truncate(flags)),
636            Self::LShr => BinOpcodeFlags::LShr { exact: flags != 0 },
637            Self::AShr => BinOpcodeFlags::AShr { exact: flags != 0 },
638            Self::And => BinOpcodeFlags::And,
639            Self::Or => BinOpcodeFlags::Or,
640            Self::Xor => BinOpcodeFlags::Xor,
641        }
642    }
643}
644
/// Encoded `AtomicOrdering` values.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum AtomicOrdering {
    /// Not an atomic operation.
    #[default]
    NotAtomic = 0,
    /// Atomic but with no ordering constraints (weaker than C++ relaxed).
    Unordered = 1,
    /// Total order on this address only (C++ `memory_order_relaxed`).
    Monotonic = 2,
    /// Acquire ordering for loads.
    Acquire = 3,
    /// Release ordering for stores.
    Release = 4,
    /// Combined acquire + release (for read-modify-write ops).
    AcqRel = 5,
    /// Sequentially consistent.
    SeqCst = 6,
}
658
/// COMDATSELECTIONKIND enumerates the possible selection mechanisms for
/// COMDAT sections.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default, TryFromPrimitive)]
#[repr(u8)]
pub enum ComdatSelectionKind {
    /// The linker may choose any COMDAT.
    #[default]
    Any = 1,
    /// The data referenced by the COMDAT must be the same.
    ExactMatch = 2,
    /// The linker will choose the largest COMDAT.
    Largest = 3,
    /// No deduplication is performed; duplicates are an error.
    NoDuplicates = 4,
    /// The data referenced by the COMDAT must be the same size.
    SameSize = 5,
}
671
/// Atomic read-modify-write operations (the `atomicrmw` instruction's
/// operation field).
#[derive(Debug, Copy, Clone, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum RmwOperation {
    /// `XCHG`: `*ptr = val`.
    Xchg = 0,

    /// `ADD`: `*ptr += val`.
    Add = 1,

    /// `SUB`: `*ptr -= val`.
    Sub = 2,

    /// `AND`: `*ptr &= val`.
    And = 3,

    /// `NAND`: `*ptr = !(*ptr & val)`.
    Nand = 4,

    /// `OR`: `*ptr |= val`.
    Or = 5,

    /// `XOR`: `*ptr ^= val`.
    Xor = 6,

    /// `MAX`: signed maximum.
    Max = 7,

    /// `MIN`: signed minimum.
    Min = 8,

    /// `UMAX`: unsigned maximum.
    UMax = 9,

    /// `UMIN`: unsigned minimum.
    UMin = 10,

    /// `FADD`: floating-point addition.
    FAdd = 11,

    /// `FSUB`: floating-point subtraction.
    FSub = 12,

    /// `FMAX`: floating-point maximum.
    FMax = 13,

    /// `FMIN`: floating-point minimum.
    FMin = 14,

    /// `UINC_WRAP`: unsigned increment, wrapping to 0 past `val`.
    UIncWrap = 15,

    /// `UDEC_WRAP`: unsigned decrement, wrapping to `val` at 0.
    UDecWrap = 16,

    /// `USUB_COND`: unsigned subtract only if it would not underflow.
    USubCond = 17,

    /// `USUB_SAT`: saturating unsigned subtraction (clamps at 0).
    USubSat = 18,
}
734
/// Unary Opcodes (`CST_CODE_CE_UNOP` / instruction unary-op encoding).
#[derive(Debug, Copy, Clone, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum UnaryOpcode {
    /// `UNOP_FNEG`: floating-point negation.
    Fneg = 0,
}
743
bitflags::bitflags! {
    /// Flags stored on inline-asm records (sideeffect/alignstack/dialect/unwind).
    #[derive(Debug, Copy, Clone, Default)]
    pub struct InlineAsmFlags: u8 {
        /// Asm has side effects; must not be removed even if the result is unused.
        const SideEffect = 1 << 0;
        /// Stack must be aligned per target convention around the asm.
        const AlignStack = 1 << 1;
        /// ATT unset, Intel when set
        const AsmDialectIntel = 1 << 2;
        /// May unwind
        const Unwind = 1 << 3;
    }
}
755
bitflags::bitflags! {
    /// `OverflowingBinaryOperatorOptionalFlags`
    ///
    /// The nuw/nsw poison-generating flags on overflow-capable operations.
    #[derive(Debug, Copy, Clone, Default)]
    pub struct OverflowFlags: u8 {
        /// OBO_NO_UNSIGNED_WRAP = no unsigned wrap (nuw)
        const NoUnsignedWrap = 1 << 0;
        /// OBO_NO_SIGNED_WRAP = no signed wrap (nsw)
        const NoSignedWrap = 1 << 1;
    }
}
766
/// Alias matching the LLVM bitcode name for [`OverflowFlags`].
pub type OverflowingBinaryOperatorOptionalFlags = OverflowFlags;
/// `trunc` reuses the same nuw/nsw flag encoding as overflowing binops.
pub type TruncInstOptionalFlags = OverflowFlags;
769
bitflags::bitflags! {
    /// Fast-math flags as encoded in bitcode instruction records.
    #[derive(Debug, Copy, Clone, Default)]
    pub struct FastMathFlags: u8 {
        /// Legacy flag for all unsafe optimizations
        const UnsafeAlgebra = 1 << 0;
        /// Allow optimizations to assume arguments and results are not NaN
        const NoNans = 1 << 1;
        /// Allow optimizations to assume arguments and results are not +/-Inf
        const NoInfs = 1 << 2;
        /// Allow optimizations to ignore the sign of zero
        const NoSignedZeros = 1 << 3;
        /// Allow optimizations to use reciprocal approximations
        const AllowReciprocal = 1 << 4;
        /// Allow fusing multiply-add operations
        const AllowContract = 1 << 5;
        /// Allow approximations for math library functions
        const ApproxFunc = 1 << 6;
        /// Allow reordering of floating-point operations
        const AllowReassoc = 1 << 7;
    }
}
791
bitflags::bitflags! {
    /// `GetElementPtrOptionalFlags`
    ///
    /// Optional flags stored on `getelementptr` records.
    #[derive(Debug, Copy, Clone, Default)]
    pub struct GEPFlags: u8 {
        /// GEP_INBOUNDS = Index is guaranteed within bounds (enables optimizations)
        const Inbounds = 1 << 0;
        /// GEP_NUSW = No unsigned/signed wrap
        const Nusw = 1 << 1;
        /// GEP_NUW = No unsigned wrap
        const Nuw = 1 << 2;
    }
}
804
bitflags::bitflags! {
    /// Markers and flags for call instruction
    ///
    /// NOTE(review): bits 1..=10 of the raw CCInfo word actually hold the
    /// calling-convention id (see `CallConv::from_call_flags`); `Cconv` here
    /// names only the lowest of those bits — confirm intended use.
    #[derive(Debug, Copy, Clone, Default)]
    pub struct CallMarkersFlags: u32 {
        /// `tail` marker.
        const Tail = 1 << 0;
        const Cconv = 1 << 1;
        /// `musttail` marker.
        const MustTail = 1 << 14;
        /// Record carries an explicit function type operand.
        const ExplicitType = 1 << 15;
        /// `notail` marker.
        const NoTail = 1 << 16;
        /// Call has optional fast-math-flags
        const Fmf = 1 << 17;
    }
}
/// `GlobalValue::VisibilityTypes`
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum Visibility {
    /// The GV is visible
    #[default]
    Default = 0,
    /// The GV is hidden
    Hidden = 1,
    /// The GV is protected
    Protected = 2,
}
830
/// `GlobalValue::ThreadLocalMode`
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum ThreadLocalMode {
    /// Not thread-local.
    #[default]
    NotThreadLocal = 0,
    /// General-dynamic TLS model.
    GeneralDynamic = 1,
    /// Local-dynamic TLS model.
    LocalDynamic = 2,
    /// Initial-exec TLS model.
    InitialExec = 3,
    /// Local-exec TLS model.
    LocalExec = 4,
}
842
/// `GlobalValue::UnnamedAddr`
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum UnnamedAddr {
    /// The address of the global is significant
    #[default]
    None = 0,
    /// The address of the global is not significant, but the global cannot be merged with other globals
    Global = 1,
    /// The address of the global is not significant, and the global can be merged with other globals
    Local = 2,
}
855
/// `GlobalValue::PreemptionSpecifier`
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum PreemptionSpecifier {
    /// The global may be replaced by a different definition at link time (interposable)
    #[default]
    DsoPreemptable = 0,
    /// The global's definition is local to the DSO and cannot be replaced at link time
    DsoLocal = 1,
}
866
bitflags::bitflags! {
    /// Floating-point comparison predicates
    ///
    /// `CmpInst::Predicate` in `InstrTypes.h`
    ///
    /// Each of the 16 predicates (values 0–15) is a disjunction of the four
    /// elementary comparison outcomes (equal / less / greater / unordered),
    /// which is why this is modeled as bitflags rather than a plain enum:
    /// every named predicate below is a union of the four base bits.
    #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
    pub struct FCmpPredicate: u8 {
        const Equal = 1 << 0;
        const Less = 1 << 1;
        const Greater = 1 << 2;
        /// At least one operand is NaN
        const Unordered = 1 << 3;

        /// Always false (always folded)
        const FALSE = 0;
        /// Ordered and equal
        const OEQ = Self::Equal.bits();
        /// Ordered and greater than
        const OGT = Self::Greater.bits();
        /// Ordered and greater than or equal
        const OGE = Self::Greater.bits() | Self::Equal.bits();
        /// Ordered and less than
        const OLT = Self::Less.bits();
        /// Ordered and less than or equal
        const OLE = Self::Less.bits() | Self::Equal.bits();
        /// Ordered and not equal
        const ONE = Self::Less.bits() | Self::Greater.bits();
        /// Ordered (no NaNs)
        const ORD = Self::Less.bits() | Self::Greater.bits() | Self::Equal.bits();
        /// Unordered (isnan(X) | isnan(Y))
        const UNO = Self::Unordered.bits();
        /// Unordered or equal
        const UEQ = Self::Unordered.bits() | Self::Equal.bits();
        /// Unordered or greater than
        const UGT = Self::Unordered.bits() | Self::Greater.bits();
        /// Unordered, greater than, or equal
        const UGE = Self::Unordered.bits() | Self::Greater.bits() | Self::Equal.bits();
        /// Unordered or less than
        const ULT = Self::Unordered.bits() | Self::Less.bits();
        /// Unordered, less than, or equal
        const ULE = Self::Unordered.bits() | Self::Less.bits() | Self::Equal.bits();
        /// Unordered or not equal
        const UNE = Self::Unordered.bits() | Self::Less.bits() | Self::Greater.bits();
        /// Always true (always folded)
        const TRUE = Self::Unordered.bits() | Self::Less.bits() | Self::Greater.bits() | Self::Equal.bits();
    }
}
913
/// Integer comparison predicates
///
/// `CmpInst::Predicate` in `InstrTypes.h`
///
/// Discriminants start at 32 because the shared `CmpInst::Predicate`
/// numbering reserves 0–15 for the floating-point predicates
/// ([`FCmpPredicate`]). `Eq`/`Ne` are sign-agnostic; the remaining
/// predicates come in unsigned (`U*`) and signed (`S*`) pairs.
#[derive(Debug, Copy, Clone, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum ICmpPredicate {
    /// Equal
    Eq = 32,
    /// Not equal
    Ne = 33,
    /// Unsigned greater than
    Ugt = 34,
    /// Unsigned greater or equal
    Uge = 35,
    /// Unsigned less than
    Ult = 36,
    /// Unsigned less or equal
    Ule = 37,
    /// Signed greater than
    Sgt = 38,
    /// Signed greater or equal
    Sge = 39,
    /// Signed less than
    Slt = 40,
    /// Signed less or equal
    Sle = 41,
}
939
940impl ICmpPredicate {
941    #[must_use]
942    pub fn is_unsigned(self) -> bool {
943        matches!(self, Self::Ugt | Self::Uge | Self::Ult | Self::Ule)
944    }
945
946    #[must_use]
947    pub fn is_signed(self) -> bool {
948        matches!(self, Self::Sgt | Self::Sge | Self::Slt | Self::Sle)
949    }
950}
951
/// Comparison predicate that can be either floating-point or integer
///
/// `CmpInst::Predicate` in `InstrTypes.h`
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CmpPredicate {
    /// Floating-point predicate (`fcmp`), raw values 0–15
    FCmp(FCmpPredicate),
    /// Integer predicate (`icmp`), raw values 32–41
    ICmp(ICmpPredicate),
}
960
961impl CmpPredicate {
962    #[must_use]
963    pub fn as_fp(self) -> Option<FCmpPredicate> {
964        match self {
965            Self::FCmp(p) => Some(p),
966            Self::ICmp(_) => None,
967        }
968    }
969
970    #[must_use]
971    pub fn as_int(self) -> Option<ICmpPredicate> {
972        match self {
973            Self::FCmp(_) => None,
974            Self::ICmp(p) => Some(p),
975        }
976    }
977}
978
979impl TryFrom<u8> for CmpPredicate {
980    type Error = TryFromPrimitiveError<ICmpPredicate>;
981
982    fn try_from(value: u8) -> Result<Self, Self::Error> {
983        if value <= 15 {
984            Ok(Self::FCmp(FCmpPredicate::from_bits_truncate(value)))
985        } else {
986            ICmpPredicate::try_from_primitive(value).map(Self::ICmp)
987        }
988    }
989}
990
/// `DICompileUnit::DebugEmissionKind`
///
/// How much debug info the compile unit carries.
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum DebugEmissionKind {
    /// No debug info emitted
    #[default]
    NoDebug = 0,
    /// Full debug info
    FullDebug = 1,
    /// Line tables only (no type/variable info)
    LineTablesOnly = 2,
    /// Line info emitted via debug directives only
    DebugDirectivesOnly = 3,
}
1001
/// `DICompileUnit::DebugNameTableKind`
///
/// Which kind of name/accelerator table is requested for the compile unit
/// (the `nameTableKind:` field of `DICompileUnit`).
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
pub enum DebugNameTableKind {
    /// Default name table
    #[default]
    Default = 0,
    /// GNU name table
    Gnu = 1,
    /// No name table
    None = 2,
    /// Apple name table
    Apple = 3,
}
1016
/// LLVM bitcode encodes alignment as `log2(alignment) + 1`
///
/// The stored exponent is a [`NonZero<u8>`] (encoded `0` means "no
/// alignment"), so `Option<Alignment>` is the same size as `u8`.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
#[repr(transparent)]
pub struct Alignment(NonZero<u8>);

impl Alignment {
    /// From the bitcode-encoded value
    ///
    /// Returns `None` for `0` (no alignment specified) and for encodings whose
    /// byte count would not fit in a `usize` — previously such out-of-range
    /// encodings made [`Self::bytes`] overflow its shift (panic in debug
    /// builds). Valid bitcode is unaffected: LLVM caps the alignment exponent
    /// at 32 (`Value::MaxAlignmentExponent`), well below this limit.
    #[must_use]
    pub fn from_encoded(encoded: u8) -> Option<Self> {
        // `bytes()` computes `1usize << (encoded - 1)`, which requires the
        // shift amount to be strictly less than `usize::BITS`.
        if u32::from(encoded) > usize::BITS {
            return None;
        }
        NonZero::new(encoded).map(Self)
    }

    /// `log2` of the alignment in bytes (the encoded value without the +1 bias)
    #[must_use]
    pub fn ilog2(self) -> u8 {
        self.0.get() - 1
    }

    /// Alignment in bytes
    #[must_use]
    pub fn bytes(self) -> usize {
        // Cannot overflow: `from_encoded` guarantees ilog2() < usize::BITS.
        1usize << self.ilog2()
    }
}
1040
/// Address spaces identify different memory regions. The default address space is 0.
///
/// Conversion from `u8` is total (`FromPrimitive` with `catch_all`): 0–7
/// decode to the named variants, anything else is preserved in
/// [`AddressSpace::Other`].
///
/// NOTE(review): a hand-constructed `Other(n)` with `n <= 7` compares unequal
/// to the corresponding named variant under the derived `PartialEq`; always go
/// through the `From<u8>` conversion to get the canonical representation.
#[derive(Debug, Copy, Clone, Eq, PartialEq, FromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum AddressSpace {
    /// Default/generic address space
    Generic = 0,
    /// Global memory
    ///
    /// used by AMDGPU, NVPTX
    Global = 1,
    /// Region memory
    ///
    /// AMDGPU specific
    Region = 2,
    /// Local (AMDGPU)/shared memory (NVPTX, OpenMP)
    Local = 3,
    /// Constant memory
    ///
    /// used by AMDGPU, NVPTX, OpenMP
    Constant = 4,
    /// Private memory
    ///
    /// used by AMDGPU, NVPTX, OpenMP
    Private = 5,
    /// AMDGPU specific
    Constant32Bit = 6,
    /// AMDGPU specific
    Flat = 7,

    /// Any address space not covered by the named variants above
    #[num_enum(catch_all)]
    Other(u8),
}
1073
// The default address space is 0 (`Generic`). Clippy notes this impl could be
// replaced by `#[derive(Default)]` + `#[default]` on the variant, hence the
// allow; kept manual as-is.
#[allow(clippy::derivable_impls)]
impl Default for AddressSpace {
    fn default() -> Self {
        Self::Generic
    }
}
1080
/// Funclet pad instruction kind (`cleanuppad` vs. `catchpad`).
#[derive(Debug, Copy, Clone, Eq, PartialEq, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum FuncletPad {
    /// `cleanuppad` instruction
    CleanupPad = 0,
    /// `catchpad` instruction
    CatchPad = 1,
}