// llvm_bitcode/schema/values.rs
//! Value-related enum encodings for LLVM bitcode.
//!
//! From the LLVM Project, under the [Apache License v2.0 with LLVM Exceptions](https://llvm.org/LICENSE.txt)

use num_enum::TryFromPrimitive;

5#[derive(Debug, Copy, Clone, PartialEq, TryFromPrimitive)]
6#[repr(u8)]
7pub enum AttrKind {
8    /// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias.
9    /// 0 means unaligned (different from align(1)).
10    Alignment = 1,
11    /// inline=always.
12    AlwaysInline = 2,
13    /// Pass structure by value.
14    ByVal = 3,
15    /// Source said inlining was desirable.
16    InlineHint = 4,
17    /// Force argument to be passed in register.
18    InReg = 5,
19    /// Function must be optimized for size first.
20    MinSize = 6,
21    /// Naked function.
22    Naked = 7,
23    /// Nested function static chain.
24    Nest = 8,
25    /// Considered to not alias after call.
26    NoAlias = 9,
27    /// Callee isn't recognized as a builtin.
28    NoBuiltin = 10,
29    NoCapture = 11,
30    /// Call cannot be duplicated.
31    NoDuplicate = 12,
32    /// Disable implicit floating point insts.
33    NoImplicitFloat = 13,
34    /// inline=never.
35    NoInline = 14,
36    /// Function is called early and/or often, so lazy binding isn't worthwhile.
37    NonLazyBind = 15,
38    /// Disable redzone.
39    NoRedZone = 16,
40    /// Mark the function as not returning.
41    NoReturn = 17,
42    /// Function doesn't unwind stack.
43    NoUnwind = 18,
44    /// opt_size.
45    OptimizeForSize = 19,
46    /// Function does not access memory.
47    ReadNone = 20,
48    /// Function only reads from memory.
49    ReadOnly = 21,
50    /// Return value is always equal to this argument.
51    Returned = 22,
52    /// Function can return twice.
53    ReturnsTwice = 23,
54    /// Sign extended before/after call.
55    SExt = 24,
56    /// Alignment of stack for function (3 bits)  stored as log2 of alignment with
57    /// +1 bias 0 means unaligned (different from alignstack=(1)).
58    StackAlignment = 25,
59    /// Stack protection.
60    StackProtect = 26,
61    /// Stack protection required.
62    StackProtectReq = 27,
63    /// Strong Stack protection.
64    StackProtectStrong = 28,
65    /// Hidden pointer to structure to return.
66    StructRet = 29,
67    /// AddressSanitizer is on.
68    SanitizeAddress = 30,
69    /// ThreadSanitizer is on.
70    SanitizeThread = 31,
71    /// MemorySanitizer is on.
72    SanitizeMemory = 32,
73    /// Function must be in a unwind table.
74    UwTable = 33,
75    /// Zero extended before/after call.
76    ZExt = 34,
77    /// Callee is recognized as a builtin, despite nobuiltin attribute on its
78    /// declaration.
79    Builtin = 35,
80    /// Marks function as being in a cold path.
81    Cold = 36,
82    /// Function must not be optimized.
83    OptimizeNone = 37,
84    /// Pass structure in an alloca.
85    InAlloca = 38,
86    /// Pointer is known to be not null.
87    NonNull = 39,
88    /// Build jump-instruction tables and replace refs.
89    JumpTable = 40,
90    /// Pointer is known to be dereferenceable.
91    Dereferenceable = 41,
92    /// Pointer is either null or dereferenceable.
93    DereferenceableOrNull = 42,
94    /// Can only be moved to control-equivalent blocks.
95    /// NB: Could be IntersectCustom with "or" handling.
96    Convergent = 43,
97    /// Safe Stack protection.
98    Safestack = 44,
99    /// Unused
100    ArgMemOnly = 45,
101    /// Argument is swift self/context.
102    SwiftSelf = 46,
103    /// Argument is swift error.
104    SwiftError = 47,
105    /// The function does not recurse.
106    NoRecurse = 48,
107    /// Unused
108    InaccessibleMemOnly = 49,
109    /// Unused
110    InaccessibleMemOrArgMemOnly = 50,
111    /// The result of the function is guaranteed to point to a number of bytes that
112    /// we can determine if we know the value of the function's arguments.
113    AllocSize = 51,
114    /// Function only writes to memory.
115    WriteOnly = 52,
116    /// Function can be speculated.
117    Speculatable = 53,
118    /// Function was called in a scope requiring strict floating point semantics.
119    StrictFp = 54,
120    /// HWAddressSanitizer is on.
121    SanitizeHwAddress = 55,
122    /// Disable Indirect Branch Tracking.
123    NoCfCheck = 56,
124    /// Select optimizations for best fuzzing signal.
125    OptForFuzzing = 57,
126    /// Shadow Call Stack protection.
127    Shadowcallstack = 58,
128    /// Speculative Load Hardening is enabled.
129    ///
130    /// Note that this uses the default compatibility (always compatible during
131    /// inlining) and a conservative merge strategy where inlining an attributed
132    /// body will add the attribute to the caller. This ensures that code carrying
133    /// this attribute will always be lowered with hardening enabled.
134    SpeculativeLoadHardening = 59,
135    /// Parameter is required to be a trivial constant.
136    ImmArg = 60,
137    /// Function always comes back to callsite.
138    Willreturn = 61,
139    /// Function does not deallocate memory.
140    Nofree = 62,
141    /// Function does not synchronize.
142    Nosync = 63,
143    /// MemTagSanitizer is on.
144    SanitizeMemtag = 64,
145    /// Similar to byval but without a copy.
146    Preallocated = 65,
147    /// Disable merging for specified functions or call sites.
148    NoMerge = 66,
149    /// Null pointer in address space zero is valid.
150    NullPointerIsValid = 67,
151    /// Parameter or return value may not contain uninitialized or poison bits.
152    NoUndef = 68,
153    /// Mark in-memory ABI type.
154    Byref = 69,
155    /// Function is required to make Forward Progress.
156    MustProgress = 70,
157    /// Function cannot enter into caller's translation unit.
158    NoCallback = 71,
159    /// Marks function as being in a hot path and frequently called.
160    Hot = 72,
161    /// Function should not be instrumented.
162    NoProfile = 73,
163    /// Minimum/Maximum vscale value for function.
164    VscaleRange = 74,
165    /// Argument is swift async context.
166    SwiftAsync = 75,
167    /// No SanitizeCoverage instrumentation.
168    NoSanitizeCoverage = 76,
169    /// Provide pointer element type to intrinsic.
170    Elementtype = 77,
171    /// Do not instrument function with sanitizers.
172    DisableSanitizerInstrumentation = 78,
173    /// No SanitizeBounds instrumentation.
174    NoSanitizeBounds = 79,
175    /// Parameter of a function that tells us the alignment of an allocation, as in
176    /// aligned_alloc and aligned ::operator::new.
177    AllocAlign = 80,
178    /// Parameter is the pointer to be manipulated by the allocator function.
179    AllocatedPointer = 81,
180    /// Describes behavior of an allocator function in terms of known properties.
181    AllocKind = 82,
182    /// Function is a presplit coroutine.
183    PresplitCoroutine = 83,
184    /// Whether to keep return instructions, or replace with a jump to an external
185    /// symbol.
186    FnretthunkExtern = 84,
187    SkipProfile = 85,
188    /// Memory effects of the function.
189    Memory = 86,
190    /// Forbidden floating-point classes.
191    Nofpclass = 87,
192    /// Select optimizations that give decent debug info.
193    OptimizeForDebugging = 88,
194    /// Pointer argument is writable.
195    Writable = 89,
196    CoroOnlyDestroyWhenComplete = 90,
197    /// Argument is dead if the call unwinds.
198    DeadOnUnwind = 91,
199    /// Parameter or return value is within the specified range.
200    Range = 92,
201    /// NumericalStabilitySanitizer is on.
202    SanitizeNumericalStability = 93,
203    /// Pointer argument memory is initialized.
204    Initializes = 94,
205    /// Function has a hybrid patchable thunk.
206    HybridPatchable = 95,
207    /// RealtimeSanitizer is on.
208    SanitizeRealtime = 96,
209    /// RealtimeSanitizer should error if a real-time unsafe function is invoked
210    /// during a real-time sanitized function (see `sanitize_realtime`).
211    SanitizeRealtimeBlocking = 97,
212    /// The coroutine call meets the elide requirement. Hint the optimization
213    /// pipeline to perform elide on the call or invoke instruction.
214    CoroElideSafe = 98,
215    /// No extension needed before/after call (high bits are undefined).
216    NoExt = 99,
217    /// Function is not a source of divergence.
218    NoDivergenceSource = 100,
219    /// TypeSanitizer is on.
220    SanitizeType = 101,
221    /// Specify how the pointer may be captured.
222    Captures = 102,
223    /// Argument is dead upon function return.
224    DeadOnReturn = 103,
225    /// Allocation token instrumentation is on.
226    SanitizeAllocToken = 104,
227    /// Result will not be undef or poison if all arguments are not undef and not
228    /// poison.
229    NoCreateUndefOrPoison = 105,
230    /// Indicate the denormal handling of the default floating-point
231    /// environment.
232    DenormalFpEnv = 106,
233    NoOutline = 107,
234}
235
/// These are values used in the bitcode files to encode which
/// cast a `CST_CODE_CE_CAST` refers to.
#[derive(Debug, TryFromPrimitive)]
#[repr(u8)]
pub enum CastOpcode {
    /// Truncate integer to a smaller width.
    Trunc = 0,
    /// Zero-extend integer.
    ZExt = 1,
    /// Sign-extend integer.
    SExt = 2,
    /// Floating point to unsigned integer.
    FpToUi = 3,
    /// Floating point to signed integer.
    FpToSi = 4,
    /// Unsigned integer to floating point.
    UiToFp = 5,
    /// Signed integer to floating point.
    SiToFp = 6,
    /// Truncate floating point to a smaller type.
    FpTrunc = 7,
    /// Extend floating point to a larger type.
    FpExt = 8,
    /// Pointer to integer.
    PtrToInt = 9,
    /// Integer to pointer.
    IntToPtr = 10,
    /// Reinterpret bits without conversion.
    Bitcast = 11,
    /// Cast a pointer between address spaces.
    Addrspace = 12,
}

/// These are bitcode-specific values, different from C++ enum
///
/// The `#[deprecated]` variants are old on-disk encodings that newer LLVM
/// versions no longer emit but that must still be accepted when reading
/// older bitcode.
#[derive(Debug, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum Linkage {
    /// Externally visible function
    External = 0,
    /// Keep one copy of named function when linking (weak)
    /// Old value with implicit comdat.
    #[deprecated]
    WeakAnyOld = 1,
    /// Special purpose, only applies to global arrays
    Appending = 2,
    /// Rename collisions when linking (static functions).
    Internal = 3,
    /// Keep one copy of function when linking (inline)
    /// Old value with implicit comdat.
    #[deprecated]
    LinkOnceAnyOld = 4,
    /// Externally visible function
    /// Obsolete DLLImportLinkage
    #[deprecated]
    DllImport = 5,
    /// Externally visible function
    /// Obsolete DLLExportLinkage
    #[deprecated]
    DllExport = 6,
    /// ExternalWeak linkage
    ExternWeak = 7,
    /// Tentative definitions.
    Common = 8,
    /// Like Internal, but omit from symbol table.
    Private = 9,
    /// Same, but only replaced by something equivalent.
    /// Old value with implicit comdat.
    #[deprecated]
    WeakOdrOld = 10,
    /// Same, but only replaced by something equivalent.
    /// Old value with implicit comdat.
    #[deprecated]
    LinkOnceOdrOld = 11,
    /// Available for inspection, not emission.
    AvailableExternally = 12,
    /// Like Internal, but omit from symbol table.
    /// Obsolete LinkerPrivateLinkage
    #[deprecated]
    LinkerPrivate = 13,
    /// Like Internal, but omit from symbol table.
    /// Obsolete LinkerPrivateWeakLinkage
    #[deprecated]
    LinkerPrivateWeak = 14,
    /// Externally visible function
    /// Obsolete LinkOnceODRAutoHideLinkage
    #[deprecated]
    LinkOnceOdrAutoHide = 15,
    /// Keep one copy of named function when linking (weak)
    WeakAny = 16,
    /// Same, but only replaced by something equivalent.
    WeakOdr = 17,
    /// Keep one copy of function when linking (inline)
    LinkOnceAny = 18,
    /// Same, but only replaced by something equivalent.
    LinkOnceOdr = 19,
}

/// DLL storage class encodings (`dllimport` / `dllexport`).
#[derive(Debug, TryFromPrimitive)]
#[repr(u8)]
pub enum DllStorageClass {
    /// No DLL storage class.
    Default = 0,
    /// `dllimport` — definition is imported from a DLL.
    Import = 1,
    /// `dllexport` — definition is exported to other modules.
    Export = 2,
}

329#[derive(Debug, TryFromPrimitive)]
330#[repr(u8)]
331#[non_exhaustive]
332pub enum CallConv {
333    /// The default llvm calling convention, compatible with C. This convention
334    /// is the only one that supports varargs calls. As with typical C calling
335    /// conventions, the callee/caller have to tolerate certain amounts of
336    /// prototype mismatch.
337    C = 0,
338    /// Attempts to make calls as fast as possible (e.g. by passing things in
339    /// registers).
340    Fast = 8,
341    /// Attempts to make code in the caller as efficient as possible under the
342    /// assumption that the call is not commonly executed. As such, these calls
343    /// often preserve all registers so that the call does not break any live
344    /// ranges in the caller side.
345    Cold = 9,
346    /// Used by the Glasgow Haskell Compiler (GHC).
347    Ghc = 10,
348    /// Used by the High-Performance Erlang Compiler (HiPE).
349    HiPE = 11,
350    /// Used for dynamic register based calls (e.g. stackmap and patchpoint
351    /// intrinsics).
352    AnyReg = 13,
353    /// Used for runtime calls that preserves most registers.
354    PreserveMost = 14,
355    /// Used for runtime calls that preserves (almost) all registers.
356    PreserveAll = 15,
357    /// Calling convention for Swift.
358    Swift = 16,
359    /// Used for access functions.
360    CxxFastTls = 17,
361    /// Attemps to make calls as fast as possible while guaranteeing that tail
362    /// call optimization can always be performed.
363    Tail = 18,
364    /// Special calling convention on Windows for calling the Control Guard
365    /// Check ICall funtion. The function takes exactly one argument (address of
366    /// the target function) passed in the first argument register, and has no
367    /// return value. All register values are preserved.
368    CfGuardCheck = 19,
369    /// This follows the Swift calling convention in how arguments are passed
370    /// but guarantees tail calls will be made by making the callee clean up
371    /// their stack.
372    SwiftTail = 20,
373    /// Used for runtime calls that preserves none general registers.
374    PreserveNone = 21,
375    /// stdcall is mostly used by the Win32 API. It is basically the same as the
376    /// C convention with the difference in that the callee is responsible for
377    /// popping the arguments from the stack.
378    X86StdCall = 64,
379    /// 'fast' analog of X86_StdCall. Passes first two arguments in ECX:EDX
380    /// registers, others - via stack. Callee is responsible for stack cleaning.
381    X86FastCall = 65,
382    /// ARM Procedure Calling Standard (obsolete, but still used on some
383    /// targets).
384    ArmApcs = 66,
385    /// ARM Architecture Procedure Calling Standard calling convention (aka
386    /// EABI). Soft float variant.
387    ArmAapcs = 67,
388    /// Same as ARM_AAPCS, but uses hard floating point ABI.
389    ArmAapcsVfp = 68,
390    /// Used for MSP430 interrupt routines.
391    Msp430Intr = 69,
392    /// Similar to X86_StdCall. Passes first argument in ECX, others via stack.
393    /// Callee is responsible for stack cleaning. MSVC uses this by default for
394    /// methods in its ABI.
395    X86ThisCall = 70,
396    /// Call to a PTX kernel. Passes all arguments in parameter space.
397    PtxKernel = 71,
398    /// Call to a PTX device function. Passes all arguments in register or
399    /// parameter space.
400    PtxDevice = 72,
401    /// Used for SPIR non-kernel device functions. No lowering or expansion of
402    /// arguments. Structures are passed as a pointer to a struct with the
403    /// byval attribute. Functions can only call SPIR_FUNC and SPIR_KERNEL
404    /// functions. Functions can only have zero or one return values. Variable
405    /// arguments are not allowed, except for printf. How arguments/return
406    /// values are lowered are not specified. Functions are only visible to the
407    /// devices.
408    SpirFunc = 75,
409    /// Used for SPIR kernel functions. Inherits the restrictions of SPIR_FUNC,
410    /// except it cannot have non-void return values, it cannot have variable
411    /// arguments, it can also be called by the host or it is externally
412    /// visible.
413    SpirKernel = 76,
414    /// Used for Intel OpenCL built-ins.
415    IntelOclBi = 77,
416    /// The C convention as specified in the x86-64 supplement to the System V
417    /// ABI, used on most non-Windows systems.
418    X8664SysV = 78,
419    /// The C convention as implemented on Windows/x86-64 and AArch64. It
420    /// differs from the more common \c X86_64_SysV convention in a number of
421    /// ways, most notably in that XMM registers used to pass arguments are
422    /// shadowed by GPRs, and vice versa. On AArch64, this is identical to the
423    /// normal C (AAPCS) calling convention for normal functions, but floats are
424    /// passed in integer registers to variadic functions.
425    Win64 = 79,
426    /// MSVC calling convention that passes vectors and vector aggregates in SSE
427    /// registers.
428    X86VectorCall = 80,
429    /// Placeholders for HHVM calling conventions (deprecated, removed).
430    #[deprecated]
431    DummyHhvm = 81,
432    DummyHhvmC = 82,
433    /// x86 hardware interrupt context. Callee may take one or two parameters,
434    /// where the 1st represents a pointer to hardware context frame and the 2nd
435    /// represents hardware error code, the presence of the later depends on the
436    /// interrupt vector taken. Valid for both 32- and 64-bit subtargets.
437    X86Intr = 83,
438    /// Used for AVR interrupt routines.
439    AvrIntr = 84,
440    /// Used for AVR signal routines.
441    AvrSignal = 85,
442    /// Used for special AVR rtlib functions which have an "optimized"
443    /// convention to preserve registers.
444    AvrBuiltin = 86,
445    /// Used for Mesa vertex shaders, or AMDPAL last shader stage before
446    /// rasterization (vertex shader if tessellation and geometry are not in
447    /// use, or otherwise copy shader if one is needed).
448    AmdGpuVs = 87,
449    /// Used for Mesa/AMDPAL geometry shaders.
450    AmdGpuGs = 88,
451    /// Used for Mesa/AMDPAL pixel shaders.
452    AmdGpuPs = 89,
453    /// Used for Mesa/AMDPAL compute shaders.
454    AmdGpuCs = 90,
455    /// Used for AMDGPU code object kernels.
456    AmdGpuKernel = 91,
457    /// Register calling convention used for parameters transfer optimization
458    X86RegCall = 92,
459    /// Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
460    AmdGpuHs = 93,
461    /// Used for special MSP430 rtlib functions which have an "optimized"
462    /// convention using additional registers.
463    Msp430Builtin = 94,
464    /// Used for AMDPAL vertex shader if tessellation is in use.
465    AmdGpuLs = 95,
466    /// Used for AMDPAL shader stage before geometry shader if geometry is in
467    /// use. So either the domain (= tessellation evaluation) shader if
468    /// tessellation is in use, or otherwise the vertex shader.
469    AmdGpuEs = 96,
470    /// Used between AArch64 Advanced SIMD functions
471    AArch64VectorCall = 97,
472    /// Used between AArch64 SVE functions
473    AArch64SveVectorCall = 98,
474    /// For emscripten __invoke_* functions. The first argument is required to
475    /// be the function ptr being indirectly called. The remainder matches the
476    /// regular calling convention.
477    WasmEmscriptenInvoke = 99,
478    /// Used for AMD graphics targets.
479    AmdGpuGfx = 100,
480    /// Used for M68k interrupt routines.
481    M68kIntr = 101,
482    /// Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
483    AArch64SmeAbiSupportRoutinesPreserveMostFromX0 = 102,
484    /// Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
485    AArch64SmeAbiSupportRoutinesPreserveMostFromX2 = 103,
486    /// Used on AMDGPUs to give the middle-end more control over argument
487    /// placement.
488    AmdGpuCsChain = 104,
489    /// Used on AMDGPUs to give the middle-end more control over argument
490    /// placement. Preserves active lane values for input VGPRs.
491    AmdGpuCsChainPreserve = 105,
492    /// Used for M68k rtd-based CC (similar to X86's stdcall).
493    M68kRtd = 106,
494    /// Used by GraalVM. Two additional registers are reserved.
495    Graal = 107,
496    /// Calling convention used in the ARM64EC ABI to implement calls between
497    /// x64 code and thunks. This is basically the x64 calling convention using
498    /// ARM64 register names. The first parameter is mapped to x9.
499    Arm64ecThunkX64 = 108,
500    /// Calling convention used in the ARM64EC ABI to implement calls between
501    /// ARM64 code and thunks. This is just the ARM64 calling convention,
502    /// except that the first parameter is mapped to x9.
503    Arm64ecThunkNative = 109,
504    /// Calling convention used for RISC-V V-extension.
505    RiscVVectorCall = 110,
506    /// Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
507    AArch64SmeAbiSupportRoutinesPreserveMostFromX1 = 111,
508    /// Calling convention used for RISC-V V-extension fixed vectors.
509    RiscVVlsCall32 = 112,
510    RiscVVlsCall64 = 113,
511    RiscVVlsCall128 = 114,
512    RiscVVlsCall256 = 115,
513    RiscVVlsCall512 = 116,
514    RiscVVlsCall1024 = 117,
515    RiscVVlsCall2048 = 118,
516    RiscVVlsCall4096 = 119,
517    RiscVVlsCall8192 = 120,
518    RiscVVlsCall16384 = 121,
519    RiscVVlsCall32768 = 122,
520    RiscVVlsCall65536 = 123,
521    AmdGpuGfxWholeWave = 124,
522    /// Calling convention used for CHERIoT when crossing a protection boundary.
523    CHERIoTCompartmentCall = 125,
524    /// Calling convention used for the callee of CHERIoT_CompartmentCall.
525    /// Ignores the first two capability arguments and the first integer
526    /// argument, zeroes all unused return registers on return.
527    CHERIoTCompartmentCallee = 126,
528    /// Calling convention used for CHERIoT for cross-library calls to a
529    /// stateless compartment.
530    CHERIoTLibraryCall = 127,
531}
532
533/// call conv field in bitcode is often mixed with flags
534impl CallConv {
535    #[doc(hidden)]
536    #[deprecated]
537    pub fn from_flags(ccinfo_flags: u64) -> Result<Self, String> {
538        Self::from_call_flags(ccinfo_flags).ok_or_else(|| "out of range".into())
539    }
540
541    /// Extract calling convention from CALL/CALLBR CCInfo flags.
542    #[must_use]
543    pub fn from_call_flags(ccinfo_flags: u64) -> Option<Self> {
544        // static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV));
545        let id = u8::try_from((ccinfo_flags & 0x7ff) >> 1).ok()?;
546        Self::try_from_primitive(id).ok()
547    }
548
549    /// Extract calling convention from INVOKE CCInfo flags.
550    #[must_use]
551    pub fn from_invoke_flags(ccinfo_flags: u64) -> Option<Self> {
552        let id = u8::try_from(ccinfo_flags & 0x3ff).ok()?;
553        Self::try_from_primitive(id).ok()
554    }
555}
556
/// These are values used in the bitcode files to encode which
/// binop a `CST_CODE_CE_BINOP` refers to.
///
/// There are no separate floating-point opcodes: FP division and remainder
/// reuse the `Sdiv`/`Srem` codes (marked "overloaded" below).
#[derive(Debug, TryFromPrimitive)]
#[repr(u8)]
pub enum BinOpcode {
    Add = 0,
    Sub = 1,
    Mul = 2,
    Udiv = 3,
    /// overloaded for FP
    Sdiv = 4,
    Urem = 5,
    /// overloaded for FP
    Srem = 6,
    Shl = 7,
    Lshr = 8,
    Ashr = 9,
    And = 10,
    Or = 11,
    Xor = 12,
}

/// Encoded `AtomicOrdering` values.
///
/// `Notatomic` is the `Default`, marking non-atomic memory operations.
#[derive(Debug, TryFromPrimitive, Default)]
#[repr(u8)]
pub enum AtomicOrdering {
    /// Not an atomic operation.
    #[default]
    Notatomic = 0,
    Unordered = 1,
    Monotonic = 2,
    Acquire = 3,
    Release = 4,
    AcqRel = 5,
    SeqCst = 6,
}

/// COMDATSELECTIONKIND enumerates the possible selection mechanisms for
/// COMDAT sections.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
pub enum ComdatSelectionKind {
    /// The linker may choose any COMDAT copy.
    Any = 1,
    /// The data referenced by the COMDAT must be the same.
    ExactMatch = 2,
    /// The linker will choose the largest COMDAT.
    Largest = 3,
    /// No deduplication is performed.
    NoDuplicates = 4,
    /// The data referenced by the COMDAT must be the same size.
    SameSize = 5,
}

/// Atomic read-modify-write operations
///
/// Each variant's doc comment names the corresponding `RMW_*` bitcode code.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum RmwOperation {
    /// `XCHG`
    Xchg = 0,

    /// `ADD`
    Add = 1,

    /// `SUB`
    Sub = 2,

    /// `AND`
    And = 3,

    /// `NAND`
    Nand = 4,

    /// `OR`
    Or = 5,

    /// `XOR`
    Xor = 6,

    /// `MAX`
    Max = 7,

    /// `MIN`
    Min = 8,

    /// `UMAX`
    Umax = 9,

    /// `UMIN`
    Umin = 10,

    /// `FADD`
    Fadd = 11,

    /// `FSUB`
    Fsub = 12,

    /// `FMAX`
    Fmax = 13,

    /// `FMIN`
    Fmin = 14,

    /// `UINC_WRAP`
    UincWrap = 15,

    /// `UDEC_WRAP`
    UdecWrap = 16,

    /// `USUB_COND`
    UsSubCond = 17,

    /// `USUB_SAT`
    UsSubSat = 18,
}

/// Unary Opcodes
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum UnaryOpcode {
    /// `UNOP_FNEG` — floating-point negation.
    Fneg = 0,
}

/// Flags for serializing
/// OverflowingBinaryOperator's SubclassOptionalData contents.
///
/// NOTE(review): these appear to be bit *positions* within the flags word
/// (LLVM shifts `1 << flag`), not masks — confirm against the bitcode writer.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum OverflowingBinaryOperatorOptionalFlags {
    /// `nuw`
    NoUnsignedWrap = 0,
    /// `nsw`
    NoSignedWrap = 1,
}

/// Flags for serializing
/// TruncInstOptionalFlags's SubclassOptionalData contents.
///
/// NOTE(review): like the overflowing-binop flags, these appear to be bit
/// positions, not masks — confirm against the bitcode writer.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum TruncInstOptionalFlags {
    /// `nuw` on `trunc`
    NoUnsignedWrap = 0,
    /// `nsw` on `trunc`
    NoSignedWrap = 1,
}

/// FastMath Flags
/// This is a fixed layout derived from the bitcode emitted by LLVM 5.0
/// intended to decouple the in-memory representation from the serialization.
///
/// Each variant is a single-bit mask. Because the type derives
/// `TryFromPrimitive`, conversion from a raw byte succeeds only when exactly
/// one flag bit is set; combined flag words must be tested bitwise against
/// these masks instead.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
pub enum FastMathMap {
    /// Legacy umbrella flag from before the individual flags were split out.
    UnsafeAlgebra = 1 << 0, // Legacy
    /// `nnan` — assume no NaNs.
    NoNaNs = 1 << 1,
    /// `ninf` — assume no infinities.
    NoInfs = 1 << 2,
    /// `nsz` — ignore the sign of zero.
    NoSignedZeros = 1 << 3,
    /// `arcp` — allow use of reciprocals.
    AllowReciprocal = 1 << 4,
    /// `contract` — allow floating-point contraction (e.g. fused multiply-add).
    AllowContract = 1 << 5,
    /// `afn` — allow approximate functions.
    ApproxFunc = 1 << 6,
    /// `reassoc` — allow reassociation.
    AllowReassoc = 1 << 7,
}

/// Flags for serializing
/// GEPOperator's SubclassOptionalData contents.
///
/// NOTE(review): these appear to be bit positions within the flags word,
/// not masks — confirm against the bitcode writer.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum GetElementPtrOptionalFlags {
    /// `inbounds`
    Inbounds = 0,
    /// `nusw` — no unsigned signed wrap.
    Nusw = 1,
    /// `nuw` — no unsigned wrap.
    Nuw = 2,
}

/// Markers and flags for call instruction.
///
/// Values are bit positions within the packed `CCInfo` operand. `Cconv`
/// marks the lowest bit of the calling-convention field — this is the `>> 1`
/// shift used by `CallConv::from_call_flags`.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum CallMarkersFlags {
    /// Call is marked `tail`.
    Tail = 0,
    /// Start of the packed calling-convention field.
    Cconv = 1,
    /// Call is marked `musttail`.
    MustTail = 14,
    /// Record carries an explicit callee function type.
    ExplicitType = 15,
    /// Call is marked `notail`.
    NoTail = 16,
    /// Call has optional fast-math-flags.
    Fmf = 17,