Skip to main content

llvm_bitcode/schema/
values.rs

1//! From the LLVM Project, under the [Apache License v2.0 with LLVM Exceptions](https://llvm.org/LICENSE.txt)
2
3use num_enum::TryFromPrimitive;
4
5#[derive(Debug, Copy, Clone, PartialEq, TryFromPrimitive)]
6#[repr(u8)]
7pub enum AttrKind {
8    /// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias.
9    /// 0 means unaligned (different from align(1)).
10    Alignment = 1,
11    /// inline=always.
12    AlwaysInline = 2,
13    /// Pass structure by value.
14    ByVal = 3,
15    /// Source said inlining was desirable.
16    InlineHint = 4,
17    /// Force argument to be passed in register.
18    InReg = 5,
19    /// Function must be optimized for size first.
20    MinSize = 6,
21    /// Naked function.
22    Naked = 7,
23    /// Nested function static chain.
24    Nest = 8,
25    /// Considered to not alias after call.
26    NoAlias = 9,
27    /// Callee isn't recognized as a builtin.
28    NoBuiltin = 10,
29    NoCapture = 11,
30    /// Call cannot be duplicated.
31    NoDuplicate = 12,
32    /// Disable implicit floating point insts.
33    NoImplicitFloat = 13,
34    /// inline=never.
35    NoInline = 14,
36    /// Function is called early and/or often, so lazy binding isn't worthwhile.
37    NonLazyBind = 15,
38    /// Disable redzone.
39    NoRedZone = 16,
40    /// Mark the function as not returning.
41    NoReturn = 17,
42    /// Function doesn't unwind stack.
43    NoUnwind = 18,
44    /// opt_size.
45    OptimizeForSize = 19,
46    /// Function does not access memory.
47    ReadNone = 20,
48    /// Function only reads from memory.
49    ReadOnly = 21,
50    /// Return value is always equal to this argument.
51    Returned = 22,
52    /// Function can return twice.
53    ReturnsTwice = 23,
54    /// Sign extended before/after call.
55    SExt = 24,
56    /// Alignment of stack for function (3 bits)  stored as log2 of alignment with
57    /// +1 bias 0 means unaligned (different from alignstack=(1)).
58    StackAlignment = 25,
59    /// Stack protection.
60    StackProtect = 26,
61    /// Stack protection required.
62    StackProtectReq = 27,
63    /// Strong Stack protection.
64    StackProtectStrong = 28,
65    /// Hidden pointer to structure to return.
66    StructRet = 29,
67    /// AddressSanitizer is on.
68    SanitizeAddress = 30,
69    /// ThreadSanitizer is on.
70    SanitizeThread = 31,
71    /// MemorySanitizer is on.
72    SanitizeMemory = 32,
73    /// Function must be in a unwind table.
74    UwTable = 33,
75    /// Zero extended before/after call.
76    ZExt = 34,
77    /// Callee is recognized as a builtin, despite nobuiltin attribute on its
78    /// declaration.
79    Builtin = 35,
80    /// Marks function as being in a cold path.
81    Cold = 36,
82    /// Function must not be optimized.
83    OptimizeNone = 37,
84    /// Pass structure in an alloca.
85    InAlloca = 38,
86    /// Pointer is known to be not null.
87    NonNull = 39,
88    /// Build jump-instruction tables and replace refs.
89    JumpTable = 40,
90    /// Pointer is known to be dereferenceable.
91    Dereferenceable = 41,
92    /// Pointer is either null or dereferenceable.
93    DereferenceableOrNull = 42,
94    /// Can only be moved to control-equivalent blocks.
95    /// NB: Could be IntersectCustom with "or" handling.
96    Convergent = 43,
97    /// Safe Stack protection.
98    Safestack = 44,
99    /// Unused
100    ArgMemOnly = 45,
101    /// Argument is swift self/context.
102    SwiftSelf = 46,
103    /// Argument is swift error.
104    SwiftError = 47,
105    /// The function does not recurse.
106    NoRecurse = 48,
107    /// Unused
108    InaccessibleMemOnly = 49,
109    /// Unused
110    InaccessiblememOrArgMemOnly = 50,
111    /// The result of the function is guaranteed to point to a number of bytes that
112    /// we can determine if we know the value of the function's arguments.
113    AllocSize = 51,
114    /// Function only writes to memory.
115    Writeonly = 52,
116    /// Function can be speculated.
117    Speculatable = 53,
118    /// Function was called in a scope requiring strict floating point semantics.
119    StrictFp = 54,
120    /// HWAddressSanitizer is on.
121    SanitizeHwaddress = 55,
122    /// Disable Indirect Branch Tracking.
123    NocfCheck = 56,
124    /// Select optimizations for best fuzzing signal.
125    OptForFuzzing = 57,
126    /// Shadow Call Stack protection.
127    Shadowcallstack = 58,
128    /// Speculative Load Hardening is enabled.
129    ///
130    /// Note that this uses the default compatibility (always compatible during
131    /// inlining) and a conservative merge strategy where inlining an attributed
132    /// body will add the attribute to the caller. This ensures that code carrying
133    /// this attribute will always be lowered with hardening enabled.
134    SpeculativeLoadHardening = 59,
135    /// Parameter is required to be a trivial constant.
136    Immarg = 60,
137    /// Function always comes back to callsite.
138    Willreturn = 61,
139    /// Function does not deallocate memory.
140    Nofree = 62,
141    /// Function does not synchronize.
142    Nosync = 63,
143    /// MemTagSanitizer is on.
144    SanitizeMemtag = 64,
145    /// Similar to byval but without a copy.
146    Preallocated = 65,
147    /// Disable merging for specified functions or call sites.
148    NoMerge = 66,
149    /// Null pointer in address space zero is valid.
150    NullPointerIsValid = 67,
151    /// Parameter or return value may not contain uninitialized or poison bits.
152    Noundef = 68,
153    /// Mark in-memory ABI type.
154    Byref = 69,
155    /// Function is required to make Forward Progress.
156    Mustprogress = 70,
157    /// Function cannot enter into caller's translation unit.
158    NoCallback = 71,
159    /// Marks function as being in a hot path and frequently called.
160    Hot = 72,
161    /// Function should not be instrumented.
162    NoProfile = 73,
163    /// Minimum/Maximum vscale value for function.
164    VscaleRange = 74,
165    /// Argument is swift async context.
166    SwiftAsync = 75,
167    /// No SanitizeCoverage instrumentation.
168    NoSanitizeCoverage = 76,
169    /// Provide pointer element type to intrinsic.
170    Elementtype = 77,
171    /// Do not instrument function with sanitizers.
172    DisableSanitizerInstrumentation = 78,
173    /// No SanitizeBounds instrumentation.
174    NoSanitizeBounds = 79,
175    /// Parameter of a function that tells us the alignment of an allocation, as in
176    /// aligned_alloc and aligned ::operator::new.
177    AllocAlign = 80,
178    /// Parameter is the pointer to be manipulated by the allocator function.
179    AllocatedPointer = 81,
180    /// Describes behavior of an allocator function in terms of known properties.
181    AllocKind = 82,
182    /// Function is a presplit coroutine.
183    PresplitCoroutine = 83,
184    /// Whether to keep return instructions, or replace with a jump to an external
185    /// symbol.
186    FnretthunkExtern = 84,
187    SkipProfile = 85,
188    /// Memory effects of the function.
189    Memory = 86,
190    /// Forbidden floating-point classes.
191    Nofpclass = 87,
192    /// Select optimizations that give decent debug info.
193    OptimizeForDebugging = 88,
194    /// Pointer argument is writable.
195    Writable = 89,
196    CoroOnlyDestroyWhenComplete = 90,
197    /// Argument is dead if the call unwinds.
198    DeadOnUnwind = 91,
199    /// Parameter or return value is within the specified range.
200    Range = 92,
201    /// NumericalStabilitySanitizer is on.
202    SanitizeNumericalStability = 93,
203    /// Pointer argument memory is initialized.
204    Initializes = 94,
205    /// Function has a hybrid patchable thunk.
206    HybridPatchable = 95,
207}
208
209/// These are values used in the bitcode files to encode which
210/// cast a `CST_CODE_CE_CAST` refers to.
211#[derive(Debug, TryFromPrimitive)]
212#[repr(u8)]
213pub enum CastOpcode {
214    Trunc = 0,
215    ZExt = 1,
216    SExt = 2,
217    FpToUi = 3,
218    FpToSi = 4,
219    UiToFp = 5,
220    SiToFp = 6,
221    FpTrunc = 7,
222    FpExt = 8,
223    PtrToInt = 9,
224    IntToPtr = 10,
225    Bitcast = 11,
226    Addrspace = 12,
227}
228
229/// These are bitcode-specific values, different from C++ enum
230#[derive(Debug, TryFromPrimitive)]
231#[repr(u8)]
232pub enum Linkage {
233    /// Externally visible function
234    External = 0,
235    Weak = 1,
236    /// Special purpose, only applies to global arrays
237    Appending = 2,
238    /// Rename collisions when linking (static functions).
239    Internal = 3,
240    Linkonce = 4,
241    /// Externally visible function
242    /// Obsolete DLLImportLinkage
243    #[deprecated]
244    Dllimport = 5,
245    /// Externally visible function
246    /// Obsolete DLLExportLinkage
247    #[deprecated]
248    Dllexport = 6,
249    /// ExternalWeak linkage description.
250    ExternWeak = 7,
251    /// Tentative definitions.
252    Common = 8,
253    /// Like Internal, but omit from symbol table.
254    Private = 9,
255    WeakOdr = 10,
256    LinkonceOdr = 11,
257    /// Available for inspection, not emission.
258    AvailableExternally = 12,
259    Deprecated1 = 13,
260    Deprecated2 = 14,
261}
262
263#[derive(Debug, TryFromPrimitive)]
264#[repr(u8)]
265pub enum DllStorageClass {
266    Default = 0,
267    Import = 1,
268    Export = 2,
269}
270
271#[derive(Debug, TryFromPrimitive)]
272#[repr(u8)]
273pub enum CallConv {
274    /// The default llvm calling convention, compatible with C. This convention
275    /// is the only one that supports varargs calls. As with typical C calling
276    /// conventions, the callee/caller have to tolerate certain amounts of
277    /// prototype mismatch.
278    C = 0,
279    /// Attempts to make calls as fast as possible (e.g. by passing things in
280    /// registers).
281    Fast = 8,
282    /// Attempts to make code in the caller as efficient as possible under the
283    /// assumption that the call is not commonly executed. As such, these calls
284    /// often preserve all registers so that the call does not break any live
285    /// ranges in the caller side.
286    Cold = 9,
287    /// Used by the Glasgow Haskell Compiler (GHC).
288    GHC = 10,
289    /// Used by the High-Performance Erlang Compiler (HiPE).
290    HiPE = 11,
291    /// Used for dynamic register based calls (e.g. stackmap and patchpoint
292    /// intrinsics).
293    AnyReg = 13,
294    /// Used for runtime calls that preserves most registers.
295    PreserveMost = 14,
296    /// Used for runtime calls that preserves (almost) all registers.
297    PreserveAll = 15,
298    /// Calling convention for Swift.
299    Swift = 16,
300    /// Used for access functions.
301    CxxFastTls = 17,
302    /// Attemps to make calls as fast as possible while guaranteeing that tail
303    /// call optimization can always be performed.
304    Tail = 18,
305    /// Special calling convention on Windows for calling the Control Guard
306    /// Check ICall funtion. The function takes exactly one argument (address of
307    /// the target function) passed in the first argument register, and has no
308    /// return value. All register values are preserved.
309    CFGuardCheck = 19,
310    /// This follows the Swift calling convention in how arguments are passed
311    /// but guarantees tail calls will be made by making the callee clean up
312    /// their stack.
313    SwiftTail = 20,
314    /// Used for runtime calls that preserves none general registers.
315    PreserveNone = 21,
316    /// stdcall is mostly used by the Win32 API. It is basically the same as the
317    /// C convention with the difference in that the callee is responsible for
318    /// popping the arguments from the stack.
319    X86StdCall = 64,
320    /// 'fast' analog of X86_StdCall. Passes first two arguments in ECX:EDX
321    /// registers, others - via stack. Callee is responsible for stack cleaning.
322    X86FastCall = 65,
323    /// ARM Procedure Calling Standard (obsolete, but still used on some
324    /// targets).
325    ArmApcs = 66,
326    /// ARM Architecture Procedure Calling Standard calling convention (aka
327    /// EABI). Soft float variant.
328    ArmAapcs = 67,
329    /// Same as ARM_AAPCS, but uses hard floating point ABI.
330    ArmAapcsVfp = 68,
331    /// Used for MSP430 interrupt routines.
332    Msp430Intr = 69,
333    /// Similar to X86_StdCall. Passes first argument in ECX, others via stack.
334    /// Callee is responsible for stack cleaning. MSVC uses this by default for
335    /// methods in its ABI.
336    X86ThisCall = 70,
337    /// Call to a PTX kernel. Passes all arguments in parameter space.
338    PTXKernel = 71,
339    /// Call to a PTX device function. Passes all arguments in register or
340    /// parameter space.
341    PTXDevice = 72,
342    /// Used for SPIR non-kernel device functions. No lowering or expansion of
343    /// arguments. Structures are passed as a pointer to a struct with the
344    /// byval attribute. Functions can only call SPIR_FUNC and SPIR_KERNEL
345    /// functions. Functions can only have zero or one return values. Variable
346    /// arguments are not allowed, except for printf. How arguments/return
347    /// values are lowered are not specified. Functions are only visible to the
348    /// devices.
349    SpirFunc = 75,
350    /// Used for SPIR kernel functions. Inherits the restrictions of SPIR_FUNC,
351    /// except it cannot have non-void return values, it cannot have variable
352    /// arguments, it can also be called by the host or it is externally
353    /// visible.
354    SpirKernel = 76,
355    /// Used for Intel OpenCL built-ins.
356    IntelOclBi = 77,
357    /// The C convention as specified in the x86-64 supplement to the System V
358    /// ABI, used on most non-Windows systems.
359    X8664SysV = 78,
360    /// The C convention as implemented on Windows/x86-64 and AArch64. It
361    /// differs from the more common \c X86_64_SysV convention in a number of
362    /// ways, most notably in that XMM registers used to pass arguments are
363    /// shadowed by GPRs, and vice versa. On AArch64, this is identical to the
364    /// normal C (AAPCS) calling convention for normal functions, but floats are
365    /// passed in integer registers to variadic functions.
366    Win64 = 79,
367    /// MSVC calling convention that passes vectors and vector aggregates in SSE
368    /// registers.
369    X86VectorCall = 80,
370    /// Placeholders for HHVM calling conventions (deprecated, removed).
371    #[deprecated]
372    DummyHhvm = 81,
373    DummyHhvmC = 82,
374    /// x86 hardware interrupt context. Callee may take one or two parameters,
375    /// where the 1st represents a pointer to hardware context frame and the 2nd
376    /// represents hardware error code, the presence of the later depends on the
377    /// interrupt vector taken. Valid for both 32- and 64-bit subtargets.
378    X86Intr = 83,
379    /// Used for AVR interrupt routines.
380    AvrIntr = 84,
381    /// Used for AVR signal routines.
382    AvrSignal = 85,
383    /// Used for special AVR rtlib functions which have an "optimized"
384    /// convention to preserve registers.
385    AvrBuiltin = 86,
386    /// Used for Mesa vertex shaders, or AMDPAL last shader stage before
387    /// rasterization (vertex shader if tessellation and geometry are not in
388    /// use, or otherwise copy shader if one is needed).
389    AmdGpuVs = 87,
390    /// Used for Mesa/AMDPAL geometry shaders.
391    AmdGpuGs = 88,
392    /// Used for Mesa/AMDPAL pixel shaders.
393    AmdGpuPs = 89,
394    /// Used for Mesa/AMDPAL compute shaders.
395    AmdGpuCs = 90,
396    /// Used for AMDGPU code object kernels.
397    AmdGpuKernel = 91,
398    /// Register calling convention used for parameters transfer optimization
399    X86RegCall = 92,
400    /// Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
401    AmdGpuHs = 93,
402    /// Used for special MSP430 rtlib functions which have an "optimized"
403    /// convention using additional registers.
404    Msp430Builtin = 94,
405    /// Used for AMDPAL vertex shader if tessellation is in use.
406    AmdGpuLs = 95,
407    /// Used for AMDPAL shader stage before geometry shader if geometry is in
408    /// use. So either the domain (= tessellation evaluation) shader if
409    /// tessellation is in use, or otherwise the vertex shader.
410    AmdGpuEs = 96,
411    /// Used between AArch64 Advanced SIMD functions
412    AArch64VectorCall = 97,
413    /// Used between AArch64 SVE functions
414    AArch64SVEVectorCall = 98,
415    /// For emscripten __invoke_* functions. The first argument is required to
416    /// be the function ptr being indirectly called. The remainder matches the
417    /// regular calling convention.
418    WasmEmscriptenInvoke = 99,
419    /// Used for AMD graphics targets.
420    AmdGpuGfx = 100,
421    /// Used for M68k interrupt routines.
422    M68kIntr = 101,
423    /// Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
424    AArch64SmeAbiSupportRoutinesPreserveMostFromX0 = 102,
425    /// Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
426    AArch64SmeAbiSupportRoutinesPreserveMostFromX2 = 103,
427    /// Used on AMDGPUs to give the middle-end more control over argument
428    /// placement.
429    AmdGpuCSChain = 104,
430    /// Used on AMDGPUs to give the middle-end more control over argument
431    /// placement. Preserves active lane values for input VGPRs.
432    AmdGpuCSChainPreserve = 105,
433    /// Used for M68k rtd-based CC (similar to X86's stdcall).
434    M68kRTD = 106,
435    /// Used by GraalVM. Two additional registers are reserved.
436    Graal = 107,
437    /// Calling convention used in the ARM64EC ABI to implement calls between
438    /// x64 code and thunks. This is basically the x64 calling convention using
439    /// ARM64 register names. The first parameter is mapped to x9.
440    Arm64ECThunkX64 = 108,
441    /// Calling convention used in the ARM64EC ABI to implement calls between
442    /// ARM64 code and thunks. This is just the ARM64 calling convention,
443    /// except that the first parameter is mapped to x9.
444    Arm64ECThunkNative = 109,
445    /// Calling convention used for RISC-V V-extension.
446    RiscVVectorCall = 110,
447    /// Preserve X1-X15, X19-X29, SP, Z0-Z31, P0-P15.
448    AArch64SmeAbiSupportRoutinesPreserveMostFromX1 = 111,
449}
450
451/// call conv field in bitcode is often mixed with flags
452impl CallConv {
453    #[doc(hidden)]
454    #[deprecated]
455    pub fn from_flags(ccinfo_flags: u64) -> Result<Self, String> {
456        Self::from_call_flags(ccinfo_flags).ok_or_else(|| "out of range".into())
457    }
458
459    /// Extract calling convention from CALL/CALLBR CCInfo flags.
460    #[must_use]
461    pub fn from_call_flags(ccinfo_flags: u64) -> Option<Self> {
462        // static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV));
463        let id = u8::try_from((ccinfo_flags & 0x7ff) >> 1).ok()?;
464        Self::try_from_primitive(id).ok()
465    }
466
467    /// Extract calling convention from INVOKE CCInfo flags.
468    #[must_use]
469    pub fn from_invoke_flags(ccinfo_flags: u64) -> Option<Self> {
470        let id = u8::try_from(ccinfo_flags & 0x3ff).ok()?;
471        Self::try_from_primitive(id).ok()
472    }
473}
474
475/// These are values used in the bitcode files to encode which
476/// binop a `CST_CODE_CE_BINOP` refers to.
477#[derive(Debug, TryFromPrimitive)]
478#[repr(u8)]
479pub enum BinOpcode {
480    Add = 0,
481    Sub = 1,
482    Mul = 2,
483    Udiv = 3,
484    /// overloaded for FP
485    Sdiv = 4,
486    Urem = 5,
487    /// overloaded for FP
488    Srem = 6,
489    Shl = 7,
490    Lshr = 8,
491    Ashr = 9,
492    And = 10,
493    Or = 11,
494    Xor = 12,
495}
496
497/// Encoded `AtomicOrdering` values.
498#[derive(Debug, TryFromPrimitive, Default)]
499#[repr(u8)]
500pub enum AtomicOrdering {
501    #[default]
502    Notatomic = 0,
503    Unordered = 1,
504    Monotonic = 2,
505    Acquire = 3,
506    Release = 4,
507    AcqRel = 5,
508    SeqCst = 6,
509}
510
/// COMDATSELECTIONKIND enumerates the possible selection mechanisms for
/// COMDAT sections.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
pub enum ComdatSelectionKind {
    /// The linker may choose any COMDAT.
    Any = 1,
    /// The data referenced by the COMDAT must be the same.
    ExactMatch = 2,
    /// The linker will choose the largest COMDAT.
    Largest = 3,
    /// No deduplication is performed.
    NoDuplicates = 4,
    /// The data referenced by the COMDAT must be the same size.
    SameSize = 5,
}
522
/// Atomic read-modify-write operations
///
/// Encodes the `RMW_*` operation codes carried by `atomicrmw` records.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum RmwOperation {
    /// `XCHG` — store the operand, returning the old value.
    Xchg = 0,

    /// `ADD` — integer addition.
    Add = 1,

    /// `SUB` — integer subtraction.
    Sub = 2,

    /// `AND` — bitwise and.
    And = 3,

    /// `NAND` — bitwise not-and.
    Nand = 4,

    /// `OR` — bitwise or.
    Or = 5,

    /// `XOR` — bitwise exclusive-or.
    Xor = 6,

    /// `MAX` — signed maximum.
    Max = 7,

    /// `MIN` — signed minimum.
    Min = 8,

    /// `UMAX` — unsigned maximum.
    Umax = 9,

    /// `UMIN` — unsigned minimum.
    Umin = 10,

    /// `FADD` — floating-point addition.
    Fadd = 11,

    /// `FSUB` — floating-point subtraction.
    Fsub = 12,

    /// `FMAX` — floating-point maximum.
    Fmax = 13,

    /// `FMIN` — floating-point minimum.
    Fmin = 14,

    /// `UINC_WRAP` — unsigned increment, wrapping to zero at the operand.
    UincWrap = 15,

    /// `UDEC_WRAP` — unsigned decrement, wrapping to the operand at zero.
    UdecWrap = 16,

    /// `USUB_COND` — unsigned subtract, only if it would not underflow.
    UsSubCond = 17,

    /// `USUB_SAT` — unsigned subtract, saturating at zero.
    UsSubSat = 18,
}
585
/// Unary Opcodes
///
/// Values used in bitcode to encode which unary operation a record refers to.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum UnaryOpcode {
    /// `UNOP_FNEG` — floating-point negation.
    Fneg = 0,
}
594
/// Flags for serializing
/// OverflowingBinaryOperator's SubclassOptionalData contents.
///
/// NOTE: these discriminants are bit *positions* within the serialized flags
/// word, not masks — test them as `flags & (1 << value)`.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum OverflowingBinaryOperatorOptionalFlags {
    /// `nuw` — no unsigned wrap.
    NoUnsignedWrap = 0,
    /// `nsw` — no signed wrap.
    NoSignedWrap = 1,
}
604
/// Flags for serializing
/// TruncInstOptionalFlags's SubclassOptionalData contents.
///
/// NOTE: these discriminants are bit *positions* within the serialized flags
/// word, not masks — test them as `flags & (1 << value)`.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum TruncInstOptionalFlags {
    /// `nuw` — no unsigned wrap.
    NoUnsignedWrap = 0,
    /// `nsw` — no signed wrap.
    NoSignedWrap = 1,
}
614
/// FastMath Flags
/// This is a fixed layout derived from the bitcode emitted by LLVM 5.0
/// intended to decouple the in-memory representation from the serialization.
///
/// NOTE: unlike most enums in this file, the discriminants here are single-bit
/// *masks*; `try_from_primitive` therefore only succeeds for a value with
/// exactly one flag set. A combined flag word must be tested bit-by-bit.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
pub enum FastMathMap {
    UnsafeAlgebra = 1 << 0, // Legacy
    NoNaNs = 1 << 1,
    NoInfs = 1 << 2,
    NoSignedZeros = 1 << 3,
    AllowReciprocal = 1 << 4,
    AllowContract = 1 << 5,
    ApproxFunc = 1 << 6,
    AllowReassoc = 1 << 7,
}
630
/// Flags for serializing
/// GEPOperator's SubclassOptionalData contents.
///
/// NOTE: these discriminants are bit *positions* within the serialized flags
/// word, not masks — test them as `flags & (1 << value)`.
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum GetElementPtrOptionalFlags {
    /// `inbounds`.
    Inbounds = 0,
    /// `nusw` — no unsigned signed wrap.
    Nusw = 1,
    /// `nuw` — no unsigned wrap.
    Nuw = 2,
}
641
/// Markers and flags for call instruction.
///
/// NOTE: these discriminants are bit *positions* within the CCInfo operand of
/// CALL/CALLBR records, not masks. The calling-convention ID itself occupies
/// the bits starting at `Cconv` (which is why `CallConv::from_call_flags`
/// shifts right by 1 after masking).
#[derive(Debug, Clone, Copy, TryFromPrimitive)]
#[repr(u8)]
#[non_exhaustive]
pub enum CallMarkersFlags {
    /// Call is marked `tail`.
    Tail = 0,
    /// First bit of the embedded calling-convention field.
    Cconv = 1,
    /// Call is marked `musttail`.
    MustTail = 14,
    /// Record carries an explicit function type operand.
    ExplicitType = 15,
    /// Call is marked `notail`.
    NoTail = 16,
    Fmf = 17, // Call has optional fast-math-flags.
}