#[repr(u8)]pub enum Opcode {
Show 188 variants
Jump = 1,
Brif = 2,
BrTable = 3,
Debugtrap = 4,
Trap = 5,
Trapz = 6,
Trapnz = 7,
Return = 8,
Call = 9,
CallIndirect = 10,
ReturnCall = 11,
ReturnCallIndirect = 12,
FuncAddr = 13,
TryCall = 14,
TryCallIndirect = 15,
Splat = 16,
Swizzle = 17,
X86Pshufb = 18,
Insertlane = 19,
Extractlane = 20,
Smin = 21,
Umin = 22,
Smax = 23,
Umax = 24,
AvgRound = 25,
UaddSat = 26,
SaddSat = 27,
UsubSat = 28,
SsubSat = 29,
Load = 30,
Store = 31,
Uload8 = 32,
Sload8 = 33,
Istore8 = 34,
Uload16 = 35,
Sload16 = 36,
Istore16 = 37,
Uload32 = 38,
Sload32 = 39,
Istore32 = 40,
StackSwitch = 41,
Uload8x8 = 42,
Sload8x8 = 43,
Uload16x4 = 44,
Sload16x4 = 45,
Uload32x2 = 46,
Sload32x2 = 47,
StackLoad = 48,
StackStore = 49,
StackAddr = 50,
DynamicStackLoad = 51,
DynamicStackStore = 52,
DynamicStackAddr = 53,
GlobalValue = 54,
SymbolValue = 55,
TlsValue = 56,
GetPinnedReg = 57,
SetPinnedReg = 58,
GetFramePointer = 59,
GetStackPointer = 60,
GetReturnAddress = 61,
GetExceptionHandlerAddress = 62,
Iconst = 63,
F16const = 64,
F32const = 65,
F64const = 66,
F128const = 67,
Vconst = 68,
Shuffle = 69,
Nop = 70,
Select = 71,
SelectSpectreGuard = 72,
Bitselect = 73,
X86Blendv = 74,
VanyTrue = 75,
VallTrue = 76,
VhighBits = 77,
Icmp = 78,
IcmpImm = 79,
Iadd = 80,
Isub = 81,
Ineg = 82,
Iabs = 83,
Imul = 84,
Umulhi = 85,
Smulhi = 86,
SqmulRoundSat = 87,
X86Pmulhrsw = 88,
Udiv = 89,
Sdiv = 90,
Urem = 91,
Srem = 92,
IaddImm = 93,
ImulImm = 94,
UdivImm = 95,
SdivImm = 96,
UremImm = 97,
SremImm = 98,
IrsubImm = 99,
SaddOverflowCin = 100,
UaddOverflowCin = 101,
UaddOverflow = 102,
SaddOverflow = 103,
UsubOverflow = 104,
SsubOverflow = 105,
UmulOverflow = 106,
SmulOverflow = 107,
UaddOverflowTrap = 108,
SsubOverflowBin = 109,
UsubOverflowBin = 110,
Band = 111,
Bor = 112,
Bxor = 113,
Bnot = 114,
BandNot = 115,
BorNot = 116,
BxorNot = 117,
BandImm = 118,
BorImm = 119,
BxorImm = 120,
Rotl = 121,
Rotr = 122,
RotlImm = 123,
RotrImm = 124,
Ishl = 125,
Ushr = 126,
Sshr = 127,
IshlImm = 128,
UshrImm = 129,
SshrImm = 130,
Bitrev = 131,
Clz = 132,
Cls = 133,
Ctz = 134,
Bswap = 135,
Popcnt = 136,
Fcmp = 137,
Fadd = 138,
Fsub = 139,
Fmul = 140,
Fdiv = 141,
Sqrt = 142,
Fma = 143,
Fneg = 144,
Fabs = 145,
Fcopysign = 146,
Fmin = 147,
Fmax = 148,
Ceil = 149,
Floor = 150,
Trunc = 151,
Nearest = 152,
Bitcast = 153,
ScalarToVector = 154,
Bmask = 155,
Ireduce = 156,
Snarrow = 157,
Unarrow = 158,
Uunarrow = 159,
SwidenLow = 160,
SwidenHigh = 161,
UwidenLow = 162,
UwidenHigh = 163,
IaddPairwise = 164,
X86Pmaddubsw = 165,
Uextend = 166,
Sextend = 167,
Fpromote = 168,
Fdemote = 169,
Fvdemote = 170,
FvpromoteLow = 171,
FcvtToUint = 172,
FcvtToSint = 173,
FcvtToUintSat = 174,
FcvtToSintSat = 175,
X86Cvtt2dq = 176,
FcvtFromUint = 177,
FcvtFromSint = 178,
Isplit = 179,
Iconcat = 180,
AtomicRmw = 181,
AtomicCas = 182,
AtomicLoad = 183,
AtomicStore = 184,
Fence = 185,
ExtractVector = 186,
SequencePoint = 187,
PatchableCall = 188,
}Expand description
An instruction opcode.
All instructions from all supported ISAs are present.
Variants§
Jump = 1
jump block_call. (Jump)
Brif = 2
brif c, block_then, block_else. (Brif)
Type inferred from c.
BrTable = 3
br_table x, JT. (BranchTable)
Debugtrap = 4
debugtrap. (NullAry)
Trap = 5
trap code. (Trap)
Trapz = 6
trapz c, code. (CondTrap)
Type inferred from c.
Trapnz = 7
trapnz c, code. (CondTrap)
Type inferred from c.
Return = 8
return rvals. (MultiAry)
Call = 9
rvals = call FN, args. (Call)
CallIndirect = 10
rvals = call_indirect SIG, callee, args. (CallIndirect)
Type inferred from callee.
ReturnCall = 11
return_call FN, args. (Call)
ReturnCallIndirect = 12
return_call_indirect SIG, callee, args. (CallIndirect)
Type inferred from callee.
FuncAddr = 13
addr = func_addr FN. (FuncAddr)
TryCall = 14
try_call callee, args, ET. (TryCall)
TryCallIndirect = 15
try_call_indirect callee, args, ET. (TryCallIndirect)
Type inferred from callee.
Splat = 16
a = splat x. (Unary)
Swizzle = 17
a = swizzle x, y. (Binary)
X86Pshufb = 18
a = x86_pshufb x, y. (Binary)
Insertlane = 19
a = insertlane x, y, Idx. (TernaryImm8)
Type inferred from x.
Extractlane = 20
a = extractlane x, Idx. (BinaryImm8)
Type inferred from x.
Smin = 21
a = smin x, y. (Binary)
Type inferred from x.
Umin = 22
a = umin x, y. (Binary)
Type inferred from x.
Smax = 23
a = smax x, y. (Binary)
Type inferred from x.
Umax = 24
a = umax x, y. (Binary)
Type inferred from x.
AvgRound = 25
a = avg_round x, y. (Binary)
Type inferred from x.
UaddSat = 26
a = uadd_sat x, y. (Binary)
Type inferred from x.
SaddSat = 27
a = sadd_sat x, y. (Binary)
Type inferred from x.
UsubSat = 28
a = usub_sat x, y. (Binary)
Type inferred from x.
SsubSat = 29
a = ssub_sat x, y. (Binary)
Type inferred from x.
Load = 30
a = load MemFlags, p, Offset. (Load)
Store = 31
store MemFlags, x, p, Offset. (Store)
Type inferred from x.
Uload8 = 32
a = uload8 MemFlags, p, Offset. (Load)
Sload8 = 33
a = sload8 MemFlags, p, Offset. (Load)
Istore8 = 34
istore8 MemFlags, x, p, Offset. (Store)
Type inferred from x.
Uload16 = 35
a = uload16 MemFlags, p, Offset. (Load)
Sload16 = 36
a = sload16 MemFlags, p, Offset. (Load)
Istore16 = 37
istore16 MemFlags, x, p, Offset. (Store)
Type inferred from x.
Uload32 = 38
a = uload32 MemFlags, p, Offset. (Load)
Type inferred from p.
Sload32 = 39
a = sload32 MemFlags, p, Offset. (Load)
Type inferred from p.
Istore32 = 40
istore32 MemFlags, x, p, Offset. (Store)
Type inferred from x.
StackSwitch = 41
out_payload0 = stack_switch store_context_ptr, load_context_ptr, in_payload0. (Ternary)
Type inferred from load_context_ptr.
Uload8x8 = 42
a = uload8x8 MemFlags, p, Offset. (Load)
Type inferred from p.
Sload8x8 = 43
a = sload8x8 MemFlags, p, Offset. (Load)
Type inferred from p.
Uload16x4 = 44
a = uload16x4 MemFlags, p, Offset. (Load)
Type inferred from p.
Sload16x4 = 45
a = sload16x4 MemFlags, p, Offset. (Load)
Type inferred from p.
Uload32x2 = 46
a = uload32x2 MemFlags, p, Offset. (Load)
Type inferred from p.
Sload32x2 = 47
a = sload32x2 MemFlags, p, Offset. (Load)
Type inferred from p.
StackLoad = 48
a = stack_load SS, Offset. (StackLoad)
StackStore = 49
stack_store x, SS, Offset. (StackStore)
Type inferred from x.
StackAddr = 50
addr = stack_addr SS, Offset. (StackLoad)
DynamicStackLoad = 51
a = dynamic_stack_load DSS. (DynamicStackLoad)
DynamicStackStore = 52
dynamic_stack_store x, DSS. (DynamicStackStore)
Type inferred from x.
DynamicStackAddr = 53
addr = dynamic_stack_addr DSS. (DynamicStackLoad)
GlobalValue = 54
a = global_value GV. (UnaryGlobalValue)
SymbolValue = 55
a = symbol_value GV. (UnaryGlobalValue)
TlsValue = 56
a = tls_value GV. (UnaryGlobalValue)
GetPinnedReg = 57
addr = get_pinned_reg. (NullAry)
SetPinnedReg = 58
set_pinned_reg addr. (Unary)
Type inferred from addr.
GetFramePointer = 59
addr = get_frame_pointer. (NullAry)
GetStackPointer = 60
addr = get_stack_pointer. (NullAry)
GetReturnAddress = 61
addr = get_return_address. (NullAry)
GetExceptionHandlerAddress = 62
addr = get_exception_handler_address block, index. (ExceptionHandlerAddress)
Iconst = 63
a = iconst N. (UnaryImm)
F16const = 64
a = f16const N. (UnaryIeee16)
F32const = 65
a = f32const N. (UnaryIeee32)
F64const = 66
a = f64const N. (UnaryIeee64)
F128const = 67
a = f128const N. (UnaryConst)
Vconst = 68
a = vconst N. (UnaryConst)
Shuffle = 69
a = shuffle a, b, mask. (Shuffle)
Nop = 70
nop. (NullAry)
Select = 71
a = select c, x, y. (Ternary)
Type inferred from x.
SelectSpectreGuard = 72
a = select_spectre_guard c, x, y. (Ternary)
Type inferred from x.
Bitselect = 73
a = bitselect c, x, y. (Ternary)
Type inferred from x.
X86Blendv = 74
a = x86_blendv c, x, y. (Ternary)
Type inferred from x.
VanyTrue = 75
s = vany_true a. (Unary)
Type inferred from a.
VallTrue = 76
s = vall_true a. (Unary)
Type inferred from a.
VhighBits = 77
x = vhigh_bits a. (Unary)
Icmp = 78
a = icmp Cond, x, y. (IntCompare)
Type inferred from x.
IcmpImm = 79
a = icmp_imm Cond, x, Y. (IntCompareImm)
Type inferred from x.
Iadd = 80
a = iadd x, y. (Binary)
Type inferred from x.
Isub = 81
a = isub x, y. (Binary)
Type inferred from x.
Ineg = 82
a = ineg x. (Unary)
Type inferred from x.
Iabs = 83
a = iabs x. (Unary)
Type inferred from x.
Imul = 84
a = imul x, y. (Binary)
Type inferred from x.
Umulhi = 85
a = umulhi x, y. (Binary)
Type inferred from x.
Smulhi = 86
a = smulhi x, y. (Binary)
Type inferred from x.
SqmulRoundSat = 87
a = sqmul_round_sat x, y. (Binary)
Type inferred from x.
X86Pmulhrsw = 88
a = x86_pmulhrsw x, y. (Binary)
Type inferred from x.
Udiv = 89
a = udiv x, y. (Binary)
Type inferred from x.
Sdiv = 90
a = sdiv x, y. (Binary)
Type inferred from x.
Urem = 91
a = urem x, y. (Binary)
Type inferred from x.
Srem = 92
a = srem x, y. (Binary)
Type inferred from x.
IaddImm = 93
a = iadd_imm x, Y. (BinaryImm64)
Type inferred from x.
ImulImm = 94
a = imul_imm x, Y. (BinaryImm64)
Type inferred from x.
UdivImm = 95
a = udiv_imm x, Y. (BinaryImm64)
Type inferred from x.
SdivImm = 96
a = sdiv_imm x, Y. (BinaryImm64)
Type inferred from x.
UremImm = 97
a = urem_imm x, Y. (BinaryImm64)
Type inferred from x.
SremImm = 98
a = srem_imm x, Y. (BinaryImm64)
Type inferred from x.
IrsubImm = 99
a = irsub_imm x, Y. (BinaryImm64)
Type inferred from x.
SaddOverflowCin = 100
a, c_out = sadd_overflow_cin x, y, c_in. (Ternary)
Type inferred from y.
UaddOverflowCin = 101
a, c_out = uadd_overflow_cin x, y, c_in. (Ternary)
Type inferred from y.
UaddOverflow = 102
a, of = uadd_overflow x, y. (Binary)
Type inferred from x.
SaddOverflow = 103
a, of = sadd_overflow x, y. (Binary)
Type inferred from x.
UsubOverflow = 104
a, of = usub_overflow x, y. (Binary)
Type inferred from x.
SsubOverflow = 105
a, of = ssub_overflow x, y. (Binary)
Type inferred from x.
UmulOverflow = 106
a, of = umul_overflow x, y. (Binary)
Type inferred from x.
SmulOverflow = 107
a, of = smul_overflow x, y. (Binary)
Type inferred from x.
UaddOverflowTrap = 108
a = uadd_overflow_trap x, y, code. (IntAddTrap)
Type inferred from x.
SsubOverflowBin = 109
a, b_out = ssub_overflow_bin x, y, b_in. (Ternary)
Type inferred from y.
UsubOverflowBin = 110
a, b_out = usub_overflow_bin x, y, b_in. (Ternary)
Type inferred from y.
Band = 111
a = band x, y. (Binary)
Type inferred from x.
Bor = 112
a = bor x, y. (Binary)
Type inferred from x.
Bxor = 113
a = bxor x, y. (Binary)
Type inferred from x.
Bnot = 114
a = bnot x. (Unary)
Type inferred from x.
BandNot = 115
a = band_not x, y. (Binary)
Type inferred from x.
BorNot = 116
a = bor_not x, y. (Binary)
Type inferred from x.
BxorNot = 117
a = bxor_not x, y. (Binary)
Type inferred from x.
BandImm = 118
a = band_imm x, Y. (BinaryImm64)
Type inferred from x.
BorImm = 119
a = bor_imm x, Y. (BinaryImm64)
Type inferred from x.
BxorImm = 120
a = bxor_imm x, Y. (BinaryImm64)
Type inferred from x.
Rotl = 121
a = rotl x, y. (Binary)
Type inferred from x.
Rotr = 122
a = rotr x, y. (Binary)
Type inferred from x.
RotlImm = 123
a = rotl_imm x, Y. (BinaryImm64)
Type inferred from x.
RotrImm = 124
a = rotr_imm x, Y. (BinaryImm64)
Type inferred from x.
Ishl = 125
a = ishl x, y. (Binary)
Type inferred from x.
Ushr = 126
a = ushr x, y. (Binary)
Type inferred from x.
Sshr = 127
a = sshr x, y. (Binary)
Type inferred from x.
IshlImm = 128
a = ishl_imm x, Y. (BinaryImm64)
Type inferred from x.
UshrImm = 129
a = ushr_imm x, Y. (BinaryImm64)
Type inferred from x.
SshrImm = 130
a = sshr_imm x, Y. (BinaryImm64)
Type inferred from x.
Bitrev = 131
a = bitrev x. (Unary)
Type inferred from x.
Clz = 132
a = clz x. (Unary)
Type inferred from x.
Cls = 133
a = cls x. (Unary)
Type inferred from x.
Ctz = 134
a = ctz x. (Unary)
Type inferred from x.
Bswap = 135
a = bswap x. (Unary)
Type inferred from x.
Popcnt = 136
a = popcnt x. (Unary)
Type inferred from x.
Fcmp = 137
a = fcmp Cond, x, y. (FloatCompare)
Type inferred from x.
Fadd = 138
a = fadd x, y. (Binary)
Type inferred from x.
Fsub = 139
a = fsub x, y. (Binary)
Type inferred from x.
Fmul = 140
a = fmul x, y. (Binary)
Type inferred from x.
Fdiv = 141
a = fdiv x, y. (Binary)
Type inferred from x.
Sqrt = 142
a = sqrt x. (Unary)
Type inferred from x.
Fma = 143
a = fma x, y, z. (Ternary)
Type inferred from y.
Fneg = 144
a = fneg x. (Unary)
Type inferred from x.
Fabs = 145
a = fabs x. (Unary)
Type inferred from x.
Fcopysign = 146
a = fcopysign x, y. (Binary)
Type inferred from x.
Fmin = 147
a = fmin x, y. (Binary)
Type inferred from x.
Fmax = 148
a = fmax x, y. (Binary)
Type inferred from x.
Ceil = 149
a = ceil x. (Unary)
Type inferred from x.
Floor = 150
a = floor x. (Unary)
Type inferred from x.
Trunc = 151
a = trunc x. (Unary)
Type inferred from x.
Nearest = 152
a = nearest x. (Unary)
Type inferred from x.
Bitcast = 153
a = bitcast MemFlags, x. (LoadNoOffset)
ScalarToVector = 154
a = scalar_to_vector s. (Unary)
Bmask = 155
a = bmask x. (Unary)
Ireduce = 156
a = ireduce x. (Unary)
Snarrow = 157
a = snarrow x, y. (Binary)
Type inferred from x.
Unarrow = 158
a = unarrow x, y. (Binary)
Type inferred from x.
Uunarrow = 159
a = uunarrow x, y. (Binary)
Type inferred from x.
SwidenLow = 160
a = swiden_low x. (Unary)
Type inferred from x.
SwidenHigh = 161
a = swiden_high x. (Unary)
Type inferred from x.
UwidenLow = 162
a = uwiden_low x. (Unary)
Type inferred from x.
UwidenHigh = 163
a = uwiden_high x. (Unary)
Type inferred from x.
IaddPairwise = 164
a = iadd_pairwise x, y. (Binary)
Type inferred from x.
X86Pmaddubsw = 165
a = x86_pmaddubsw x, y. (Binary)
Uextend = 166
a = uextend x. (Unary)
Sextend = 167
a = sextend x. (Unary)
Fpromote = 168
a = fpromote x. (Unary)
Fdemote = 169
a = fdemote x. (Unary)
Fvdemote = 170
a = fvdemote x. (Unary)
FvpromoteLow = 171
x = fvpromote_low a. (Unary)
FcvtToUint = 172
a = fcvt_to_uint x. (Unary)
FcvtToSint = 173
a = fcvt_to_sint x. (Unary)
FcvtToUintSat = 174
a = fcvt_to_uint_sat x. (Unary)
FcvtToSintSat = 175
a = fcvt_to_sint_sat x. (Unary)
X86Cvtt2dq = 176
a = x86_cvtt2dq x. (Unary)
FcvtFromUint = 177
a = fcvt_from_uint x. (Unary)
FcvtFromSint = 178
a = fcvt_from_sint x. (Unary)
Isplit = 179
lo, hi = isplit x. (Unary)
Type inferred from x.
Iconcat = 180
a = iconcat lo, hi. (Binary)
Type inferred from lo.
AtomicRmw = 181
a = atomic_rmw MemFlags, AtomicRmwOp, p, x. (AtomicRmw)
AtomicCas = 182
a = atomic_cas MemFlags, p, e, x. (AtomicCas)
Type inferred from x.
AtomicLoad = 183
a = atomic_load MemFlags, p. (LoadNoOffset)
AtomicStore = 184
atomic_store MemFlags, x, p. (StoreNoOffset)
Type inferred from x.
Fence = 185
fence. (NullAry)
ExtractVector = 186
a = extract_vector x, y. (BinaryImm8)
Type inferred from x.
SequencePoint = 187
sequence_point. (NullAry)
PatchableCall = 188
patchable_call FN, args. (Call)
Implementations§
Source§impl Opcode
impl Opcode
Sourcepub fn is_terminator(self) -> bool
pub fn is_terminator(self) -> bool
True for instructions that terminate the block
Sourcepub fn other_side_effects(self) -> bool
pub fn other_side_effects(self) -> bool
Does this instruction have other side effects besides can_* flags?
Sourcepub fn side_effects_idempotent(self) -> bool
pub fn side_effects_idempotent(self) -> bool
Despite having side effects, is this instruction okay to GVN?
Source§impl Opcode
impl Opcode
Sourcepub fn format(self) -> InstructionFormat
pub fn format(self) -> InstructionFormat
Get the instruction format for this opcode.
Sourcepub fn constraints(self) -> OpcodeConstraints
pub fn constraints(self) -> OpcodeConstraints
Get the constraint descriptor for this opcode.
Panics if this is called on NotAnOpcode.
Sourcepub fn is_safepoint(self) -> bool
pub fn is_safepoint(self) -> bool
Is this instruction a GC safepoint?
Safepoints are all kinds of calls, except for tail calls.
Trait Implementations§
impl Copy for Opcode
impl Eq for Opcode
impl StructuralPartialEq for Opcode
Auto Trait Implementations§
impl Freeze for Opcode
impl RefUnwindSafe for Opcode
impl Send for Opcode
impl Sync for Opcode
impl Unpin for Opcode
impl UnwindSafe for Opcode
Blanket Implementations§
Source§impl<T> AlignerFor<1> for T
impl<T> AlignerFor<1> for T
Source§impl<T> AlignerFor<1024> for T
impl<T> AlignerFor<1024> for T
Source§type Aligner = AlignTo1024<T>
type Aligner = AlignTo1024<T>
AlignTo* type which aligns Self to ALIGNMENT.Source§impl<T> AlignerFor<128> for T
impl<T> AlignerFor<128> for T
Source§type Aligner = AlignTo128<T>
type Aligner = AlignTo128<T>
AlignTo* type which aligns Self to ALIGNMENT.Source§impl<T> AlignerFor<16> for T
impl<T> AlignerFor<16> for T
Source§impl<T> AlignerFor<16384> for T
impl<T> AlignerFor<16384> for T
Source§type Aligner = AlignTo16384<T>
type Aligner = AlignTo16384<T>
AlignTo* type which aligns Self to ALIGNMENT.Source§impl<T> AlignerFor<2> for T
impl<T> AlignerFor<2> for T
Source§impl<T> AlignerFor<2048> for T
impl<T> AlignerFor<2048> for T
Source§type Aligner = AlignTo2048<T>
type Aligner = AlignTo2048<T>
AlignTo* type which aligns Self to ALIGNMENT.Source§impl<T> AlignerFor<256> for T
impl<T> AlignerFor<256> for T
Source§type Aligner = AlignTo256<T>
type Aligner = AlignTo256<T>
AlignTo* type which aligns Self to ALIGNMENT.Source§impl<T> AlignerFor<32> for T
impl<T> AlignerFor<32> for T
Source§impl<T> AlignerFor<32768> for T
impl<T> AlignerFor<32768> for T
Source§type Aligner = AlignTo32768<T>
type Aligner = AlignTo32768<T>
AlignTo* type which aligns Self to ALIGNMENT.Source§impl<T> AlignerFor<4> for T
impl<T> AlignerFor<4> for T
Source§impl<T> AlignerFor<4096> for T
impl<T> AlignerFor<4096> for T
Source§type Aligner = AlignTo4096<T>
type Aligner = AlignTo4096<T>
AlignTo* type which aligns Self to ALIGNMENT.Source§impl<T> AlignerFor<512> for T
impl<T> AlignerFor<512> for T
Source§type Aligner = AlignTo512<T>
type Aligner = AlignTo512<T>
AlignTo* type which aligns Self to ALIGNMENT.Source§impl<T> AlignerFor<64> for T
impl<T> AlignerFor<64> for T
Source§impl<T> AlignerFor<8> for T
impl<T> AlignerFor<8> for T
Source§impl<T> AlignerFor<8192> for T
impl<T> AlignerFor<8192> for T
Source§type Aligner = AlignTo8192<T>
type Aligner = AlignTo8192<T>
AlignTo* type which aligns Self to ALIGNMENT.Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<T> DistributionExt for Twhere
T: ?Sized,
impl<T> DistributionExt for Twhere
T: ?Sized,
Source§impl<Q, K> Equivalent<K> for Q
impl<Q, K> Equivalent<K> for Q
Source§impl<Q, K> Equivalent<K> for Q
impl<Q, K> Equivalent<K> for Q
Source§fn equivalent(&self, key: &K) -> bool
fn equivalent(&self, key: &K) -> bool
Compare self to key and return true if they are equal.
Source§impl<Q, K> Equivalent<K> for Q
impl<Q, K> Equivalent<K> for Q
Source§impl<Q, K> Equivalent<K> for Q
impl<Q, K> Equivalent<K> for Q
Source§impl<T, W> HasTypeWitness<W> for Twhere
W: MakeTypeWitness<Arg = T>,
T: ?Sized,
impl<T, W> HasTypeWitness<W> for Twhere
W: MakeTypeWitness<Arg = T>,
T: ?Sized,
Source§impl<T> Identity for Twhere
T: ?Sized,
impl<T> Identity for Twhere
T: ?Sized,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self> ⓘ
fn into_either(self, into_left: bool) -> Either<Self, Self> ⓘ
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moreSource§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self> ⓘ
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self> ⓘ
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moreSource§impl<T> Pointable for T
impl<T> Pointable for T
Source§impl<'a, T> RCowCompatibleRef<'a> for Twhere
T: Clone + 'a,
impl<'a, T> RCowCompatibleRef<'a> for Twhere
T: Clone + 'a,
Source§fn as_c_ref(from: &'a T) -> <T as RCowCompatibleRef<'a>>::RefC
fn as_c_ref(from: &'a T) -> <T as RCowCompatibleRef<'a>>::RefC
Source§fn as_rust_ref(from: <T as RCowCompatibleRef<'a>>::RefC) -> &'a T
fn as_rust_ref(from: <T as RCowCompatibleRef<'a>>::RefC) -> &'a T
Source§impl<S> ROExtAcc for S
impl<S> ROExtAcc for S
Source§fn f_get<F>(&self, offset: FieldOffset<S, F, Aligned>) -> &F
fn f_get<F>(&self, offset: FieldOffset<S, F, Aligned>) -> &F
offset. Read moreSource§fn f_get_mut<F>(&mut self, offset: FieldOffset<S, F, Aligned>) -> &mut F
fn f_get_mut<F>(&mut self, offset: FieldOffset<S, F, Aligned>) -> &mut F
offset. Read moreSource§fn f_get_ptr<F, A>(&self, offset: FieldOffset<S, F, A>) -> *const F
fn f_get_ptr<F, A>(&self, offset: FieldOffset<S, F, A>) -> *const F
offset. Read moreSource§fn f_get_mut_ptr<F, A>(&mut self, offset: FieldOffset<S, F, A>) -> *mut F
fn f_get_mut_ptr<F, A>(&mut self, offset: FieldOffset<S, F, A>) -> *mut F
offset. Read moreSource§impl<S> ROExtOps<Aligned> for S
impl<S> ROExtOps<Aligned> for S
Source§fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Aligned>, value: F) -> F
fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Aligned>, value: F) -> F
offset) with value,
returning the previous value of the field. Read moreSource§fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Aligned>) -> Fwhere
F: Copy,
fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Aligned>) -> Fwhere
F: Copy,
Source§impl<S> ROExtOps<Unaligned> for S
impl<S> ROExtOps<Unaligned> for S
Source§fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Unaligned>, value: F) -> F
fn f_replace<F>(&mut self, offset: FieldOffset<S, F, Unaligned>, value: F) -> F
offset) with value,
returning the previous value of the field. Read moreSource§fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Unaligned>) -> Fwhere
F: Copy,
fn f_get_copy<F>(&self, offset: FieldOffset<S, F, Unaligned>) -> Fwhere
F: Copy,
Source§impl<T> SelfOps for Twhere
T: ?Sized,
impl<T> SelfOps for Twhere
T: ?Sized,
Source§fn piped<F, U>(self, f: F) -> U
fn piped<F, U>(self, f: F) -> U
Source§fn piped_ref<'a, F, U>(&'a self, f: F) -> Uwhere
F: FnOnce(&'a Self) -> U,
fn piped_ref<'a, F, U>(&'a self, f: F) -> Uwhere
F: FnOnce(&'a Self) -> U,
piped except that the function takes &Self
Useful for functions that take &Self instead of Self. Read moreSource§fn piped_mut<'a, F, U>(&'a mut self, f: F) -> Uwhere
F: FnOnce(&'a mut Self) -> U,
fn piped_mut<'a, F, U>(&'a mut self, f: F) -> Uwhere
F: FnOnce(&'a mut Self) -> U,
piped, except that the function takes &mut Self.
Useful for functions that take &mut Self instead of Self.Source§fn mutated<F>(self, f: F) -> Self
fn mutated<F>(self, f: F) -> Self
Source§fn observe<F>(self, f: F) -> Self
fn observe<F>(self, f: F) -> Self
Source§fn as_ref_<T>(&self) -> &T
fn as_ref_<T>(&self) -> &T
AsRef,
using the turbofish .as_ref_::<_>() syntax. Read moreSource§impl<SS, SP> SupersetOf<SS> for SPwhere
SS: SubsetOf<SP>,
impl<SS, SP> SupersetOf<SS> for SPwhere
SS: SubsetOf<SP>,
Source§fn to_subset(&self) -> Option<SS>
fn to_subset(&self) -> Option<SS>
self from the equivalent element of its
superset. Read moreSource§fn is_in_subset(&self) -> bool
fn is_in_subset(&self) -> bool
self is actually part of its subset T (and can be converted to it).Source§fn to_subset_unchecked(&self) -> SS
fn to_subset_unchecked(&self) -> SS
self.to_subset but without any property checks. Always succeeds.Source§fn from_subset(element: &SS) -> SP
fn from_subset(element: &SS) -> SP
self to the equivalent element of its superset.Source§impl<SS, SP> SupersetOf<SS> for SPwhere
SS: SubsetOf<SP>,
impl<SS, SP> SupersetOf<SS> for SPwhere
SS: SubsetOf<SP>,
Source§fn to_subset(&self) -> Option<SS>
fn to_subset(&self) -> Option<SS>
self from the equivalent element of its
superset. Read moreSource§fn is_in_subset(&self) -> bool
fn is_in_subset(&self) -> bool
self is actually part of its subset T (and can be converted to it).Source§unsafe fn to_subset_unchecked(&self) -> SS
unsafe fn to_subset_unchecked(&self) -> SS
self.to_subset but without any property checks. Always succeeds.Source§fn from_subset(element: &SS) -> SP
fn from_subset(element: &SS) -> SP
self to the equivalent element of its superset.