pub struct MemFlags { /* private fields */ }

Flags for memory operations like load/store.

Each of these flags introduces a limited form of undefined behavior: each flag enables certain optimizations that rely on additional assumptions. Generally, the semantics of a program do not change when a flag is removed, but adding a flag may.

In addition, the flags determine the endianness of the memory access. By default, any memory access uses the native endianness determined by the target ISA. This can be overridden for individual accesses by explicitly specifying little- or big-endian semantics via the flags.
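
A short sketch of constructing flags, assuming the usual `cranelift_codegen::ir` paths; `trusted` and `with_endianness` are documented further down this page:

use cranelift_codegen::ir::{Endianness, MemFlags};

// Default flags: native endianness, accesses may trap, no alignment claim.
let flags = MemFlags::new();
assert!(!flags.notrap() && !flags.aligned());

// A "trusted" access is known to be aligned and non-trapping.
assert!(MemFlags::trusted().notrap());

// Override the byte order for this access regardless of the target ISA.
let little = MemFlags::new().with_endianness(Endianness::Little);
assert!(matches!(little.endianness(Endianness::Big), Endianness::Little));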

Implementations§

pub fn new() -> Self

Create a new empty set of flags.

Examples found in repository

src/ir/memflags.rs (line 63)
    pub fn trusted() -> Self {
        let mut result = Self::new();
        result.set_notrap();
        result.set_aligned();
        result
    }
src/verifier/mod.rs (line 1096)
    fn verify_bitcast(
        &self,
        inst: Inst,
        flags: MemFlags,
        arg: Value,
        errors: &mut VerifierErrors,
    ) -> VerifierStepResult<()> {
        let typ = self.func.dfg.ctrl_typevar(inst);
        let value_type = self.func.dfg.value_type(arg);

        if typ.bits() != value_type.bits() {
            errors.fatal((
                inst,
                format!(
                    "The bitcast argument {} has a type of {} bits, which doesn't match an expected type of {} bits",
                    arg,
                    value_type.bits(),
                    typ.bits()
                ),
            ))
        } else if flags != MemFlags::new()
            && flags != MemFlags::new().with_endianness(ir::Endianness::Little)
            && flags != MemFlags::new().with_endianness(ir::Endianness::Big)
        {
            errors.fatal((
                inst,
                "The bitcast instruction only accepts the `big` or `little` memory flags",
            ))
        } else if flags == MemFlags::new() && typ.lane_count() != value_type.lane_count() {
            errors.fatal((
                inst,
                "Byte order specifier required for bitcast instruction changing lane count",
            ))
        } else {
            Ok(())
        }
    }
src/legalizer/mod.rs (line 121)
pub fn simple_legalize(func: &mut ir::Function, cfg: &mut ControlFlowGraph, isa: &dyn TargetIsa) {
    trace!("Pre-legalization function:\n{}", func.display());

    let mut pos = FuncCursor::new(func);
    let func_begin = pos.position();
    pos.set_position(func_begin);
    while let Some(_block) = pos.next_block() {
        let mut prev_pos = pos.position();
        while let Some(inst) = pos.next_inst() {
            match pos.func.dfg[inst] {
                // control flow
                InstructionData::CondTrap {
                    opcode:
                        opcode @ (ir::Opcode::Trapnz | ir::Opcode::Trapz | ir::Opcode::ResumableTrapnz),
                    arg,
                    code,
                } => {
                    expand_cond_trap(inst, &mut pos.func, cfg, opcode, arg, code);
                }

                // memory and constants
                InstructionData::UnaryGlobalValue {
                    opcode: ir::Opcode::GlobalValue,
                    global_value,
                } => expand_global_value(inst, &mut pos.func, isa, global_value),
                InstructionData::HeapAddr {
                    opcode: ir::Opcode::HeapAddr,
                    heap,
                    arg,
                    offset,
                    size,
                } => expand_heap_addr(inst, &mut pos.func, cfg, isa, heap, arg, offset, size),
                InstructionData::HeapLoad {
                    opcode: ir::Opcode::HeapLoad,
                    heap_imm,
                    arg,
                } => expand_heap_load(inst, &mut pos.func, cfg, isa, heap_imm, arg),
                InstructionData::HeapStore {
                    opcode: ir::Opcode::HeapStore,
                    heap_imm,
                    args,
                } => expand_heap_store(inst, &mut pos.func, cfg, isa, heap_imm, args[0], args[1]),
                InstructionData::StackLoad {
                    opcode: ir::Opcode::StackLoad,
                    stack_slot,
                    offset,
                } => {
                    let ty = pos.func.dfg.value_type(pos.func.dfg.first_result(inst));
                    let addr_ty = isa.pointer_type();

                    let mut pos = FuncCursor::new(pos.func).at_inst(inst);
                    pos.use_srcloc(inst);

                    let addr = pos.ins().stack_addr(addr_ty, stack_slot, offset);

                    // Stack slots are required to be accessible and aligned.
                    let mflags = MemFlags::trusted();
                    pos.func.dfg.replace(inst).load(ty, mflags, addr, 0);
                }
                InstructionData::StackStore {
                    opcode: ir::Opcode::StackStore,
                    arg,
                    stack_slot,
                    offset,
                } => {
                    let addr_ty = isa.pointer_type();

                    let mut pos = FuncCursor::new(pos.func).at_inst(inst);
                    pos.use_srcloc(inst);

                    let addr = pos.ins().stack_addr(addr_ty, stack_slot, offset);

                    let mut mflags = MemFlags::new();
                    // Stack slots are required to be accessible and aligned.
                    mflags.set_notrap();
                    mflags.set_aligned();
                    pos.func.dfg.replace(inst).store(mflags, arg, addr, 0);
                }
                InstructionData::DynamicStackLoad {
                    opcode: ir::Opcode::DynamicStackLoad,
                    dynamic_stack_slot,
                } => {
                    let ty = pos.func.dfg.value_type(pos.func.dfg.first_result(inst));
                    assert!(ty.is_dynamic_vector());
                    let addr_ty = isa.pointer_type();

                    let mut pos = FuncCursor::new(pos.func).at_inst(inst);
                    pos.use_srcloc(inst);

                    let addr = pos.ins().dynamic_stack_addr(addr_ty, dynamic_stack_slot);

                    // Stack slots are required to be accessible and aligned.
                    let mflags = MemFlags::trusted();
                    pos.func.dfg.replace(inst).load(ty, mflags, addr, 0);
                }
                InstructionData::DynamicStackStore {
                    opcode: ir::Opcode::DynamicStackStore,
                    arg,
                    dynamic_stack_slot,
                } => {
                    pos.use_srcloc(inst);
                    let addr_ty = isa.pointer_type();
                    let vector_ty = pos.func.dfg.value_type(arg);
                    assert!(vector_ty.is_dynamic_vector());

                    let addr = pos.ins().dynamic_stack_addr(addr_ty, dynamic_stack_slot);

                    let mut mflags = MemFlags::new();
                    // Stack slots are required to be accessible and aligned.
                    mflags.set_notrap();
                    mflags.set_aligned();
                    pos.func.dfg.replace(inst).store(mflags, arg, addr, 0);
                }
                InstructionData::TableAddr {
                    opcode: ir::Opcode::TableAddr,
                    table,
                    arg,
                    offset,
                } => expand_table_addr(isa, inst, &mut pos.func, table, arg, offset),

                InstructionData::BinaryImm64 { opcode, arg, imm } => {
                    let is_signed = match opcode {
                        ir::Opcode::IaddImm
                        | ir::Opcode::IrsubImm
                        | ir::Opcode::ImulImm
                        | ir::Opcode::SdivImm
                        | ir::Opcode::SremImm
                        | ir::Opcode::IfcmpImm => true,
                        _ => false,
                    };

                    let imm = imm_const(&mut pos, arg, imm, is_signed);
                    let replace = pos.func.dfg.replace(inst);
                    match opcode {
                        // bitops
                        ir::Opcode::BandImm => {
                            replace.band(arg, imm);
                        }
                        ir::Opcode::BorImm => {
                            replace.bor(arg, imm);
                        }
                        ir::Opcode::BxorImm => {
                            replace.bxor(arg, imm);
                        }
                        // bitshifting
                        ir::Opcode::IshlImm => {
                            replace.ishl(arg, imm);
                        }
                        ir::Opcode::RotlImm => {
                            replace.rotl(arg, imm);
                        }
                        ir::Opcode::RotrImm => {
                            replace.rotr(arg, imm);
                        }
                        ir::Opcode::SshrImm => {
                            replace.sshr(arg, imm);
                        }
                        ir::Opcode::UshrImm => {
                            replace.ushr(arg, imm);
                        }
                        // math
                        ir::Opcode::IaddImm => {
                            replace.iadd(arg, imm);
                        }
                        ir::Opcode::IrsubImm => {
                            // note: arg order reversed
                            replace.isub(imm, arg);
                        }
                        ir::Opcode::ImulImm => {
                            replace.imul(arg, imm);
                        }
                        ir::Opcode::SdivImm => {
                            replace.sdiv(arg, imm);
                        }
                        ir::Opcode::SremImm => {
                            replace.srem(arg, imm);
                        }
                        ir::Opcode::UdivImm => {
                            replace.udiv(arg, imm);
                        }
                        ir::Opcode::UremImm => {
                            replace.urem(arg, imm);
                        }
                        // comparisons
                        ir::Opcode::IfcmpImm => {
                            replace.ifcmp(arg, imm);
                        }
                        _ => prev_pos = pos.position(),
                    };
                }

                // comparisons
                InstructionData::IntCompareImm {
                    opcode: ir::Opcode::IcmpImm,
                    cond,
                    arg,
                    imm,
                } => {
                    let imm = imm_const(&mut pos, arg, imm, true);
                    pos.func.dfg.replace(inst).icmp(cond, arg, imm);
                }

                _ => {
                    prev_pos = pos.position();
                    continue;
                }
            }

            // Legalization implementations require fixpoint loop here.
            // TODO: fix this.
            pos.set_position(prev_pos);
        }
    }

    trace!("Post-legalization function:\n{}", func.display());
}

pub fn trusted() -> Self

Create a set of flags representing an access from a “trusted” address, meaning it’s known to be aligned and non-trapping.
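
As the memflags.rs listings on this page show, `trusted()` is equivalent to setting `notrap` and `aligned` by hand; a minimal sketch, relying on `MemFlags`'s `PartialEq` (which the `verify_bitcast` listing above also uses):

use cranelift_codegen::ir::MemFlags;

// Build the same flags manually and compare against trusted().
let mut manual = MemFlags::new();
manual.set_notrap();
manual.set_aligned();
assert!(MemFlags::trusted() == manual);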

Examples found in repository

src/isa/x64/inst/args.rs (line 264)
    pub(crate) fn imm_reg(simm32: u32, base: Reg) -> Self {
        debug_assert!(base.class() == RegClass::Int);
        Self::ImmReg {
            simm32,
            base,
            flags: MemFlags::trusted(),
        }
    }

    pub(crate) fn imm_reg_reg_shift(simm32: u32, base: Gpr, index: Gpr, shift: u8) -> Self {
        debug_assert!(base.class() == RegClass::Int);
        debug_assert!(index.class() == RegClass::Int);
        debug_assert!(shift <= 3);
        Self::ImmRegRegShift {
            simm32,
            base,
            index,
            shift,
            flags: MemFlags::trusted(),
        }
    }

    pub(crate) fn rip_relative(target: MachLabel) -> Self {
        Self::RipRelative { target }
    }

    pub(crate) fn with_flags(&self, flags: MemFlags) -> Self {
        match self {
            &Self::ImmReg { simm32, base, .. } => Self::ImmReg {
                simm32,
                base,
                flags,
            },
            &Self::ImmRegRegShift {
                simm32,
                base,
                index,
                shift,
                ..
            } => Self::ImmRegRegShift {
                simm32,
                base,
                index,
                shift,
                flags,
            },
            _ => panic!("Amode {:?} cannot take memflags", self),
        }
    }

    /// Add the registers mentioned by `self` to `collector`.
    pub(crate) fn get_operands<F: Fn(VReg) -> VReg>(
        &self,
        collector: &mut OperandCollector<'_, F>,
    ) {
        match self {
            Amode::ImmReg { base, .. } => {
                if *base != regs::rbp() && *base != regs::rsp() {
                    collector.reg_use(*base);
                }
            }
            Amode::ImmRegRegShift { base, index, .. } => {
                debug_assert_ne!(base.to_reg(), regs::rbp());
                debug_assert_ne!(base.to_reg(), regs::rsp());
                collector.reg_use(base.to_reg());
                debug_assert_ne!(index.to_reg(), regs::rbp());
                debug_assert_ne!(index.to_reg(), regs::rsp());
                collector.reg_use(index.to_reg());
            }
            Amode::RipRelative { .. } => {
                // RIP isn't involved in regalloc.
            }
        }
    }

    /// Same as `get_operands`, but add the registers in the "late" phase.
    pub(crate) fn get_operands_late<F: Fn(VReg) -> VReg>(
        &self,
        collector: &mut OperandCollector<'_, F>,
    ) {
        match self {
            Amode::ImmReg { base, .. } => {
                collector.reg_late_use(*base);
            }
            Amode::ImmRegRegShift { base, index, .. } => {
                collector.reg_late_use(base.to_reg());
                collector.reg_late_use(index.to_reg());
            }
            Amode::RipRelative { .. } => {
                // RIP isn't involved in regalloc.
            }
        }
    }

    pub(crate) fn get_flags(&self) -> MemFlags {
        match self {
            Amode::ImmReg { flags, .. } | Amode::ImmRegRegShift { flags, .. } => *flags,
            Amode::RipRelative { .. } => MemFlags::trusted(),
        }
    }
src/legalizer/globalvalue.rs (line 120)
fn load_addr(
    inst: ir::Inst,
    func: &mut ir::Function,
    base: ir::GlobalValue,
    offset: ir::immediates::Offset32,
    global_type: ir::Type,
    readonly: bool,
    isa: &dyn TargetIsa,
) {
    // We need to load a pointer from the `base` global value, so insert a new `global_value`
    // instruction. This depends on the iterative legalization loop. Note that the IR verifier
    // detects any cycles in the `load` globals.
    let ptr_ty = isa.pointer_type();
    let mut pos = FuncCursor::new(func).at_inst(inst);
    pos.use_srcloc(inst);

    // Get the value for the base. For tidiness, expand VMContext here so that we avoid
    // `vmctx_addr` which creates an otherwise unneeded value alias.
    let base_addr = if let ir::GlobalValueData::VMContext = pos.func.global_values[base] {
        pos.func
            .special_param(ir::ArgumentPurpose::VMContext)
            .expect("Missing vmctx parameter")
    } else {
        pos.ins().global_value(ptr_ty, base)
    };

    // Global-value loads are always notrap and aligned. They may be readonly.
    let mut mflags = ir::MemFlags::trusted();
    if readonly {
        mflags.set_readonly();
    }

    // Perform the load.
    pos.func
        .dfg
        .replace(inst)
        .load(global_type, mflags, base_addr, offset);
}
src/isa/x64/abi.rs (line 757)
    fn from(amode: StackAMode) -> Self {
        // We enforce a 128 MB stack-frame size limit above, so these
        // `expect()`s should never fail.
        match amode {
            StackAMode::FPOffset(off, _ty) => {
                let off = i32::try_from(off)
                    .expect("Offset in FPOffset is greater than 2GB; should hit impl limit first");
                let simm32 = off as u32;
                SyntheticAmode::Real(Amode::ImmReg {
                    simm32,
                    base: regs::rbp(),
                    flags: MemFlags::trusted(),
                })
            }
            StackAMode::NominalSPOffset(off, _ty) => {
                let off = i32::try_from(off).expect(
                    "Offset in NominalSPOffset is greater than 2GB; should hit impl limit first",
                );
                let simm32 = off as u32;
                SyntheticAmode::nominal_sp_offset(simm32)
            }
            StackAMode::SPOffset(off, _ty) => {
                let off = i32::try_from(off)
                    .expect("Offset in SPOffset is greater than 2GB; should hit impl limit first");
                let simm32 = off as u32;
                SyntheticAmode::Real(Amode::ImmReg {
                    simm32,
                    base: regs::rsp(),
                    flags: MemFlags::trusted(),
                })
            }
        }
    }
src/legalizer/mod.rs (line 105): see the `simple_legalize` listing above.

pub fn set_by_name(&mut self, name: &str) -> bool

Set a flag bit by name.

Returns true if the flag was found and set, false for an unknown flag name. Will also return false when trying to set inconsistent endianness flags.
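
A sketch of the lookup behavior. The names `notrap`, `little`, and `big` appear in the listings on this page (the full name table is internal to memflags.rs), and `"bogus"` is just an arbitrary unknown name:

use cranelift_codegen::ir::MemFlags;

let mut flags = MemFlags::new();
assert!(flags.set_by_name("notrap"));  // known flag: set, returns true
assert!(flags.set_by_name("little"));  // explicit little-endian
assert!(!flags.set_by_name("big"));    // conflicts with "little": rejected
assert!(!flags.set_by_name("bogus"));  // unknown flag name
assert!(flags.notrap());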

pub fn endianness(self, native_endianness: Endianness) -> Endianness

Return the endianness of the memory access. This returns the endianness explicitly specified by the flags, if any, and defaults to the native endianness otherwise. The native endianness has to be provided by the caller since it is not explicitly encoded in CLIF IR; this allows a front end to create IR without having to know the target endianness.
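
A sketch of the fallback rule, mirroring the `endianness` implementation quoted later on this page:

use cranelift_codegen::ir::{Endianness, MemFlags};

// No explicit byte order: the caller-supplied native endianness is used.
let plain = MemFlags::new();
assert!(matches!(plain.endianness(Endianness::Big), Endianness::Big));

// An explicit flag overrides the native default.
let le = MemFlags::new().with_endianness(Endianness::Little);
assert!(matches!(le.endianness(Endianness::Big), Endianness::Little));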

pub fn set_endianness(&mut self, endianness: Endianness)

Set the endianness of the memory access.

Examples found in repository

src/ir/memflags.rs (line 126)
    pub fn with_endianness(mut self, endianness: Endianness) -> Self {
        self.set_endianness(endianness);
        self
    }

pub fn with_endianness(mut self, endianness: Endianness) -> Self

Set the endianness of the memory access, returning the new flags.

Examples found in repository

src/verifier/mod.rs (line 1097): see the `verify_bitcast` listing above.

pub fn notrap(self) -> bool

Test if the `notrap` flag is set.

Normally, trapping is part of the semantics of a load/store operation. If the platform would cause a trap when accessing the effective address, the Cranelift memory operation is also required to trap.

The `notrap` flag tells Cranelift that the memory is accessible, which means that accesses will not trap. This makes it possible to delete an unused load or a dead store instruction.
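
A minimal check of the default and the builder form (`with_notrap` is documented below):

use cranelift_codegen::ir::MemFlags;

assert!(!MemFlags::new().notrap());              // not set by default
assert!(MemFlags::new().with_notrap().notrap()); // set via the builder form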

Examples found in repository

src/isa/x64/inst/args.rs (line 361)
    pub(crate) fn can_trap(&self) -> bool {
        !self.get_flags().notrap()
    }
src/licm.rs (line 150)
fn is_unsafe_load(inst_data: &InstructionData) -> bool {
    match *inst_data {
        InstructionData::Load { flags, .. } => !flags.readonly() || !flags.notrap(),
        _ => inst_data.opcode().can_load(),
    }
}
src/inst_predicates.rs (line 34)
fn is_load_with_defined_trapping(opcode: Opcode, data: &InstructionData) -> bool {
    if !opcode.can_load() {
        return false;
    }
    match *data {
        InstructionData::StackLoad { .. } => false,
        InstructionData::Load { flags, .. } => !flags.notrap(),
        _ => true,
    }
}
src/egraph.rs (line 193)
    fn build(&mut self, func: &Function) {
        // Mapping of SSA `Value` to eclass ID.
        let mut value_to_id = FxHashMap::default();

        // For each block in RPO, create an enode for block entry, for
        // each block param, and for each instruction.
        for &block in self.domtree.cfg_postorder().iter().rev() {
            let loop_level = self.loop_analysis.loop_level(block);
            let blockparam_start =
                u32::try_from(self.blockparam_ids_tys.len()).expect("Overflow in blockparam count");
            for (i, &value) in func.dfg.block_params(block).iter().enumerate() {
                let ty = func.dfg.value_type(value);
                let param = self
                    .egraph
                    .add(
                        Node::Param {
                            block,
                            index: i
                                .try_into()
                                .expect("blockparam index should fit in Node::Param"),
                            ty,
                            loop_level,
                        },
                        &mut self.node_ctx,
                    )
                    .get();
                value_to_id.insert(value, param);
                self.blockparam_ids_tys.push((param, ty));
                self.stats.node_created += 1;
                self.stats.node_param += 1;
            }
            let blockparam_end =
                u32::try_from(self.blockparam_ids_tys.len()).expect("Overflow in blockparam count");
            self.blockparams[block] = blockparam_start..blockparam_end;

            let side_effect_start =
                u32::try_from(self.side_effect_ids.len()).expect("Overflow in side-effect count");
            for inst in func.layout.block_insts(block) {
                // Build args from SSA values.
                let args = EntityList::from_iter(
                    func.dfg.inst_args(inst).iter().map(|&arg| {
                        let arg = func.dfg.resolve_aliases(arg);
                        *value_to_id
                            .get(&arg)
                            .expect("Must have seen def before this use")
                    }),
                    &mut self.node_ctx.args,
                );

                let results = func.dfg.inst_results(inst);
                let ty = if results.len() == 1 {
                    func.dfg.value_type(results[0])
                } else {
                    crate::ir::types::INVALID
                };

                let load_mem_state = self.alias_analysis.get_state_for_load(inst);
                let is_readonly_load = match func.dfg[inst] {
                    InstructionData::Load {
                        opcode: Opcode::Load,
                        flags,
                        ..
                    } => flags.readonly() && flags.notrap(),
                    _ => false,
                };

                // Create the egraph node.
                let op = InstructionImms::from(&func.dfg[inst]);
                let opcode = op.opcode();
                let srcloc = func.srclocs[inst];
                let arity = u16::try_from(results.len())
                    .expect("More than 2^16 results from an instruction");

                let node = if is_readonly_load {
                    self.stats.node_created += 1;
                    self.stats.node_pure += 1;
                    Node::Pure {
                        op,
                        args,
                        ty,
                        arity,
                    }
                } else if let Some(load_mem_state) = load_mem_state {
                    let addr = args.as_slice(&self.node_ctx.args)[0];
                    trace!("load at inst {} has mem state {:?}", inst, load_mem_state);
                    self.stats.node_created += 1;
                    self.stats.node_load += 1;
                    Node::Load {
                        op,
                        ty,
                        addr,
                        mem_state: load_mem_state,
                        srcloc,
                    }
                } else if has_side_effect(func, inst) || opcode.can_load() {
                    self.stats.node_created += 1;
                    self.stats.node_inst += 1;
                    Node::Inst {
                        op,
                        args,
                        ty,
                        arity,
                        srcloc,
                        loop_level,
                    }
                } else {
                    self.stats.node_created += 1;
                    self.stats.node_pure += 1;
                    Node::Pure {
                        op,
                        args,
                        ty,
                        arity,
                    }
                };
                let dedup_needed = self.node_ctx.needs_dedup(&node);
                let is_pure = matches!(node, Node::Pure { .. });

                let mut id = self.egraph.add(node, &mut self.node_ctx);

                if dedup_needed {
                    self.stats.node_dedup_query += 1;
                    match id {
                        NewOrExisting::New(_) => {
                            self.stats.node_dedup_miss += 1;
                        }
                        NewOrExisting::Existing(_) => {
                            self.stats.node_dedup_hit += 1;
                        }
                    }
                }

                if opcode == Opcode::Store {
                    let store_data_ty = func.dfg.value_type(func.dfg.inst_args(inst)[0]);
                    self.store_nodes.insert(inst, (store_data_ty, id.get()));
                    self.stats.store_map_insert += 1;
                }

                // Loads that did not already merge into an existing
                // load: try to forward from a store (store-to-load
                // forwarding).
                if let NewOrExisting::New(new_id) = id {
                    if load_mem_state.is_some() {
                        let opt_id = crate::opts::store_to_load(new_id, self);
                        trace!("store_to_load: {} -> {}", new_id, opt_id);
                        if opt_id != new_id {
                            id = NewOrExisting::Existing(opt_id);
                        }
                    }
                }

                // Now either optimize (for new pure nodes), or add to
                // the side-effecting list (for all other new nodes).
                let id = match id {
                    NewOrExisting::Existing(id) => id,
                    NewOrExisting::New(id) if is_pure => {
                        // Apply all optimization rules immediately; the
                        // aegraph (acyclic egraph) works best when we do
                        // this so all uses pick up the eclass with all
                        // possible enodes.
                        crate::opts::optimize_eclass(id, self)
                    }
                    NewOrExisting::New(id) => {
                        self.side_effect_ids.push(id);
                        self.stats.side_effect_nodes += 1;
                        id
                    }
                };

                // Create results and save in Value->Id map.
                match results {
                    &[] => {}
                    &[one_result] => {
                        trace!("build: value {} -> id {}", one_result, id);
                        value_to_id.insert(one_result, id);
                    }
                    many_results => {
                        debug_assert!(many_results.len() > 1);
                        for (i, &result) in many_results.iter().enumerate() {
                            let ty = func.dfg.value_type(result);
                            let projection = self
                                .egraph
                                .add(
                                    Node::Result {
                                        value: id,
                                        result: i,
                                        ty,
                                    },
                                    &mut self.node_ctx,
                                )
                                .get();
                            self.stats.node_created += 1;
                            self.stats.node_result += 1;
                            trace!("build: value {} -> id {}", result, projection);
                            value_to_id.insert(result, projection);
                        }
                    }
                }
            }

            let side_effect_end =
                u32::try_from(self.side_effect_ids.len()).expect("Overflow in side-effect count");
            let side_effect_range = side_effect_start..side_effect_end;
            self.side_effects[block] = side_effect_range;
        }
    }

pub fn set_notrap(&mut self)

Set the `notrap` flag.

Examples found in repository

src/ir/memflags.rs (line 64)
    pub fn trusted() -> Self {
        let mut result = Self::new();
        result.set_notrap();
        result.set_aligned();
        result
    }

    /// Read a flag bit.
    fn read(self, bit: FlagBit) -> bool {
        self.bits & (1 << bit as usize) != 0
    }

    /// Set a flag bit.
    fn set(&mut self, bit: FlagBit) {
        self.bits |= 1 << bit as usize
    }

    /// Set a flag bit by name.
    ///
    /// Returns true if the flag was found and set, false for an unknown flag name.
    /// Will also return false when trying to set inconsistent endianness flags.
    pub fn set_by_name(&mut self, name: &str) -> bool {
        match NAMES.iter().position(|&s| s == name) {
            Some(bit) => {
                let bits = self.bits | 1 << bit;
                if (bits & (1 << FlagBit::LittleEndian as usize)) != 0
                    && (bits & (1 << FlagBit::BigEndian as usize)) != 0
                {
                    false
                } else {
                    self.bits = bits;
                    true
                }
            }
            None => false,
        }
    }

    /// Return endianness of the memory access.  This will return the endianness
    /// explicitly specified by the flags if any, and will default to the native
    /// endianness otherwise.  The native endianness has to be provided by the
    /// caller since it is not explicitly encoded in CLIF IR -- this allows a
    /// front end to create IR without having to know the target endianness.
    pub fn endianness(self, native_endianness: Endianness) -> Endianness {
        if self.read(FlagBit::LittleEndian) {
            Endianness::Little
        } else if self.read(FlagBit::BigEndian) {
            Endianness::Big
        } else {
            native_endianness
        }
    }

    /// Set endianness of the memory access.
    pub fn set_endianness(&mut self, endianness: Endianness) {
        match endianness {
            Endianness::Little => self.set(FlagBit::LittleEndian),
            Endianness::Big => self.set(FlagBit::BigEndian),
        };
        assert!(!(self.read(FlagBit::LittleEndian) && self.read(FlagBit::BigEndian)));
    }

    /// Set endianness of the memory access, returning new flags.
    pub fn with_endianness(mut self, endianness: Endianness) -> Self {
        self.set_endianness(endianness);
        self
    }

    /// Test if the `notrap` flag is set.
    ///
    /// Normally, trapping is part of the semantics of a load/store operation. If the platform
    /// would cause a trap when accessing the effective address, the Cranelift memory operation is
    /// also required to trap.
    ///
    /// The `notrap` flag tells Cranelift that the memory is *accessible*, which means that
    /// accesses will not trap. This makes it possible to delete an unused load or a dead store
    /// instruction.
    pub fn notrap(self) -> bool {
        self.read(FlagBit::Notrap)
    }

    /// Set the `notrap` flag.
    pub fn set_notrap(&mut self) {
        self.set(FlagBit::Notrap)
    }

    /// Set the `notrap` flag, returning new flags.
    pub fn with_notrap(mut self) -> Self {
        self.set_notrap();
        self
    }
src/legalizer/mod.rs (line 123): see the `simple_legalize` listing above.

pub fn with_notrap(mut self) -> Self

Set the `notrap` flag, returning the new flags.

pub fn aligned(self) -> bool

Test if the `aligned` flag is set.

By default, Cranelift memory instructions work with any unaligned effective address. If the `aligned` flag is set, the instruction is permitted to trap or return a wrong result if the effective address is misaligned.
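
A minimal sketch of querying and setting the bit:

use cranelift_codegen::ir::MemFlags;

// trusted() sets both `notrap` and `aligned`.
assert!(MemFlags::trusted().aligned());

// set_aligned adds the bit to existing flags in place.
let mut flags = MemFlags::new();
assert!(!flags.aligned());
flags.set_aligned();
assert!(flags.aligned());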

Examples found in repository

src/isa/x64/lower.rs (line 109)
fn is_mergeable_load(ctx: &mut Lower<Inst>, src_insn: IRInst) -> Option<(InsnInput, i32)> {
    let insn_data = ctx.data(src_insn);
    let inputs = ctx.num_inputs(src_insn);
    if inputs != 1 {
        return None;
    }

    let load_ty = ctx.output_ty(src_insn, 0);
    if ty_bits(load_ty) < 32 {
        // Narrower values are handled by ALU insts that are at least 32 bits
        // wide, which is normally OK as we ignore upper bits; but, if we
        // generate, e.g., a direct-from-memory 32-bit add for a byte value and
        // the byte is the last byte in a page, the extra data that we load is
        // incorrectly accessed. So we only allow loads to merge for
        // 32-bit-and-above widths.
        return None;
    }

    // SIMD instructions can only be load-coalesced when the loaded value comes
    // from an aligned address.
    if load_ty.is_vector() && !insn_data.memflags().map_or(false, |f| f.aligned()) {
        return None;
    }

    // Just testing the opcode is enough, because the width will always match if
    // the type does (and the type should match if the CLIF is properly
    // constructed).
    if insn_data.opcode() == Opcode::Load {
        let offset = insn_data
            .load_store_offset()
            .expect("load should have offset");
        Some((
            InsnInput {
                insn: src_insn,
                input: 0,
            },
            offset,
        ))
    } else {
        None
    }
}

Set the aligned flag.

Examples found in repository?
src/ir/memflags.rs (line 65)
    pub fn trusted() -> Self {
        let mut result = Self::new();
        result.set_notrap();
        result.set_aligned();
        result
    }

    /// Read a flag bit.
    fn read(self, bit: FlagBit) -> bool {
        self.bits & (1 << bit as usize) != 0
    }

    /// Set a flag bit.
    fn set(&mut self, bit: FlagBit) {
        self.bits |= 1 << bit as usize
    }

    /// Set a flag bit by name.
    ///
    /// Returns true if the flag was found and set, false for an unknown flag name.
    /// Will also return false when trying to set inconsistent endianness flags.
    pub fn set_by_name(&mut self, name: &str) -> bool {
        match NAMES.iter().position(|&s| s == name) {
            Some(bit) => {
                let bits = self.bits | 1 << bit;
                if (bits & (1 << FlagBit::LittleEndian as usize)) != 0
                    && (bits & (1 << FlagBit::BigEndian as usize)) != 0
                {
                    false
                } else {
                    self.bits = bits;
                    true
                }
            }
            None => false,
        }
    }

    /// Return endianness of the memory access.  This will return the endianness
    /// explicitly specified by the flags if any, and will default to the native
    /// endianness otherwise.  The native endianness has to be provided by the
    /// caller since it is not explicitly encoded in CLIF IR -- this allows a
    /// front end to create IR without having to know the target endianness.
    pub fn endianness(self, native_endianness: Endianness) -> Endianness {
        if self.read(FlagBit::LittleEndian) {
            Endianness::Little
        } else if self.read(FlagBit::BigEndian) {
            Endianness::Big
        } else {
            native_endianness
        }
    }

    /// Set endianness of the memory access.
    pub fn set_endianness(&mut self, endianness: Endianness) {
        match endianness {
            Endianness::Little => self.set(FlagBit::LittleEndian),
            Endianness::Big => self.set(FlagBit::BigEndian),
        };
        assert!(!(self.read(FlagBit::LittleEndian) && self.read(FlagBit::BigEndian)));
    }

    /// Set endianness of the memory access, returning new flags.
    pub fn with_endianness(mut self, endianness: Endianness) -> Self {
        self.set_endianness(endianness);
        self
    }

    /// Test if the `notrap` flag is set.
    ///
    /// Normally, trapping is part of the semantics of a load/store operation. If the platform
    /// would cause a trap when accessing the effective address, the Cranelift memory operation is
    /// also required to trap.
    ///
    /// The `notrap` flag tells Cranelift that the memory is *accessible*, which means that
    /// accesses will not trap. This makes it possible to delete an unused load or a dead store
    /// instruction.
    pub fn notrap(self) -> bool {
        self.read(FlagBit::Notrap)
    }

    /// Set the `notrap` flag.
    pub fn set_notrap(&mut self) {
        self.set(FlagBit::Notrap)
    }

    /// Set the `notrap` flag, returning new flags.
    pub fn with_notrap(mut self) -> Self {
        self.set_notrap();
        self
    }

    /// Test if the `aligned` flag is set.
    ///
    /// By default, Cranelift memory instructions work with any unaligned effective address. If the
    /// `aligned` flag is set, the instruction is permitted to trap or return a wrong result if the
    /// effective address is misaligned.
    pub fn aligned(self) -> bool {
        self.read(FlagBit::Aligned)
    }

    /// Set the `aligned` flag.
    pub fn set_aligned(&mut self) {
        self.set(FlagBit::Aligned)
    }

    /// Set the `aligned` flag, returning new flags.
    pub fn with_aligned(mut self) -> Self {
        self.set_aligned();
        self
    }
More examples
src/legalizer/mod.rs (line 124)
The full simple_legalize function appears above; the arms relevant to this flag legalize stack stores with both notrap and aligned set:

                    let mut mflags = MemFlags::new();
                    // Stack slots are required to be accessible and aligned.
                    mflags.set_notrap();
                    mflags.set_aligned();
                    pos.func.dfg.replace(inst).store(mflags, arg, addr, 0);

Set the aligned flag, returning new flags.

Test if the readonly flag is set.

Loads with this flag have no memory dependencies. This results in undefined behavior if the dereferenced memory is mutated at any time between when the function is called and when it is exited.
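A minimal sketch: flags for a load from memory that is accessible, aligned, and never mutated while the function runs. As the examples below show, such loads can be treated as pure and deduplicated, hoisted, or deleted:

use cranelift_codegen::ir::MemFlags;

// `trusted()` sets `notrap` and `aligned`; adding `readonly` makes the
// load side-effect-free from the optimizer's point of view.
let flags = MemFlags::trusted().with_readonly();
assert!(flags.notrap() && flags.aligned() && flags.readonly());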

Examples found in repository?
src/simple_gvn.rs (line 27)
fn is_load_and_not_readonly(inst_data: &InstructionData) -> bool {
    match *inst_data {
        InstructionData::Load { flags, .. } => !flags.readonly(),
        _ => inst_data.opcode().can_load(),
    }
}
More examples
src/licm.rs (line 150)
fn is_unsafe_load(inst_data: &InstructionData) -> bool {
    match *inst_data {
        InstructionData::Load { flags, .. } => !flags.readonly() || !flags.notrap(),
        _ => inst_data.opcode().can_load(),
    }
}
src/verifier/mod.rs (line 1683)
    fn immediate_constraints(
        &self,
        inst: Inst,
        errors: &mut VerifierErrors,
    ) -> VerifierStepResult<()> {
        let inst_data = &self.func.dfg[inst];

        match *inst_data {
            ir::InstructionData::Store { flags, .. } => {
                if flags.readonly() {
                    errors.fatal((
                        inst,
                        self.context(inst),
                        "A store instruction cannot have the `readonly` MemFlag",
                    ))
                } else {
                    Ok(())
                }
            }
            ir::InstructionData::BinaryImm8 {
                opcode: ir::instructions::Opcode::Extractlane,
                imm: lane,
                arg,
                ..
            }
            | ir::InstructionData::TernaryImm8 {
                opcode: ir::instructions::Opcode::Insertlane,
                imm: lane,
                args: [arg, _],
                ..
            } => {
                // We must be specific about the opcodes above because other instructions are using
                // the same formats.
                let ty = self.func.dfg.value_type(arg);
                if lane as u32 >= ty.lane_count() {
                    errors.fatal((
                        inst,
                        self.context(inst),
                        format!("The lane {} does not index into the type {}", lane, ty,),
                    ))
                } else {
                    Ok(())
                }
            }
            _ => Ok(()),
        }
    }
src/egraph.rs (line 193)
    fn build(&mut self, func: &Function) {
        // Mapping of SSA `Value` to eclass ID.
        let mut value_to_id = FxHashMap::default();

        // For each block in RPO, create an enode for block entry, for
        // each block param, and for each instruction.
        for &block in self.domtree.cfg_postorder().iter().rev() {
            let loop_level = self.loop_analysis.loop_level(block);
            let blockparam_start =
                u32::try_from(self.blockparam_ids_tys.len()).expect("Overflow in blockparam count");
            for (i, &value) in func.dfg.block_params(block).iter().enumerate() {
                let ty = func.dfg.value_type(value);
                let param = self
                    .egraph
                    .add(
                        Node::Param {
                            block,
                            index: i
                                .try_into()
                                .expect("blockparam index should fit in Node::Param"),
                            ty,
                            loop_level,
                        },
                        &mut self.node_ctx,
                    )
                    .get();
                value_to_id.insert(value, param);
                self.blockparam_ids_tys.push((param, ty));
                self.stats.node_created += 1;
                self.stats.node_param += 1;
            }
            let blockparam_end =
                u32::try_from(self.blockparam_ids_tys.len()).expect("Overflow in blockparam count");
            self.blockparams[block] = blockparam_start..blockparam_end;

            let side_effect_start =
                u32::try_from(self.side_effect_ids.len()).expect("Overflow in side-effect count");
            for inst in func.layout.block_insts(block) {
                // Build args from SSA values.
                let args = EntityList::from_iter(
                    func.dfg.inst_args(inst).iter().map(|&arg| {
                        let arg = func.dfg.resolve_aliases(arg);
                        *value_to_id
                            .get(&arg)
                            .expect("Must have seen def before this use")
                    }),
                    &mut self.node_ctx.args,
                );

                let results = func.dfg.inst_results(inst);
                let ty = if results.len() == 1 {
                    func.dfg.value_type(results[0])
                } else {
                    crate::ir::types::INVALID
                };

                let load_mem_state = self.alias_analysis.get_state_for_load(inst);
                let is_readonly_load = match func.dfg[inst] {
                    InstructionData::Load {
                        opcode: Opcode::Load,
                        flags,
                        ..
                    } => flags.readonly() && flags.notrap(),
                    _ => false,
                };

                // Create the egraph node.
                let op = InstructionImms::from(&func.dfg[inst]);
                let opcode = op.opcode();
                let srcloc = func.srclocs[inst];
                let arity = u16::try_from(results.len())
                    .expect("More than 2^16 results from an instruction");

                let node = if is_readonly_load {
                    self.stats.node_created += 1;
                    self.stats.node_pure += 1;
                    Node::Pure {
                        op,
                        args,
                        ty,
                        arity,
                    }
                } else if let Some(load_mem_state) = load_mem_state {
                    let addr = args.as_slice(&self.node_ctx.args)[0];
                    trace!("load at inst {} has mem state {:?}", inst, load_mem_state);
                    self.stats.node_created += 1;
                    self.stats.node_load += 1;
                    Node::Load {
                        op,
                        ty,
                        addr,
                        mem_state: load_mem_state,
                        srcloc,
                    }
                } else if has_side_effect(func, inst) || opcode.can_load() {
                    self.stats.node_created += 1;
                    self.stats.node_inst += 1;
                    Node::Inst {
                        op,
                        args,
                        ty,
                        arity,
                        srcloc,
                        loop_level,
                    }
                } else {
                    self.stats.node_created += 1;
                    self.stats.node_pure += 1;
                    Node::Pure {
                        op,
                        args,
                        ty,
                        arity,
                    }
                };
                let dedup_needed = self.node_ctx.needs_dedup(&node);
                let is_pure = matches!(node, Node::Pure { .. });

                let mut id = self.egraph.add(node, &mut self.node_ctx);

                if dedup_needed {
                    self.stats.node_dedup_query += 1;
                    match id {
                        NewOrExisting::New(_) => {
                            self.stats.node_dedup_miss += 1;
                        }
                        NewOrExisting::Existing(_) => {
                            self.stats.node_dedup_hit += 1;
                        }
                    }
                }

                if opcode == Opcode::Store {
                    let store_data_ty = func.dfg.value_type(func.dfg.inst_args(inst)[0]);
                    self.store_nodes.insert(inst, (store_data_ty, id.get()));
                    self.stats.store_map_insert += 1;
                }

                // Loads that did not already merge into an existing
                // load: try to forward from a store (store-to-load
                // forwarding).
                if let NewOrExisting::New(new_id) = id {
                    if load_mem_state.is_some() {
                        let opt_id = crate::opts::store_to_load(new_id, self);
                        trace!("store_to_load: {} -> {}", new_id, opt_id);
                        if opt_id != new_id {
                            id = NewOrExisting::Existing(opt_id);
                        }
                    }
                }

                // Now either optimize (for new pure nodes), or add to
                // the side-effecting list (for all other new nodes).
                let id = match id {
                    NewOrExisting::Existing(id) => id,
                    NewOrExisting::New(id) if is_pure => {
                        // Apply all optimization rules immediately; the
                        // aegraph (acyclic egraph) works best when we do
                        // this so all uses pick up the eclass with all
                        // possible enodes.
                        crate::opts::optimize_eclass(id, self)
                    }
                    NewOrExisting::New(id) => {
                        self.side_effect_ids.push(id);
                        self.stats.side_effect_nodes += 1;
                        id
                    }
                };

                // Create results and save in Value->Id map.
                match results {
                    &[] => {}
                    &[one_result] => {
                        trace!("build: value {} -> id {}", one_result, id);
                        value_to_id.insert(one_result, id);
                    }
                    many_results => {
                        debug_assert!(many_results.len() > 1);
                        for (i, &result) in many_results.iter().enumerate() {
                            let ty = func.dfg.value_type(result);
                            let projection = self
                                .egraph
                                .add(
                                    Node::Result {
                                        value: id,
                                        result: i,
                                        ty,
                                    },
                                    &mut self.node_ctx,
                                )
                                .get();
                            self.stats.node_created += 1;
                            self.stats.node_result += 1;
                            trace!("build: value {} -> id {}", result, projection);
                            value_to_id.insert(result, projection);
                        }
                    }
                }
            }

            let side_effect_end =
                u32::try_from(self.side_effect_ids.len()).expect("Overflow in side-effect count");
            let side_effect_range = side_effect_start..side_effect_end;
            self.side_effects[block] = side_effect_range;
        }
    }

Set the readonly flag.

Examples found in repository?
src/ir/memflags.rs (line 190)
    pub fn with_readonly(mut self) -> Self {
        self.set_readonly();
        self
    }
More examples
src/legalizer/globalvalue.rs (line 122)
fn load_addr(
    inst: ir::Inst,
    func: &mut ir::Function,
    base: ir::GlobalValue,
    offset: ir::immediates::Offset32,
    global_type: ir::Type,
    readonly: bool,
    isa: &dyn TargetIsa,
) {
    // We need to load a pointer from the `base` global value, so insert a new `global_value`
    // instruction. This depends on the iterative legalization loop. Note that the IR verifier
    // detects any cycles in the `load` globals.
    let ptr_ty = isa.pointer_type();
    let mut pos = FuncCursor::new(func).at_inst(inst);
    pos.use_srcloc(inst);

    // Get the value for the base. For tidiness, expand VMContext here so that we avoid
    // `vmctx_addr` which creates an otherwise unneeded value alias.
    let base_addr = if let ir::GlobalValueData::VMContext = pos.func.global_values[base] {
        pos.func
            .special_param(ir::ArgumentPurpose::VMContext)
            .expect("Missing vmctx parameter")
    } else {
        pos.ins().global_value(ptr_ty, base)
    };

    // Global-value loads are always notrap and aligned. They may be readonly.
    let mut mflags = ir::MemFlags::trusted();
    if readonly {
        mflags.set_readonly();
    }

    // Perform the load.
    pos.func
        .dfg
        .replace(inst)
        .load(global_type, mflags, base_addr, offset);
}

Set the readonly flag, returning new flags.

Test if the heap bit is set.

Loads and stores with this flag access the “heap” part of abstract state. This is disjoint from the “table”, “vmctx”, and “other” parts of abstract state. In concrete terms, this means that behavior is undefined if the same memory is also accessed by another load/store with one of the other alias-analysis bits (table, vmctx) set, or with heap not set.
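A minimal sketch of tagging an access with one alias class (the same pattern applies to the table and vmctx bits below):

use cranelift_codegen::ir::MemFlags;

let heap_access = MemFlags::new().with_heap();
assert!(heap_access.heap() && !heap_access.table() && !heap_access.vmctx());

// The alias-analysis bits are mutually exclusive: as the excerpts below
// show, `set_table` and `set_vmctx` assert that no other class bit is
// already set, so `MemFlags::new().with_heap().with_table()` panics.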

Examples found in repository?
src/ir/memflags.rs (line 234)
    pub fn set_table(&mut self) {
        assert!(!self.heap() && !self.vmctx());
        self.set(FlagBit::Table);
    }

    /// Set the `table` bit, returning new flags.
    pub fn with_table(mut self) -> Self {
        self.set_table();
        self
    }

    /// Test if the `vmctx` bit is set.
    ///
    /// Loads and stores with this flag accesses the "vmctx" part of
    /// abstract state. This is disjoint from the "heap", "table",
    /// and "other" parts of abstract state. In concrete terms, this
    /// means that behavior is undefined if the same memory is also
    /// accessed by another load/store with one of the other
    /// alias-analysis bits (`heap`, `table`) set, or `vmctx` not set.
    pub fn vmctx(self) -> bool {
        self.read(FlagBit::Vmctx)
    }

    /// Set the `vmctx` bit. See the notes about mutual exclusion with
    /// other bits in `vmctx()`.
    pub fn set_vmctx(&mut self) {
        assert!(!self.heap() && !self.table());
        self.set(FlagBit::Vmctx);
    }
More examples
src/egraph/stores.rs (line 147)
    fn for_flags(&mut self, memflags: MemFlags) -> &mut MemoryState {
        if memflags.heap() {
            &mut self.heap
        } else if memflags.table() {
            &mut self.table
        } else if memflags.vmctx() {
            &mut self.vmctx
        } else {
            &mut self.other
        }
    }
src/alias_analysis.rs (line 96)
    fn update(&mut self, func: &Function, inst: Inst) {
        let opcode = func.dfg[inst].opcode();
        if has_memory_fence_semantics(opcode) {
            self.heap = inst.into();
            self.table = inst.into();
            self.vmctx = inst.into();
            self.other = inst.into();
        } else if opcode.can_store() {
            if let Some(memflags) = func.dfg[inst].memflags() {
                if memflags.heap() {
                    self.heap = inst.into();
                } else if memflags.table() {
                    self.table = inst.into();
                } else if memflags.vmctx() {
                    self.vmctx = inst.into();
                } else {
                    self.other = inst.into();
                }
            } else {
                self.heap = inst.into();
                self.table = inst.into();
                self.vmctx = inst.into();
                self.other = inst.into();
            }
        }
    }

    fn get_last_store(&self, func: &Function, inst: Inst) -> PackedOption<Inst> {
        if let Some(memflags) = func.dfg[inst].memflags() {
            if memflags.heap() {
                self.heap
            } else if memflags.table() {
                self.table
            } else if memflags.vmctx() {
                self.vmctx
            } else {
                self.other
            }
        } else if func.dfg[inst].opcode().can_load() || func.dfg[inst].opcode().can_store() {
            inst.into()
        } else {
            PackedOption::default()
        }
    }

Set the heap bit. See the notes about mutual exclusion with other bits in heap().

Examples found in repository?
src/ir/memflags.rs (line 215)
    pub fn with_heap(mut self) -> Self {
        self.set_heap();
        self
    }

Set the heap bit, returning new flags.

Test if the table bit is set.

Loads and stores with this flag access the “table” part of abstract state. This is disjoint from the “heap”, “vmctx”, and “other” parts of abstract state. In concrete terms, this means that behavior is undefined if the same memory is also accessed by another load/store with one of the other alias-analysis bits (heap, vmctx) set, or with table not set.
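Flags can also be set by textual name via set_by_name, as shown in the memflags.rs excerpt above. A minimal sketch, assuming the textual flag names match the accessor names:

use cranelift_codegen::ir::MemFlags;

let mut flags = MemFlags::new();
assert!(flags.set_by_name("table")); // assumed name; returns true when recognized
assert!(flags.table());
assert!(!flags.set_by_name("bogus")); // unknown names are reported, not panicked on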

Examples found in repository?
src/ir/memflags.rs (line 209)
    pub fn set_heap(&mut self) {
        assert!(!self.table() && !self.vmctx());
        self.set(FlagBit::Heap);
    }

    /// Set the `heap` bit, returning new flags.
    pub fn with_heap(mut self) -> Self {
        self.set_heap();
        self
    }

    /// Test if the `table` bit is set.
    ///
    /// Loads and stores with this flag accesses the "table" part of
    /// abstract state. This is disjoint from the "heap", "vmctx",
    /// and "other" parts of abstract state. In concrete terms, this
    /// means that behavior is undefined if the same memory is also
    /// accessed by another load/store with one of the other
    /// alias-analysis bits (`heap`, `vmctx`) set, or `table` not set.
    pub fn table(self) -> bool {
        self.read(FlagBit::Table)
    }

    /// Set the `table` bit. See the notes about mutual exclusion with
    /// other bits in `table()`.
    pub fn set_table(&mut self) {
        assert!(!self.heap() && !self.vmctx());
        self.set(FlagBit::Table);
    }

    /// Set the `table` bit, returning new flags.
    pub fn with_table(mut self) -> Self {
        self.set_table();
        self
    }

    /// Test if the `vmctx` bit is set.
    ///
    /// Loads and stores with this flag accesses the "vmctx" part of
    /// abstract state. This is disjoint from the "heap", "table",
    /// and "other" parts of abstract state. In concrete terms, this
    /// means that behavior is undefined if the same memory is also
    /// accessed by another load/store with one of the other
    /// alias-analysis bits (`heap`, `table`) set, or `vmctx` not set.
    pub fn vmctx(self) -> bool {
        self.read(FlagBit::Vmctx)
    }

    /// Set the `vmctx` bit. See the notes about mutual exclusion with
    /// other bits in `vmctx()`.
    pub fn set_vmctx(&mut self) {
        assert!(!self.heap() && !self.table());
        self.set(FlagBit::Vmctx);
    }
More examples
src/egraph/stores.rs (line 149) and src/alias_analysis.rs (line 98): the same for_flags, update, and get_last_store examples shown under the heap bit above.

Set the table bit. See the notes about mutual exclusion with other bits in table().

Examples found in repository?
src/ir/memflags.rs (line 240)
    pub fn with_table(mut self) -> Self {
        self.set_table();
        self
    }

Set the table bit, returning new flags.

Test if the vmctx bit is set.

Loads and stores with this flag access the “vmctx” part of abstract state. This is disjoint from the “heap”, “table”, and “other” parts of abstract state. In concrete terms, this means that behavior is undefined if the same memory is also accessed by another load/store with one of the other alias-analysis bits (heap, table) set, or with vmctx not set.

Examples found in repository?
src/ir/memflags.rs (line 209): the same set_heap/set_table excerpt shown under the table bit above.
More examples
src/egraph/stores.rs (line 151) and src/alias_analysis.rs (line 100): the same examples shown under the heap bit above.

Set the vmctx bit. See the notes about mutual exclusion with other bits in vmctx().

Examples found in repository?
src/ir/memflags.rs (line 265)
    pub fn with_vmctx(mut self) -> Self {
        self.set_vmctx();
        self
    }

Set the vmctx bit, returning new flags.
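Putting the pieces together, a minimal sketch of a fully specified access: trusted (notrap and aligned), read-only, vmctx-classified, with an explicit byte order:

use cranelift_codegen::ir::{Endianness, MemFlags};

let flags = MemFlags::trusted()
    .with_readonly()
    .with_vmctx()
    .with_endianness(Endianness::Little);
assert!(flags.notrap() && flags.aligned() && flags.readonly() && flags.vmctx());
// An explicit byte order overrides whatever the native endianness is.
assert_eq!(flags.endianness(Endianness::Big), Endianness::Little);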

Trait Implementations§

Clone: returns a copy of the value; copy-assignment from a source value is also provided.
Debug, Display: format the value using the given formatter.
Hash: feeds this value, or a slice of such values, into the given Hasher.
PartialEq: tests self and other for equality, as used by == and !=.
