pub struct ControlFlowGraph { /* private fields */ }

The Control Flow Graph maintains a mapping from each block to its predecessors and successors. A predecessor is recorded as a BlockPredecessor, pairing the predecessor basic block with the branch or jump instruction in it that transfers control to the block; a successor is recorded simply as a basic block.
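
For orientation, here is a minimal sketch of building a two-block function and querying its CFG. It is not taken from the crate’s documentation: it assumes cranelift-codegen’s usual module paths (cursor, flowgraph, ir) and a Function::new() default constructor, and it reuses the IR-building calls that appear in the repository examples below.

use cranelift_codegen::cursor::{Cursor, FuncCursor};
use cranelift_codegen::flowgraph::ControlFlowGraph;
use cranelift_codegen::ir::{Function, InstBuilder};

fn two_block_cfg() {
    // Build:  block0: jump block1;  block1: return
    let mut func = Function::new();
    let block0 = func.dfg.make_block();
    let block1 = func.dfg.make_block();
    func.layout.append_block(block0);
    func.layout.append_block(block1);
    {
        let mut pos = FuncCursor::new(&mut func);
        pos.goto_bottom(block0);
        pos.ins().jump(block1, &[]);
        pos.goto_bottom(block1);
        pos.ins().return_(&[]);
    }

    let cfg = ControlFlowGraph::with_function(&func);
    // block0's only successor is block1; block1 ends in a return and has none.
    assert_eq!(cfg.succ_iter(block0).collect::<Vec<_>>(), vec![block1]);
    assert_eq!(cfg.succ_iter(block1).count(), 0);
}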

Implementations§

pub fn new() -> Self

Allocate a new blank control flow graph.
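
A blank CFG is not valid until compute has run. A tiny hedged sketch of the usual pairing (helper name hypothetical); this is exactly what with_function does, as the first repository example shows:

use cranelift_codegen::flowgraph::ControlFlowGraph;
use cranelift_codegen::ir::Function;

fn build_cfg(func: &Function) -> ControlFlowGraph {
    let mut cfg = ControlFlowGraph::new();
    cfg.compute(func); // populate the predecessor/successor maps from `func`
    cfg
}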

Examples found in repository
src/flowgraph.rs (line 102)
    pub fn with_function(func: &Function) -> Self {
        let mut cfg = Self::new();
        cfg.compute(func);
        cfg
    }
src/context.rs (line 76)
    pub fn for_function(func: Function) -> Self {
        Self {
            func,
            cfg: ControlFlowGraph::new(),
            domtree: DominatorTree::new(),
            loop_analysis: LoopAnalysis::new(),
            compiled_code: None,
            want_disasm: false,
        }
    }

pub fn clear(&mut self)

Clear all data structures in this control flow graph.
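
After clear, the CFG reports invalid until compute runs again (see is_valid below). A hedged sketch, with the helper name hypothetical:

use cranelift_codegen::flowgraph::ControlFlowGraph;
use cranelift_codegen::ir::Function;

fn reset(cfg: &mut ControlFlowGraph, func: &Function) {
    cfg.clear();
    debug_assert!(!cfg.is_valid()); // no compute() since the last clear()
    cfg.compute(func);
    debug_assert!(cfg.is_valid());
}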

Examples found in repository
src/context.rs (line 87)
    pub fn clear(&mut self) {
        self.func.clear();
        self.cfg.clear();
        self.domtree.clear();
        self.loop_analysis.clear();
        self.compiled_code = None;
        self.want_disasm = false;
    }
src/flowgraph.rs (line 112)
    pub fn compute(&mut self, func: &Function) {
        let _tt = timing::flowgraph();
        self.clear();
        self.data.resize(func.dfg.num_blocks());

        for block in &func.layout {
            self.compute_block(func, block);
        }

        self.valid = true;
    }

pub fn with_function(func: &Function) -> Self

Allocate and compute the control flow graph for func.
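
This is equivalent to new followed by compute, as its repository definition below shows. A short hedged usage sketch (function name hypothetical):

use cranelift_codegen::flowgraph::ControlFlowGraph;
use cranelift_codegen::ir::Function;

fn analyze(func: &Function) {
    let cfg = ControlFlowGraph::with_function(func);
    debug_assert!(cfg.is_valid()); // ready for pred_iter/succ_iter immediately
}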

Examples found in repository
src/cfg_printer.rs (line 23)
    pub fn new(func: &'a Function) -> Self {
        Self {
            func,
            cfg: ControlFlowGraph::with_function(func),
        }
    }
src/verifier/mod.rs (line 306)
    pub fn new(func: &'a Function, fisa: FlagsOrIsa<'a>) -> Self {
        let expected_cfg = ControlFlowGraph::with_function(func);
        let expected_domtree = DominatorTree::with_function(func, &expected_cfg);
        Self {
            func,
            expected_cfg,
            expected_domtree,
            isa: fisa.isa,
        }
    }

pub fn compute(&mut self, func: &Function)

Compute the control flow graph of func.

This will clear and overwrite any information already stored in this data structure.
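
Downstream analyses derived from the old CFG must be recomputed afterwards; do_licm in the examples below ends with exactly this CFG-then-domtree order. A hedged sketch (helper name hypothetical):

use cranelift_codegen::dominator_tree::DominatorTree;
use cranelift_codegen::flowgraph::ControlFlowGraph;
use cranelift_codegen::ir::Function;

fn recompute_analyses(func: &Function, cfg: &mut ControlFlowGraph, domtree: &mut DominatorTree) {
    cfg.compute(func);          // clears, then rebuilds every edge from `func`
    domtree.compute(func, cfg); // the domtree depends on the fresh CFG
}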

Examples found in repository
src/context.rs (line 316)
    pub fn compute_cfg(&mut self) {
        self.cfg.compute(&self.func)
    }
src/flowgraph.rs (line 103)
    pub fn with_function(func: &Function) -> Self {
        let mut cfg = Self::new();
        cfg.compute(func);
        cfg
    }
src/licm.rs (line 58)
pub fn do_licm(
    func: &mut Function,
    cfg: &mut ControlFlowGraph,
    domtree: &mut DominatorTree,
    loop_analysis: &mut LoopAnalysis,
) {
    let _tt = timing::licm();
    debug_assert!(cfg.is_valid());
    debug_assert!(domtree.is_valid());
    debug_assert!(loop_analysis.is_valid());

    for lp in loop_analysis.loops() {
        // For each loop that we want to optimize we determine the set of loop-invariant
        // instructions
        let invariant_insts = remove_loop_invariant_instructions(lp, func, cfg, loop_analysis);
        // Then we create the loop's pre-header and fill it with the invariant instructions
        // Then we remove the invariant instructions from the loop body
        if !invariant_insts.is_empty() {
            // If the loop has a natural pre-header we use it, otherwise we create it.
            let mut pos;
            match has_pre_header(&func.layout, cfg, domtree, loop_analysis.loop_header(lp)) {
                None => {
                    let pre_header =
                        create_pre_header(loop_analysis.loop_header(lp), func, cfg, domtree);
                    pos = FuncCursor::new(func).at_last_inst(pre_header);
                }
                // If there is a natural pre-header we insert new instructions just before the
                // related jumping instruction (which is not necessarily at the end).
                Some((_, last_inst)) => {
                    pos = FuncCursor::new(func).at_inst(last_inst);
                }
            };
            // The last instruction of the pre-header is the termination instruction (usually
            // a jump) so we need to insert just before this.
            for inst in invariant_insts {
                pos.insert_inst(inst);
            }
        }
    }
    // We have to recompute the domtree to account for the changes
    cfg.compute(func);
    domtree.compute(func, cfg);
}

pub fn recompute_block(&mut self, func: &Function, block: Block)

Recompute the control flow graph of block.

This is for use after modifying instructions within a specific block. It recomputes all edges from block while leaving edges to block intact. Its functionality is a subset of that of the more expensive compute, and it should be used when we know we don’t need to recompute the CFG from scratch, but rather that our changes have been restricted to specific blocks.
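
The pattern in the repository examples below is: rewrite instructions, then call recompute_block once for each block whose outgoing branches may have changed. A hedged sketch (helper name hypothetical):

use cranelift_codegen::flowgraph::ControlFlowGraph;
use cranelift_codegen::ir::{Block, Function};

fn refresh_edges(cfg: &mut ControlFlowGraph, func: &Function, changed: &[Block]) {
    for &block in changed {
        // Refreshes only the edges *from* `block`; edges *to* it stay intact.
        cfg.recompute_block(func, block);
    }
}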

Examples found in repository
src/unreachable_code.rs (line 40)
pub fn eliminate_unreachable_code(
    func: &mut ir::Function,
    cfg: &mut ControlFlowGraph,
    domtree: &DominatorTree,
) {
    let _tt = timing::unreachable_code();
    let mut pos = FuncCursor::new(func);
    while let Some(block) = pos.next_block() {
        if domtree.is_reachable(block) {
            continue;
        }

        trace!("Eliminating unreachable {}", block);
        // Move the cursor out of the way and make sure the next loop iteration goes to the right
        // block.
        pos.prev_block();

        // Remove all instructions from `block`.
        while let Some(inst) = pos.func.layout.first_inst(block) {
            trace!(" - {}", pos.func.dfg.display_inst(inst));
            pos.func.layout.remove_inst(inst);
        }

        // Once the block is completely empty, we can update the CFG which removes it from any
        // predecessor lists.
        cfg.recompute_block(pos.func, block);

        // Finally, remove the block from the layout.
        pos.func.layout.remove_block(block);
    }

    // Remove all jumptable block-list contents that refer to unreachable
    // blocks; the jumptable itself must have been unused (or used only in an
    // unreachable block) if so. Note that we are not necessarily removing *all*
    // unused jumptables, because that would require computing their
    // reachability as well; we are just removing enough to clean up references
    // to deleted blocks.
    for jt_data in func.jump_tables.values_mut() {
        let invalid_ref = jt_data.iter().any(|block| !domtree.is_reachable(*block));
        if invalid_ref {
            jt_data.clear();
        }
    }
}
src/legalizer/mod.rs (line 340)
fn expand_cond_trap(
    inst: ir::Inst,
    func: &mut ir::Function,
    cfg: &mut ControlFlowGraph,
    opcode: ir::Opcode,
    arg: ir::Value,
    code: ir::TrapCode,
) {
    trace!(
        "expanding conditional trap: {:?}: {}",
        inst,
        func.dfg.display_inst(inst)
    );

    // Parse the instruction.
    let trapz = match opcode {
        ir::Opcode::Trapz => true,
        ir::Opcode::Trapnz | ir::Opcode::ResumableTrapnz => false,
        _ => panic!("Expected cond trap: {}", func.dfg.display_inst(inst)),
    };

    // Split the block after `inst`:
    //
    //     trapnz arg
    //     ..
    //
    // Becomes:
    //
    //     brz arg, new_block_resume
    //     jump new_block_trap
    //
    //   new_block_trap:
    //     trap
    //
    //   new_block_resume:
    //     ..
    let old_block = func.layout.pp_block(inst);
    let new_block_trap = func.dfg.make_block();
    let new_block_resume = func.dfg.make_block();

    // Trapping is a rare event, mark the trapping block as cold.
    func.layout.set_cold(new_block_trap);

    // Replace trap instruction by the inverted condition.
    if trapz {
        func.dfg.replace(inst).brnz(arg, new_block_resume, &[]);
    } else {
        func.dfg.replace(inst).brz(arg, new_block_resume, &[]);
    }

    // Add jump instruction after the inverted branch.
    let mut pos = FuncCursor::new(func).after_inst(inst);
    pos.use_srcloc(inst);
    pos.ins().jump(new_block_trap, &[]);

    // Insert the new label and the unconditional trap terminator.
    pos.insert_block(new_block_trap);

    match opcode {
        ir::Opcode::Trapz | ir::Opcode::Trapnz => {
            pos.ins().trap(code);
        }
        ir::Opcode::ResumableTrapnz => {
            pos.ins().resumable_trap(code);
            pos.ins().jump(new_block_resume, &[]);
        }
        _ => unreachable!(),
    }

    // Insert the new label and resume the execution when the trap fails.
    pos.insert_block(new_block_resume);

    // Finally update the CFG.
    cfg.recompute_block(pos.func, old_block);
    cfg.recompute_block(pos.func, new_block_resume);
    cfg.recompute_block(pos.func, new_block_trap);
}
src/simple_preopt.rs (line 573)
fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, block: Block, inst: Inst) {
    let (term_inst, term_inst_args, term_dest, cond_inst, cond_inst_args, cond_dest, kind) =
        match pos.func.dfg[inst] {
            InstructionData::Jump {
                opcode: Opcode::Jump,
                destination,
                ref args,
            } => {
                let next_block = if let Some(next_block) = pos.func.layout.next_block(block) {
                    next_block
                } else {
                    return;
                };

                if destination == next_block {
                    return;
                }

                let prev_inst = if let Some(prev_inst) = pos.func.layout.prev_inst(inst) {
                    prev_inst
                } else {
                    return;
                };

                let prev_inst_data = &pos.func.dfg[prev_inst];

                if let Some(prev_dest) = prev_inst_data.branch_destination() {
                    if prev_dest != next_block {
                        return;
                    }
                } else {
                    return;
                }

                match prev_inst_data {
                    InstructionData::Branch {
                        opcode,
                        args: ref prev_args,
                        destination: cond_dest,
                    } => {
                        let cond_arg = {
                            let args = pos.func.dfg.inst_args(prev_inst);
                            args[0]
                        };

                        let kind = match opcode {
                            Opcode::Brz => BranchOrderKind::BrzToBrnz(cond_arg),
                            Opcode::Brnz => BranchOrderKind::BrnzToBrz(cond_arg),
                            _ => panic!("unexpected opcode"),
                        };

                        (
                            inst,
                            args.clone(),
                            destination,
                            prev_inst,
                            prev_args.clone(),
                            *cond_dest,
                            kind,
                        )
                    }
                    _ => return,
                }
            }

            _ => return,
        };

    let cond_args = cond_inst_args.as_slice(&pos.func.dfg.value_lists).to_vec();
    let term_args = term_inst_args.as_slice(&pos.func.dfg.value_lists).to_vec();

    match kind {
        BranchOrderKind::BrnzToBrz(cond_arg) => {
            pos.func
                .dfg
                .replace(term_inst)
                .jump(cond_dest, &cond_args[1..]);
            pos.func
                .dfg
                .replace(cond_inst)
                .brz(cond_arg, term_dest, &term_args);
        }
        BranchOrderKind::BrzToBrnz(cond_arg) => {
            pos.func
                .dfg
                .replace(term_inst)
                .jump(cond_dest, &cond_args[1..]);
            pos.func
                .dfg
                .replace(cond_inst)
                .brnz(cond_arg, term_dest, &term_args);
        }
    }

    cfg.recompute_block(pos.func, block);
}
src/legalizer/heap.rs (line 281)
fn bounds_check_and_compute_addr(
    pos: &mut FuncCursor,
    cfg: &mut ControlFlowGraph,
    isa: &dyn TargetIsa,
    heap: ir::Heap,
    // Dynamic operand indexing into the heap.
    index: ir::Value,
    // Static immediate added to the index.
    offset: u32,
    // Static size of the heap access.
    access_size: u8,
) -> ir::Value {
    let pointer_type = isa.pointer_type();
    let spectre = isa.flags().enable_heap_access_spectre_mitigation();
    let offset_and_size = offset_plus_size(offset, access_size);

    let ir::HeapData {
        base: _,
        min_size,
        offset_guard_size: guard_size,
        style,
        index_type,
    } = pos.func.heaps[heap].clone();

    let index = cast_index_to_pointer_ty(index, index_type, pointer_type, pos);

    // We need to emit code that will trap (or compute an address that will trap
    // when accessed) if
    //
    //     index + offset + access_size > bound
    //
    // or if the `index + offset + access_size` addition overflows.
    //
    // Note that we ultimately want a 64-bit integer (we only target 64-bit
    // architectures at the moment) and that `offset` is a `u32` and
    // `access_size` is a `u8`. This means that we can add the latter together
    // as `u64`s without fear of overflow, and we only have to be concerned with
    // whether adding in `index` will overflow.
    //
    // Finally, the following right-hand sides of the matches do have a little
    // bit of duplicated code across them, but I think writing it this way is
    // worth it for readability and seeing very clearly each of our cases for
    // different bounds checks and optimizations of those bounds checks. It is
    // intentionally written in a straightforward case-matching style that will
    // hopefully make it easy to port to ISLE one day.
    match style {
        // ====== Dynamic Memories ======
        //
        // 1. First special case for when `offset + access_size == 1`:
        //
        //            index + 1 > bound
        //        ==> index >= bound
        //
        //    1.a. When Spectre mitigations are enabled, avoid duplicating
        //         bounds checks between the mitigations and the regular bounds
        //         checks.
        ir::HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 && spectre => {
            let bound = pos.ins().global_value(pointer_type, bound_gv);
            compute_addr(
                isa,
                pos,
                heap,
                pointer_type,
                index,
                offset,
                Some(SpectreOobComparison {
                    cc: IntCC::UnsignedGreaterThanOrEqual,
                    lhs: index,
                    rhs: bound,
                }),
            )
        }
        //    1.b. Emit explicit `index >= bound` bounds checks.
        ir::HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 => {
            let bound = pos.ins().global_value(pointer_type, bound_gv);
            let oob = pos
                .ins()
                .icmp(IntCC::UnsignedGreaterThanOrEqual, index, bound);
            pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
            compute_addr(isa, pos, heap, pointer_type, index, offset, None)
        }

        // 2. Second special case for when `offset + access_size <= min_size`.
        //
        //    We know that `bound >= min_size`, so we can do the following
        //    comparison, without fear of the right-hand side wrapping around:
        //
        //            index + offset + access_size > bound
        //        ==> index > bound - (offset + access_size)
        //
        //    2.a. Dedupe bounds checks with Spectre mitigations.
        ir::HeapStyle::Dynamic { bound_gv } if offset_and_size <= min_size.into() && spectre => {
            let bound = pos.ins().global_value(pointer_type, bound_gv);
            let adjusted_bound = pos.ins().iadd_imm(bound, -(offset_and_size as i64));
            compute_addr(
                isa,
                pos,
                heap,
                pointer_type,
                index,
                offset,
                Some(SpectreOobComparison {
                    cc: IntCC::UnsignedGreaterThan,
                    lhs: index,
                    rhs: adjusted_bound,
                }),
            )
        }
        //    2.b. Emit explicit `index > bound - (offset + access_size)` bounds
        //         checks.
        ir::HeapStyle::Dynamic { bound_gv } if offset_and_size <= min_size.into() => {
            let bound = pos.ins().global_value(pointer_type, bound_gv);
            let adjusted_bound = pos.ins().iadd_imm(bound, -(offset_and_size as i64));
            let oob = pos
                .ins()
                .icmp(IntCC::UnsignedGreaterThan, index, adjusted_bound);
            pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
            compute_addr(isa, pos, heap, pointer_type, index, offset, None)
        }

        // 3. General case for dynamic memories:
        //
        //        index + offset + access_size > bound
        //
        //    And we have to handle the overflow case in the left-hand side.
        //
        //    3.a. Dedupe bounds checks with Spectre mitigations.
        ir::HeapStyle::Dynamic { bound_gv } if spectre => {
            let access_size_val = pos.ins().iconst(pointer_type, offset_and_size as i64);
            let adjusted_index =
                pos.ins()
                    .uadd_overflow_trap(index, access_size_val, ir::TrapCode::HeapOutOfBounds);
            let bound = pos.ins().global_value(pointer_type, bound_gv);
            compute_addr(
                isa,
                pos,
                heap,
                pointer_type,
                index,
                offset,
                Some(SpectreOobComparison {
                    cc: IntCC::UnsignedGreaterThan,
                    lhs: adjusted_index,
                    rhs: bound,
                }),
            )
        }
        //    3.b. Emit an explicit `index + offset + access_size > bound`
        //         check.
        ir::HeapStyle::Dynamic { bound_gv } => {
            let access_size_val = pos.ins().iconst(pointer_type, offset_and_size as i64);
            let adjusted_index =
                pos.ins()
                    .uadd_overflow_trap(index, access_size_val, ir::TrapCode::HeapOutOfBounds);
            let bound = pos.ins().global_value(pointer_type, bound_gv);
            let oob = pos
                .ins()
                .icmp(IntCC::UnsignedGreaterThan, adjusted_index, bound);
            pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
            compute_addr(isa, pos, heap, pointer_type, index, offset, None)
        }

        // ====== Static Memories ======
        //
        // With static memories we know the size of the heap bound at compile
        // time.
        //
        // 1. First special case: trap immediately if `offset + access_size >
        //    bound`, since we will end up being out-of-bounds regardless of the
        //    given `index`.
        ir::HeapStyle::Static { bound } if offset_and_size > bound.into() => {
            pos.ins().trap(ir::TrapCode::HeapOutOfBounds);

            // Split the block, as the trap is a terminator instruction.
            let curr_block = pos.current_block().expect("Cursor is not in a block");
            let new_block = pos.func.dfg.make_block();
            pos.insert_block(new_block);
            cfg.recompute_block(pos.func, curr_block);
            cfg.recompute_block(pos.func, new_block);

            let null = pos.ins().iconst(pointer_type, 0);
            return null;
        }

        // 2. Second special case for when we can completely omit explicit
        //    bounds checks for 32-bit static memories.
        //
        //    First, let's rewrite our comparison to move all of the constants
        //    to one side:
        //
        //            index + offset + access_size > bound
        //        ==> index > bound - (offset + access_size)
        //
        //    We know the subtraction on the right-hand side won't wrap because
        //    we didn't hit the first special case.
        //
        //    Additionally, we add our guard pages (if any) to the right-hand
        //    side, since we can rely on the virtual memory subsystem at runtime
        //    to catch out-of-bound accesses within the range `bound .. bound +
        //    guard_size`. So now we are dealing with
        //
        //        index > bound + guard_size - (offset + access_size)
        //
        //    Note that `bound + guard_size` cannot overflow for
        //    correctly-configured heaps, as otherwise the heap wouldn't fit in
        //    a 64-bit memory space.
        //
        //    The complement of our should-this-trap comparison expression is
        //    the should-this-not-trap comparison expression:
        //
        //        index <= bound + guard_size - (offset + access_size)
        //
        //    If we know the right-hand side is greater than or equal to
        //    `u32::MAX`, then
        //
        //        index <= u32::MAX <= bound + guard_size - (offset + access_size)
        //
        //    This expression is always true when the heap is indexed with
        //    32-bit integers because `index` cannot be larger than
        //    `u32::MAX`. This means that `index` is always either in bounds or
        //    within the guard page region, neither of which require emitting an
        //    explicit bounds check.
        ir::HeapStyle::Static { bound }
            if index_type == ir::types::I32
                && u64::from(u32::MAX)
                    <= u64::from(bound) + u64::from(guard_size) - offset_and_size =>
        {
            compute_addr(isa, pos, heap, pointer_type, index, offset, None)
        }

        // 3. General case for static memories.
        //
        //    We have to explicitly test whether
        //
        //        index > bound - (offset + access_size)
        //
        //    and trap if so.
        //
        //    Since we have to emit explicit bounds checks, we might as well be
        //    precise, not rely on the virtual memory subsystem at all, and not
        //    factor in the guard pages here.
        //
        //    3.a. Dedupe the Spectre mitigation and the explicit bounds check.
        ir::HeapStyle::Static { bound } if spectre => {
            // NB: this subtraction cannot wrap because we didn't hit the first
            // special case.
            let adjusted_bound = u64::from(bound) - offset_and_size;
            let adjusted_bound = pos.ins().iconst(pointer_type, adjusted_bound as i64);
            compute_addr(
                isa,
                pos,
                heap,
                pointer_type,
                index,
                offset,
                Some(SpectreOobComparison {
                    cc: IntCC::UnsignedGreaterThan,
                    lhs: index,
                    rhs: adjusted_bound,
                }),
            )
        }
        //    3.b. Emit the explicit `index > bound - (offset + access_size)`
        //         check.
        ir::HeapStyle::Static { bound } => {
            // See comment in 3.a. above.
            let adjusted_bound = u64::from(bound) - offset_and_size;
            let oob = pos
                .ins()
                .icmp_imm(IntCC::UnsignedGreaterThan, index, adjusted_bound as i64);
            pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
            compute_addr(isa, pos, heap, pointer_type, index, offset, None)
        }
    }
}

pub fn pred_iter(&self, block: Block) -> PredIter

Get an iterator over the CFG predecessors to block.
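
Each item is a BlockPredecessor, pairing the predecessor block with the branch or jump instruction in it that targets block. A small hedged sketch (helper name hypothetical):

use cranelift_codegen::flowgraph::{BlockPredecessor, ControlFlowGraph};
use cranelift_codegen::ir::Block;

fn print_predecessors(cfg: &ControlFlowGraph, block: Block) {
    for BlockPredecessor { block: pred, inst } in cfg.pred_iter(block) {
        // `inst` is the branch/jump in `pred` that transfers control to `block`.
        println!("{}:{} -> {}", pred, inst, block);
    }
}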

Examples found in repository
src/cfg_printer.rs (line 70)
    fn cfg_connections(&self, w: &mut dyn Write) -> Result {
        for block in &self.func.layout {
            for BlockPredecessor {
                block: parent,
                inst,
            } in self.cfg.pred_iter(block)
            {
                writeln!(w, "    {}:{} -> {}", parent, inst, block)?;
            }
        }
        Ok(())
    }
src/dominator_tree.rs (line 437)
    fn compute_idom(&self, block: Block, cfg: &ControlFlowGraph, layout: &Layout) -> Inst {
        // Get an iterator with just the reachable, already visited predecessors to `block`.
        // Note that during the first pass, `rpo_number` is 1 for reachable blocks that haven't
        // been visited yet, 0 for unreachable blocks.
        let mut reachable_preds = cfg
            .pred_iter(block)
            .filter(|&BlockPredecessor { block: pred, .. }| self.nodes[pred].rpo_number > 1);

        // The RPO must visit at least one predecessor before this node.
        let mut idom = reachable_preds
            .next()
            .expect("block node must have one reachable predecessor");

        for pred in reachable_preds {
            idom = self.common_dominator(idom, pred, layout);
        }

        idom.inst
    }
src/loop_analysis.rs (line 205)
    fn find_loop_headers(
        &mut self,
        cfg: &ControlFlowGraph,
        domtree: &DominatorTree,
        layout: &Layout,
    ) {
        // We traverse the CFG in reverse postorder
        for &block in domtree.cfg_postorder().iter().rev() {
            for BlockPredecessor {
                inst: pred_inst, ..
            } in cfg.pred_iter(block)
            {
                // If the block dominates one of its predecessors it is a back edge
                if domtree.dominates(block, pred_inst, layout) {
                    // This block is a loop header, so we create its associated loop
                    let lp = self.loops.push(LoopData::new(block, None));
                    self.block_loop_map[block] = lp.into();
                    break;
                    // We break because we only need one back edge to identify a loop header.
                }
            }
        }
    }

    // Intended to be called after `find_loop_headers`. For each detected loop header,
    // discovers all the blocks belonging to the loop and its inner loops. After a call to this
    // function, the loop tree is fully constructed.
    fn discover_loop_blocks(
        &mut self,
        cfg: &ControlFlowGraph,
        domtree: &DominatorTree,
        layout: &Layout,
    ) {
        let mut stack: Vec<Block> = Vec::new();
        // We handle each loop header in reverse order, corresponding to a pseudo postorder
        // traversal of the graph.
        for lp in self.loops().rev() {
            for BlockPredecessor {
                block: pred,
                inst: pred_inst,
            } in cfg.pred_iter(self.loops[lp].header)
            {
                // We follow the back edges
                if domtree.dominates(self.loops[lp].header, pred_inst, layout) {
                    stack.push(pred);
                }
            }
            while let Some(node) = stack.pop() {
                let continue_dfs: Option<Block>;
                match self.block_loop_map[node].expand() {
                    None => {
                        // The node hasn't been visited yet, we tag it as part of the loop
                        self.block_loop_map[node] = PackedOption::from(lp);
                        continue_dfs = Some(node);
                    }
                    Some(node_loop) => {
                        // We copy the node_loop into a mutable reference passed along the while
                        let mut node_loop = node_loop;
                        // The node is part of a loop, which can be lp or an inner loop
                        let mut node_loop_parent_option = self.loops[node_loop].parent;
                        while let Some(node_loop_parent) = node_loop_parent_option.expand() {
                            if node_loop_parent == lp {
                                // We have encountered lp so we stop (already visited)
                                break;
                            } else {
                                //
                                node_loop = node_loop_parent;
                                // We lookup the parent loop
                                node_loop_parent_option = self.loops[node_loop].parent;
                            }
                        }
                        // Now node_loop_parent is either:
                        // - None and node_loop is a new inner loop of lp
                        // - Some(...) and the initial node_loop was a known inner loop of lp
                        match node_loop_parent_option.expand() {
                            Some(_) => continue_dfs = None,
                            None => {
                                if node_loop != lp {
                                    self.loops[node_loop].parent = lp.into();
                                    continue_dfs = Some(self.loops[node_loop].header)
                                } else {
                                    // If lp is a one-block loop then we make sure we stop
                                    continue_dfs = None
                                }
                            }
                        }
                    }
                }
                // Now we have handled the popped node and need to continue the DFS by adding the
                // predecessors of that node
                if let Some(continue_dfs) = continue_dfs {
                    for BlockPredecessor { block: pred, .. } in cfg.pred_iter(continue_dfs) {
                        stack.push(pred)
                    }
                }
            }
        }
    }
src/licm.rs (line 84)
fn create_pre_header(
    header: Block,
    func: &mut Function,
    cfg: &mut ControlFlowGraph,
    domtree: &DominatorTree,
) -> Block {
    let pool = &mut ListPool::<Value>::new();
    let header_args_values = func.dfg.block_params(header).to_vec();
    let header_args_types: Vec<Type> = header_args_values
        .into_iter()
        .map(|val| func.dfg.value_type(val))
        .collect();
    let pre_header = func.dfg.make_block();
    let mut pre_header_args_value: EntityList<Value> = EntityList::new();
    for typ in header_args_types {
        pre_header_args_value.push(func.dfg.append_block_param(pre_header, typ), pool);
    }

    for BlockPredecessor {
        inst: last_inst, ..
    } in cfg.pred_iter(header)
    {
        // We only follow normal edges (not the back edges)
        if !domtree.dominates(header, last_inst, &func.layout) {
            func.rewrite_branch_destination(last_inst, header, pre_header);
        }
    }

    // Inserts the pre-header at the right place in the layout.
    let mut pos = FuncCursor::new(func).at_top(header);
    pos.insert_block(pre_header);
    pos.next_inst();
    pos.ins().jump(header, pre_header_args_value.as_slice(pool));

    pre_header
}

/// Detects if a loop header has a natural pre-header.
///
/// A loop header has a pre-header if there is only one predecessor that the header doesn't
/// dominate.
/// Returns the pre-header Block and the instruction jumping to the header.
fn has_pre_header(
    layout: &Layout,
    cfg: &ControlFlowGraph,
    domtree: &DominatorTree,
    header: Block,
) -> Option<(Block, Inst)> {
    let mut result = None;
    for BlockPredecessor {
        block: pred_block,
        inst: branch_inst,
    } in cfg.pred_iter(header)
    {
        // We only count normal edges (not the back edges)
        if !domtree.dominates(header, branch_inst, layout) {
            if result.is_some() {
                // We have already found one, so there is more than one
                return None;
            }
            if branch_inst != layout.last_inst(pred_block).unwrap()
                || cfg.succ_iter(pred_block).nth(1).is_some()
            {
                // It's along a critical edge, so don't use it.
                return None;
            }
            result = Some((pred_block, branch_inst));
        }
    }
    result
}
src/verifier/flags.rs (line 61)
    fn check(&mut self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
        // List of blocks that need to be processed. Blocks may be re-added to this list when we detect
        // that one of their successor blocks needs a live-in flags value.
        let mut worklist = EntitySet::with_capacity(self.func.layout.block_capacity());
        for block in self.func.layout.blocks() {
            worklist.insert(block);
        }

        while let Some(block) = worklist.pop() {
            if let Some(value) = self.visit_block(block, errors)? {
                // The block has live-in flags. Check if the value changed.
                match self.livein[block].expand() {
                    // Revisit any predecessor blocks the first time we see a live-in for `block`.
                    None => {
                        self.livein[block] = value.into();
                        for BlockPredecessor { block: pred, .. } in self.cfg.pred_iter(block) {
                            worklist.insert(pred);
                        }
                    }
                    Some(old) if old != value => {
                        return errors.fatal((
                            block,
                            format!("conflicting live-in CPU flags: {} and {}", old, value),
                        ));
                    }
                    x => assert_eq!(x, Some(value)),
                }
            } else {
                // Existing live-in flags should never be able to disappear.
                assert_eq!(self.livein[block].expand(), None);
            }
        }

        Ok(())
    }
src/verifier/mod.rs (line 1637)
    fn cfg_integrity(
        &self,
        cfg: &ControlFlowGraph,
        errors: &mut VerifierErrors,
    ) -> VerifierStepResult<()> {
        let mut expected_succs = BTreeSet::<Block>::new();
        let mut got_succs = BTreeSet::<Block>::new();
        let mut expected_preds = BTreeSet::<Inst>::new();
        let mut got_preds = BTreeSet::<Inst>::new();

        for block in self.func.layout.blocks() {
            expected_succs.extend(self.expected_cfg.succ_iter(block));
            got_succs.extend(cfg.succ_iter(block));

            let missing_succs: Vec<Block> =
                expected_succs.difference(&got_succs).cloned().collect();
            if !missing_succs.is_empty() {
                errors.report((
                    block,
                    format!("cfg lacked the following successor(s) {:?}", missing_succs),
                ));
                continue;
            }

            let excess_succs: Vec<Block> = got_succs.difference(&expected_succs).cloned().collect();
            if !excess_succs.is_empty() {
                errors.report((
                    block,
                    format!("cfg had unexpected successor(s) {:?}", excess_succs),
                ));
                continue;
            }

            expected_preds.extend(
                self.expected_cfg
                    .pred_iter(block)
                    .map(|BlockPredecessor { inst, .. }| inst),
            );
            got_preds.extend(
                cfg.pred_iter(block)
                    .map(|BlockPredecessor { inst, .. }| inst),
            );

            let missing_preds: Vec<Inst> = expected_preds.difference(&got_preds).cloned().collect();
            if !missing_preds.is_empty() {
                errors.report((
                    block,
                    format!(
                        "cfg lacked the following predecessor(s) {:?}",
                        missing_preds
                    ),
                ));
                continue;
            }

            let excess_preds: Vec<Inst> = got_preds.difference(&expected_preds).cloned().collect();
            if !excess_preds.is_empty() {
                errors.report((
                    block,
                    format!("cfg had unexpected predecessor(s) {:?}", excess_preds),
                ));
                continue;
            }

            expected_succs.clear();
            got_succs.clear();
            expected_preds.clear();
            got_preds.clear();
        }
        errors.as_result()
    }

pub fn succ_iter(&self, block: Block) -> SuccIter

Get an iterator over the CFG successors to block.
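
Successors are plain blocks (no instruction attached), and succ_iter debug-asserts validity, as the flowgraph.rs listing under is_valid below shows. A hedged sketch (helper name hypothetical):

use cranelift_codegen::flowgraph::ControlFlowGraph;
use cranelift_codegen::ir::Block;

fn successor_count(cfg: &ControlFlowGraph, block: Block) -> usize {
    // Requires a valid CFG: succ_iter debug-asserts is_valid() internally.
    cfg.succ_iter(block).count()
}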

Examples found in repository
src/licm.rs (line 125)
fn has_pre_header(
    layout: &Layout,
    cfg: &ControlFlowGraph,
    domtree: &DominatorTree,
    header: Block,
) -> Option<(Block, Inst)> {
    let mut result = None;
    for BlockPredecessor {
        block: pred_block,
        inst: branch_inst,
    } in cfg.pred_iter(header)
    {
        // We only count normal edges (not the back edges)
        if !domtree.dominates(header, branch_inst, layout) {
            if result.is_some() {
                // We have already found one, so there is more than one
                return None;
            }
            if branch_inst != layout.last_inst(pred_block).unwrap()
                || cfg.succ_iter(pred_block).nth(1).is_some()
            {
                // It's along a critical edge, so don't use it.
                return None;
            }
            result = Some((pred_block, branch_inst));
        }
    }
    result
}

/// Test whether the given opcode is unsafe to even consider for LICM.
fn trivially_unsafe_for_licm(opcode: Opcode) -> bool {
    opcode.can_store()
        || opcode.is_call()
        || opcode.is_branch()
        || opcode.is_terminator()
        || opcode.is_return()
        || opcode.can_trap()
        || opcode.other_side_effects()
        || opcode.writes_cpu_flags()
}

fn is_unsafe_load(inst_data: &InstructionData) -> bool {
    match *inst_data {
        InstructionData::Load { flags, .. } => !flags.readonly() || !flags.notrap(),
        _ => inst_data.opcode().can_load(),
    }
}

/// Test whether the given instruction is loop-invariant.
fn is_loop_invariant(inst: Inst, dfg: &DataFlowGraph, loop_values: &FxHashSet<Value>) -> bool {
    if trivially_unsafe_for_licm(dfg[inst].opcode()) {
        return false;
    }

    if is_unsafe_load(&dfg[inst]) {
        return false;
    }

    let inst_args = dfg.inst_args(inst);
    for arg in inst_args {
        let arg = dfg.resolve_aliases(*arg);
        if loop_values.contains(&arg) {
            return false;
        }
    }
    true
}

/// Traverses a loop in reverse post-order from a header block and identifies loop-invariant
/// instructions. These loop-invariant instructions are then removed from the code and returned
/// (in reverse post-order) for later use.
fn remove_loop_invariant_instructions(
    lp: Loop,
    func: &mut Function,
    cfg: &ControlFlowGraph,
    loop_analysis: &LoopAnalysis,
) -> Vec<Inst> {
    let mut loop_values: FxHashSet<Value> = FxHashSet();
    let mut invariant_insts: Vec<Inst> = Vec::new();
    let mut pos = FuncCursor::new(func);
    // We traverse the loop block in reverse post-order.
    for block in postorder_blocks_loop(loop_analysis, cfg, lp).iter().rev() {
        // Arguments of the block are loop values
        for val in pos.func.dfg.block_params(*block) {
            loop_values.insert(*val);
        }
        pos.goto_top(*block);
        #[cfg_attr(feature = "cargo-clippy", allow(clippy::block_in_if_condition_stmt))]
        while let Some(inst) = pos.next_inst() {
            if is_loop_invariant(inst, &pos.func.dfg, &loop_values) {
                // If all of the instruction's arguments are defined outside the loop
                // then this instruction is loop-invariant
                invariant_insts.push(inst);
                // We remove it from the loop
                pos.remove_inst_and_step_back();
            } else {
                // If the instruction is not loop-invariant we push its results in the set of
                // loop values
                for out in pos.func.dfg.inst_results(inst) {
                    loop_values.insert(*out);
                }
            }
        }
    }
    invariant_insts
}

/// Return blocks from a loop in post-order, starting from an entry point in the loop.
fn postorder_blocks_loop(
    loop_analysis: &LoopAnalysis,
    cfg: &ControlFlowGraph,
    lp: Loop,
) -> Vec<Block> {
    let mut grey = FxHashSet();
    let mut black = FxHashSet();
    let mut stack = vec![loop_analysis.loop_header(lp)];
    let mut postorder = Vec::new();

    while !stack.is_empty() {
        let node = stack.pop().unwrap();
        if !grey.contains(&node) {
            // This is a white node. Mark it as gray.
            grey.insert(node);
            stack.push(node);
            // Get any children we've never seen before.
            for child in cfg.succ_iter(node) {
                if loop_analysis.is_in_loop(child, lp) && !grey.contains(&child) {
                    stack.push(child);
                }
            }
        } else if !black.contains(&node) {
            postorder.push(node);
            black.insert(node);
        }
    }
    postorder
}
src/egraph/stores.rs (line 216)
    fn compute_block_input_states(
        func: &Function,
        cfg: &ControlFlowGraph,
    ) -> SecondaryMap<Block, Option<LastStores>> {
        let mut block_input = SecondaryMap::with_capacity(func.dfg.num_blocks());
        let mut worklist: SmallVec<[Block; 16]> = smallvec![];
        let mut worklist_set = FxHashSet::default();
        let entry = func.layout.entry_block().unwrap();
        worklist.push(entry);
        worklist_set.insert(entry);
        block_input[entry] = Some(LastStores::default());

        while let Some(block) = worklist.pop() {
            worklist_set.remove(&block);
            let state = block_input[block].clone().unwrap();

            trace!("alias analysis: input to {} is {:?}", block, state);

            let state = func
                .layout
                .block_insts(block)
                .fold(state, |mut state, inst| {
                    state.update(func, inst);
                    trace!("after {}: state is {:?}", inst, state);
                    state
                });

            for succ in cfg.succ_iter(block) {
                let succ_first_inst = func.layout.first_inst(succ).unwrap();
                let succ_state = &mut block_input[succ];
                let old = succ_state.clone();
                if let Some(succ_state) = succ_state.as_mut() {
                    succ_state.meet_from(&state, succ_first_inst);
                } else {
                    *succ_state = Some(state);
                };
                let updated = *succ_state != old;

                if updated && worklist_set.insert(succ) {
                    worklist.push(succ);
                }
            }
        }

        block_input
    }
src/verifier/mod.rs (line 1613)
    fn cfg_integrity(
        &self,
        cfg: &ControlFlowGraph,
        errors: &mut VerifierErrors,
    ) -> VerifierStepResult<()> {
        let mut expected_succs = BTreeSet::<Block>::new();
        let mut got_succs = BTreeSet::<Block>::new();
        let mut expected_preds = BTreeSet::<Inst>::new();
        let mut got_preds = BTreeSet::<Inst>::new();

        for block in self.func.layout.blocks() {
            expected_succs.extend(self.expected_cfg.succ_iter(block));
            got_succs.extend(cfg.succ_iter(block));

            let missing_succs: Vec<Block> =
                expected_succs.difference(&got_succs).cloned().collect();
            if !missing_succs.is_empty() {
                errors.report((
                    block,
                    format!("cfg lacked the following successor(s) {:?}", missing_succs),
                ));
                continue;
            }

            let excess_succs: Vec<Block> = got_succs.difference(&expected_succs).cloned().collect();
            if !excess_succs.is_empty() {
                errors.report((
                    block,
                    format!("cfg had unexpected successor(s) {:?}", excess_succs),
                ));
                continue;
            }

            expected_preds.extend(
                self.expected_cfg
                    .pred_iter(block)
                    .map(|BlockPredecessor { inst, .. }| inst),
            );
            got_preds.extend(
                cfg.pred_iter(block)
                    .map(|BlockPredecessor { inst, .. }| inst),
            );

            let missing_preds: Vec<Inst> = expected_preds.difference(&got_preds).cloned().collect();
            if !missing_preds.is_empty() {
                errors.report((
                    block,
                    format!(
                        "cfg lacked the following predecessor(s) {:?}",
                        missing_preds
                    ),
                ));
                continue;
            }

            let excess_preds: Vec<Inst> = got_preds.difference(&expected_preds).cloned().collect();
            if !excess_preds.is_empty() {
                errors.report((
                    block,
                    format!("cfg had unexpected predecessor(s) {:?}", excess_preds),
                ));
                continue;
            }

            expected_succs.clear();
            got_succs.clear();
            expected_preds.clear();
            got_preds.clear();
        }
        errors.as_result()
    }

pub fn is_valid(&self) -> bool

Check if the CFG is in a valid state.

Note that this doesn’t perform any kind of validity checks. It simply checks if the compute() method has been called since the last clear(). It does not check that the CFG is consistent with the function.
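
This makes is_valid suitable as a cheap debug-build precondition, as the passes below use it, or as a lazy-recompute guard. A hedged sketch (helper name hypothetical):

use cranelift_codegen::flowgraph::ControlFlowGraph;
use cranelift_codegen::ir::Function;

fn ensure_cfg(cfg: &mut ControlFlowGraph, func: &Function) {
    // Only tracks compute()-since-clear(); it does not verify that the
    // stored edges still match `func`.
    if !cfg.is_valid() {
        cfg.compute(func);
    }
}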

Examples found in repository
src/flowgraph.rs (line 161)
    pub fn recompute_block(&mut self, func: &Function, block: Block) {
        debug_assert!(self.is_valid());
        self.invalidate_block_successors(block);
        self.compute_block(func, block);
    }

    fn add_edge(&mut self, from: Block, from_inst: Inst, to: Block) {
        self.data[from]
            .successors
            .insert(to, &mut self.succ_forest, &());
        self.data[to]
            .predecessors
            .insert(from_inst, from, &mut self.pred_forest, &());
    }

    /// Get an iterator over the CFG predecessors to `block`.
    pub fn pred_iter(&self, block: Block) -> PredIter {
        PredIter(self.data[block].predecessors.iter(&self.pred_forest))
    }

    /// Get an iterator over the CFG successors to `block`.
    pub fn succ_iter(&self, block: Block) -> SuccIter {
        debug_assert!(self.is_valid());
        self.data[block].successors.iter(&self.succ_forest)
    }
src/dominator_tree.rs (line 245)
    pub fn compute(&mut self, func: &Function, cfg: &ControlFlowGraph) {
        let _tt = timing::domtree();
        debug_assert!(cfg.is_valid());
        self.compute_postorder(func);
        self.compute_domtree(func, cfg);
        self.valid = true;
    }
src/verifier/mod.rs (line 288)
pub fn verify_context<'a, FOI: Into<FlagsOrIsa<'a>>>(
    func: &Function,
    cfg: &ControlFlowGraph,
    domtree: &DominatorTree,
    fisa: FOI,
    errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
    let _tt = timing::verifier();
    let verifier = Verifier::new(func, fisa.into());
    if cfg.is_valid() {
        verifier.cfg_integrity(cfg, errors)?;
    }
    if domtree.is_valid() {
        verifier.domtree_integrity(domtree, errors)?;
    }
    verifier.run(errors)
}
src/licm.rs (line 25)
pub fn do_licm(
    func: &mut Function,
    cfg: &mut ControlFlowGraph,
    domtree: &mut DominatorTree,
    loop_analysis: &mut LoopAnalysis,
) {
    let _tt = timing::licm();
    debug_assert!(cfg.is_valid());
    debug_assert!(domtree.is_valid());
    debug_assert!(loop_analysis.is_valid());

    for lp in loop_analysis.loops() {
        // For each loop that we want to optimize we determine the set of loop-invariant
        // instructions
        let invariant_insts = remove_loop_invariant_instructions(lp, func, cfg, loop_analysis);
        // Then we create the loop's pre-header and fill it with the invariant instructions
        // Then we remove the invariant instructions from the loop body
        if !invariant_insts.is_empty() {
            // If the loop has a natural pre-header we use it, otherwise we create it.
            let mut pos;
            match has_pre_header(&func.layout, cfg, domtree, loop_analysis.loop_header(lp)) {
                None => {
                    let pre_header =
                        create_pre_header(loop_analysis.loop_header(lp), func, cfg, domtree);
                    pos = FuncCursor::new(func).at_last_inst(pre_header);
                }
                // If there is a natural pre-header we insert new instructions just before the
                // related jumping instruction (which is not necessarily at the end).
                Some((_, last_inst)) => {
                    pos = FuncCursor::new(func).at_inst(last_inst);
                }
            };
            // The last instruction of the pre-header is the termination instruction (usually
            // a jump) so we need to insert just before this.
            for inst in invariant_insts {
                pos.insert_inst(inst);
            }
        }
    }
    // We have to recompute the domtree to account for the changes
    cfg.compute(func);
    domtree.compute(func, cfg);
}

Auto Trait Implementations§

Blanket Implementations§

Any: Gets the TypeId of self.

Borrow<T>: Immutably borrows from an owned value.

BorrowMut<T>: Mutably borrows from an owned value.

From<T>: Returns the argument unchanged.

Into<U>: Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

TryFrom<U>: Performs the conversion; Error is the type returned in the event of a conversion error.

TryInto<U>: Performs the conversion; Error is the type returned in the event of a conversion error.