Struct cranelift_codegen::ir::layout::Layout
pub struct Layout { /* private fields */ }
The Layout struct determines the layout of blocks and instructions in a function. It does not contain definitions of instructions or blocks, but depends on Inst and Block entity references being defined elsewhere.
This data structure determines:
- The order of blocks in the function.
- Which block contains a given instruction.
- The order of instructions within a block.
While data dependencies are not recorded, instruction ordering does affect control dependencies, so part of the semantics of the program is determined by the layout.
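For example, a pass that only needs the program order can walk it directly from the layout (a minimal sketch; dump_layout is a hypothetical helper):

use cranelift_codegen::ir::Function;

fn dump_layout(func: &Function) {
    // Blocks in layout order.
    for block in func.layout.blocks() {
        println!("{}:", block);
        // Instructions of `block` in layout order.
        for inst in func.layout.block_insts(block) {
            println!("    {}", func.dfg.display_inst(inst));
        }
    }
}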
Implementations
impl Layout
pub fn new() -> Self
Create a new empty Layout.
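A freshly created layout contains no blocks or instructions (a minimal sketch):

use cranelift_codegen::ir::layout::Layout;

let layout = Layout::new();
assert_eq!(layout.entry_block(), None);
assert_eq!(layout.last_block(), None);
assert!(layout.blocks().next().is_none());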
Examples found in repository?
pub fn with_name_signature(name: UserFuncName, sig: Signature) -> Self {
Self {
name,
stencil: FunctionStencil {
version_marker: VersionMarker,
signature: sig,
sized_stack_slots: StackSlots::new(),
dynamic_stack_slots: DynamicStackSlots::new(),
global_values: PrimaryMap::new(),
heaps: PrimaryMap::new(),
tables: PrimaryMap::new(),
jump_tables: PrimaryMap::new(),
dfg: DataFlowGraph::new(),
layout: Layout::new(),
srclocs: SecondaryMap::new(),
stack_limit: None,
},
params: FunctionParameters::new(),
}
}
pub fn clear(&mut self)
Clear the layout.
Examples found in repository?
fn clear(&mut self) {
self.signature.clear(CallConv::Fast);
self.sized_stack_slots.clear();
self.dynamic_stack_slots.clear();
self.global_values.clear();
self.heaps.clear();
self.tables.clear();
self.jump_tables.clear();
self.dfg.clear();
self.layout.clear();
self.srclocs.clear();
self.stack_limit = None;
}
pub fn block_capacity(&self) -> usize
Returns the capacity of the BlockData map.
Examples found in repository?
fn check(&mut self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
// List of blocks that need to be processed. blocks may be re-added to this list when we detect
// that one of their successor blocks needs a live-in flags value.
let mut worklist = EntitySet::with_capacity(self.func.layout.block_capacity());
for block in self.func.layout.blocks() {
worklist.insert(block);
}
while let Some(block) = worklist.pop() {
if let Some(value) = self.visit_block(block, errors)? {
// The block has live-in flags. Check if the value changed.
match self.livein[block].expand() {
// Revisit any predecessor blocks the first time we see a live-in for `block`.
None => {
self.livein[block] = value.into();
for BlockPredecessor { block: pred, .. } in self.cfg.pred_iter(block) {
worklist.insert(pred);
}
}
Some(old) if old != value => {
return errors.fatal((
block,
format!("conflicting live-in CPU flags: {} and {}", old, value),
));
}
x => assert_eq!(x, Some(value)),
}
} else {
// Existing live-in flags should never be able to disappear.
assert_eq!(self.livein[block].expand(), None);
}
}
Ok(())
}
impl Layout
Methods for laying out blocks.
An unknown block starts out as not inserted in the block layout. The layout is a linear order of inserted blocks. Once a block has been inserted in the layout, instructions can be added. A block can only be removed from the layout when it is empty.
Since every block must end with a terminator instruction which cannot fall through, the layout of blocks does not affect the semantics of the program.
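For instance, block entities created through the DataFlowGraph can be inserted in any order (a minimal sketch, assuming a default Function):

use cranelift_codegen::ir::Function;

let mut func = Function::new();
let b0 = func.dfg.make_block();
let b1 = func.dfg.make_block();
let b2 = func.dfg.make_block();
assert!(!func.layout.is_block_inserted(b1));

// Build the layout order b0, b1, b2.
func.layout.append_block(b0);
func.layout.append_block(b2);
func.layout.insert_block(b1, b2); // insert b1 before b2
// Equivalent alternative: func.layout.insert_block_after(b1, b0);

assert_eq!(func.layout.entry_block(), Some(b0));
assert_eq!(func.layout.blocks().collect::<Vec<_>>(), vec![b0, b1, b2]);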
pub fn is_block_inserted(&self, block: Block) -> bool
Is block currently part of the layout?
Examples found in repository?
fn verify_block(
&self,
loc: impl Into<AnyEntity>,
e: Block,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.block_is_valid(e) || !self.func.layout.is_block_inserted(e) {
return errors.fatal((loc, format!("invalid block reference {}", e)));
}
if let Some(entry_block) = self.func.layout.entry_block() {
if e == entry_block {
return errors.fatal((loc, format!("invalid reference to entry block {}", e)));
}
}
Ok(())
}
fn verify_sig_ref(
&self,
inst: Inst,
s: SigRef,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.signatures.is_valid(s) {
errors.fatal((
inst,
self.context(inst),
format!("invalid signature reference {}", s),
))
} else {
Ok(())
}
}
fn verify_func_ref(
&self,
inst: Inst,
f: FuncRef,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.ext_funcs.is_valid(f) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid function reference {}", f),
))
} else {
Ok(())
}
}
fn verify_stack_slot(
&self,
inst: Inst,
ss: StackSlot,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.sized_stack_slots.is_valid(ss) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid stack slot {}", ss),
))
} else {
Ok(())
}
}
fn verify_dynamic_stack_slot(
&self,
inst: Inst,
ss: DynamicStackSlot,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dynamic_stack_slots.is_valid(ss) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid dynamic stack slot {}", ss),
))
} else {
Ok(())
}
}
fn verify_global_value(
&self,
inst: Inst,
gv: GlobalValue,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.global_values.is_valid(gv) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid global value {}", gv),
))
} else {
Ok(())
}
}
fn verify_heap(
&self,
inst: Inst,
heap: ir::Heap,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.heaps.is_valid(heap) {
errors.nonfatal((inst, self.context(inst), format!("invalid heap {}", heap)))
} else {
Ok(())
}
}
fn verify_table(
&self,
inst: Inst,
table: ir::Table,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.tables.is_valid(table) {
errors.nonfatal((inst, self.context(inst), format!("invalid table {}", table)))
} else {
Ok(())
}
}
fn verify_value_list(
&self,
inst: Inst,
l: &ValueList,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !l.is_valid(&self.func.dfg.value_lists) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid value list reference {:?}", l),
))
} else {
Ok(())
}
}
fn verify_jump_table(
&self,
inst: Inst,
j: JumpTable,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.jump_tables.is_valid(j) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid jump table reference {}", j),
))
} else {
Ok(())
}
}
fn verify_value(
&self,
loc_inst: Inst,
v: Value,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let dfg = &self.func.dfg;
if !dfg.value_is_valid(v) {
errors.nonfatal((
loc_inst,
self.context(loc_inst),
format!("invalid value reference {}", v),
))
} else {
Ok(())
}
}
fn verify_inst_arg(
&self,
loc_inst: Inst,
v: Value,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
self.verify_value(loc_inst, v, errors)?;
let dfg = &self.func.dfg;
let loc_block = self.func.layout.pp_block(loc_inst);
let is_reachable = self.expected_domtree.is_reachable(loc_block);
// SSA form
match dfg.value_def(v) {
ValueDef::Result(def_inst, _) => {
// Value is defined by an instruction that exists.
if !dfg.inst_is_valid(def_inst) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by invalid instruction {}", v, def_inst),
));
}
// Defining instruction is inserted in a block.
if self.func.layout.inst_block(def_inst) == None {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which has no block", v, def_inst),
));
}
// Defining instruction dominates the instruction that uses the value.
if is_reachable {
if !self
.expected_domtree
.dominates(def_inst, loc_inst, &self.func.layout)
{
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value {} from non-dominating {}", v, def_inst),
));
}
if def_inst == loc_inst {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value {} from itself", v),
));
}
}
}
ValueDef::Param(block, _) => {
// Value is defined by an existing block.
if !dfg.block_is_valid(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by invalid block {}", v, block),
));
}
// Defining block is inserted in the layout
if !self.func.layout.is_block_inserted(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which is not in the layout", v, block),
));
}
// The defining block dominates the instruction using this value.
if is_reachable
&& !self
.expected_domtree
.dominates(block, loc_inst, &self.func.layout)
{
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value arg from non-dominating {}", block),
));
}
}
}
Ok(())
}
fn assign_block_seq(&mut self, block: Block) {
debug_assert!(self.is_block_inserted(block));
// Get the sequence number immediately before `block`, or 0.
let prev_seq = self.blocks[block]
.prev
.map(|prev_block| self.last_block_seq(prev_block))
.unwrap_or(0);
// Get the sequence number immediately following `block`.
let next_seq = if let Some(inst) = self.blocks[block].first_inst.expand() {
self.insts[inst].seq
} else if let Some(next_block) = self.blocks[block].next.expand() {
self.blocks[next_block].seq
} else {
// There is nothing after `block`. We can just use a major stride.
self.blocks[block].seq = prev_seq + MAJOR_STRIDE;
return;
};
// Check if there is room between these sequence numbers.
if let Some(seq) = midpoint(prev_seq, next_seq) {
self.blocks[block].seq = seq;
} else {
// No available integers between `prev_seq` and `next_seq`. We have to renumber.
self.renumber_from_block(block, prev_seq + MINOR_STRIDE, prev_seq + LOCAL_LIMIT);
}
}
/// Assign a valid sequence number to `inst` such that the numbers are still monotonic. This may
/// require renumbering.
fn assign_inst_seq(&mut self, inst: Inst) {
let block = self
.inst_block(inst)
.expect("inst must be inserted before assigning an seq");
// Get the sequence number immediately before `inst`.
let prev_seq = match self.insts[inst].prev.expand() {
Some(prev_inst) => self.insts[prev_inst].seq,
None => self.blocks[block].seq,
};
// Get the sequence number immediately following `inst`.
let next_seq = if let Some(next_inst) = self.insts[inst].next.expand() {
self.insts[next_inst].seq
} else if let Some(next_block) = self.blocks[block].next.expand() {
self.blocks[next_block].seq
} else {
// There is nothing after `inst`. We can just use a major stride.
self.insts[inst].seq = prev_seq + MAJOR_STRIDE;
return;
};
// Check if there is room between these sequence numbers.
if let Some(seq) = midpoint(prev_seq, next_seq) {
self.insts[inst].seq = seq;
} else {
// No available integers between `prev_seq` and `next_seq`. We have to renumber.
self.renumber_from_inst(inst, prev_seq + MINOR_STRIDE, prev_seq + LOCAL_LIMIT);
}
}
/// Renumber instructions starting from `inst` until the end of the block or until numbers catch
/// up.
///
/// Return `None` if renumbering has caught up and the sequence is monotonic again. Otherwise
/// return the last used sequence number.
///
/// If sequence numbers exceed `limit`, switch to a full function renumbering and return `None`.
fn renumber_insts(
&mut self,
inst: Inst,
seq: SequenceNumber,
limit: SequenceNumber,
) -> Option<SequenceNumber> {
let mut inst = inst;
let mut seq = seq;
loop {
self.insts[inst].seq = seq;
// Next instruction.
inst = match self.insts[inst].next.expand() {
None => return Some(seq),
Some(next) => next,
};
if seq < self.insts[inst].seq {
// Sequence caught up.
return None;
}
if seq > limit {
// We're pushing too many instructions in front of us.
// Switch to a full function renumbering to make some space.
self.full_renumber();
return None;
}
seq += MINOR_STRIDE;
}
}
/// Renumber starting from `block` to `seq` and continuing until the sequence numbers are
/// monotonic again.
fn renumber_from_block(
&mut self,
block: Block,
first_seq: SequenceNumber,
limit: SequenceNumber,
) {
let mut block = block;
let mut seq = first_seq;
loop {
self.blocks[block].seq = seq;
// Renumber instructions in `block`. Stop when the numbers catch up.
if let Some(inst) = self.blocks[block].first_inst.expand() {
seq = match self.renumber_insts(inst, seq + MINOR_STRIDE, limit) {
Some(s) => s,
None => return,
}
}
// Advance to the next block.
block = match self.blocks[block].next.expand() {
Some(next) => next,
None => return,
};
// Stop renumbering once the numbers catch up.
if seq < self.blocks[block].seq {
return;
}
seq += MINOR_STRIDE;
}
}
/// Renumber starting from `inst` to `seq` and continuing until the sequence numbers are
/// monotonic again.
fn renumber_from_inst(&mut self, inst: Inst, first_seq: SequenceNumber, limit: SequenceNumber) {
if let Some(seq) = self.renumber_insts(inst, first_seq, limit) {
// Renumbering spills over into next block.
if let Some(next_block) = self.blocks[self.inst_block(inst).unwrap()].next.expand() {
self.renumber_from_block(next_block, seq + MINOR_STRIDE, limit);
}
}
}
/// Renumber all blocks and instructions in the layout.
///
/// This doesn't affect the position of anything, but it gives more room in the internal
/// sequence numbers for inserting instructions later.
pub(crate) fn full_renumber(&mut self) {
let _tt = timing::layout_renumber();
let mut seq = 0;
let mut next_block = self.first_block;
while let Some(block) = next_block {
self.blocks[block].seq = seq;
seq += MAJOR_STRIDE;
next_block = self.blocks[block].next.expand();
let mut next_inst = self.blocks[block].first_inst.expand();
while let Some(inst) = next_inst {
self.insts[inst].seq = seq;
seq += MAJOR_STRIDE;
next_inst = self.insts[inst].next.expand();
}
}
trace!("Renumbered {} program points", seq / MAJOR_STRIDE);
}
}
/// Methods for laying out blocks.
///
/// An unknown block starts out as *not inserted* in the block layout. The layout is a linear order of
/// inserted blocks. Once a block has been inserted in the layout, instructions can be added. A block
/// can only be removed from the layout when it is empty.
///
/// Since every block must end with a terminator instruction which cannot fall through, the layout of
/// blocks does not affect the semantics of the program.
///
impl Layout {
/// Is `block` currently part of the layout?
pub fn is_block_inserted(&self, block: Block) -> bool {
Some(block) == self.first_block || self.blocks[block].prev.is_some()
}
/// Insert `block` as the last block in the layout.
pub fn append_block(&mut self, block: Block) {
debug_assert!(
!self.is_block_inserted(block),
"Cannot append block that is already in the layout"
);
{
let node = &mut self.blocks[block];
debug_assert!(node.first_inst.is_none() && node.last_inst.is_none());
node.prev = self.last_block.into();
node.next = None.into();
}
if let Some(last) = self.last_block {
self.blocks[last].next = block.into();
} else {
self.first_block = Some(block);
}
self.last_block = Some(block);
self.assign_block_seq(block);
}
/// Insert `block` in the layout before the existing block `before`.
pub fn insert_block(&mut self, block: Block, before: Block) {
debug_assert!(
!self.is_block_inserted(block),
"Cannot insert block that is already in the layout"
);
debug_assert!(
self.is_block_inserted(before),
"block Insertion point not in the layout"
);
let after = self.blocks[before].prev;
{
let node = &mut self.blocks[block];
node.next = before.into();
node.prev = after;
}
self.blocks[before].prev = block.into();
match after.expand() {
None => self.first_block = Some(block),
Some(a) => self.blocks[a].next = block.into(),
}
self.assign_block_seq(block);
}
/// Insert `block` in the layout *after* the existing block `after`.
pub fn insert_block_after(&mut self, block: Block, after: Block) {
debug_assert!(
!self.is_block_inserted(block),
"Cannot insert block that is already in the layout"
);
debug_assert!(
self.is_block_inserted(after),
"block Insertion point not in the layout"
);
let before = self.blocks[after].next;
{
let node = &mut self.blocks[block];
node.next = before;
node.prev = after.into();
}
self.blocks[after].next = block.into();
match before.expand() {
None => self.last_block = Some(block),
Some(b) => self.blocks[b].prev = block.into(),
}
self.assign_block_seq(block);
}
/// Remove `block` from the layout.
pub fn remove_block(&mut self, block: Block) {
debug_assert!(self.is_block_inserted(block), "block not in the layout");
debug_assert!(self.first_inst(block).is_none(), "block must be empty.");
// Clear the `block` node and extract links.
let prev;
let next;
{
let n = &mut self.blocks[block];
prev = n.prev;
next = n.next;
n.prev = None.into();
n.next = None.into();
}
// Fix up links to `block`.
match prev.expand() {
None => self.first_block = next.expand(),
Some(p) => self.blocks[p].next = next,
}
match next.expand() {
None => self.last_block = prev.expand(),
Some(n) => self.blocks[n].prev = prev,
}
}
/// Return an iterator over all blocks in layout order.
pub fn blocks(&self) -> Blocks {
Blocks {
layout: self,
next: self.first_block,
}
}
/// Get the function's entry block.
/// This is simply the first block in the layout order.
pub fn entry_block(&self) -> Option<Block> {
self.first_block
}
/// Get the last block in the layout.
pub fn last_block(&self) -> Option<Block> {
self.last_block
}
/// Get the block preceding `block` in the layout order.
pub fn prev_block(&self, block: Block) -> Option<Block> {
self.blocks[block].prev.expand()
}
/// Get the block following `block` in the layout order.
pub fn next_block(&self, block: Block) -> Option<Block> {
self.blocks[block].next.expand()
}
/// Mark a block as "cold".
///
/// This will try to move it out of the ordinary path of execution
/// when lowered to machine code.
pub fn set_cold(&mut self, block: Block) {
self.blocks[block].cold = true;
}
/// Is the given block cold?
pub fn is_cold(&self, block: Block) -> bool {
self.blocks[block].cold
}
}
/// A single node in the linked-list of blocks.
// Whenever you add new fields here, don't forget to update the custom serializer for `Layout` too.
#[derive(Clone, Debug, Default, PartialEq, Hash)]
struct BlockNode {
prev: PackedOption<Block>,
next: PackedOption<Block>,
first_inst: PackedOption<Inst>,
last_inst: PackedOption<Inst>,
seq: SequenceNumber,
cold: bool,
}
/// Iterate over blocks in layout order. See [crate::ir::layout::Layout::blocks].
pub struct Blocks<'f> {
layout: &'f Layout,
next: Option<Block>,
}
impl<'f> Iterator for Blocks<'f> {
type Item = Block;
fn next(&mut self) -> Option<Block> {
match self.next {
Some(block) => {
self.next = self.layout.next_block(block);
Some(block)
}
None => None,
}
}
}
/// Use a layout reference in a for loop.
impl<'f> IntoIterator for &'f Layout {
type Item = Block;
type IntoIter = Blocks<'f>;
fn into_iter(self) -> Blocks<'f> {
self.blocks()
}
}
/// Methods for arranging instructions.
///
/// An instruction starts out as *not inserted* in the layout. An instruction can be inserted into
/// a block at a given position.
impl Layout {
/// Get the block containing `inst`, or `None` if `inst` is not inserted in the layout.
pub fn inst_block(&self, inst: Inst) -> Option<Block> {
self.insts[inst].block.into()
}
/// Get the block containing the program point `pp`. Panic if `pp` is not in the layout.
pub fn pp_block<PP>(&self, pp: PP) -> Block
where
PP: Into<ExpandedProgramPoint>,
{
match pp.into() {
ExpandedProgramPoint::Block(block) => block,
ExpandedProgramPoint::Inst(inst) => {
self.inst_block(inst).expect("Program point not in layout")
}
}
}
/// Append `inst` to the end of `block`.
pub fn append_inst(&mut self, inst: Inst, block: Block) {
debug_assert_eq!(self.inst_block(inst), None);
debug_assert!(
self.is_block_inserted(block),
"Cannot append instructions to block not in layout"
);
{
let block_node = &mut self.blocks[block];
{
let inst_node = &mut self.insts[inst];
inst_node.block = block.into();
inst_node.prev = block_node.last_inst;
debug_assert!(inst_node.next.is_none());
}
if block_node.first_inst.is_none() {
block_node.first_inst = inst.into();
} else {
self.insts[block_node.last_inst.unwrap()].next = inst.into();
}
block_node.last_inst = inst.into();
}
self.assign_inst_seq(inst);
}
/// Fetch a block's first instruction.
pub fn first_inst(&self, block: Block) -> Option<Inst> {
self.blocks[block].first_inst.into()
}
/// Fetch a block's last instruction.
pub fn last_inst(&self, block: Block) -> Option<Inst> {
self.blocks[block].last_inst.into()
}
/// Fetch the instruction following `inst`.
pub fn next_inst(&self, inst: Inst) -> Option<Inst> {
self.insts[inst].next.expand()
}
/// Fetch the instruction preceding `inst`.
pub fn prev_inst(&self, inst: Inst) -> Option<Inst> {
self.insts[inst].prev.expand()
}
/// Fetch the first instruction in a block's terminal branch group.
pub fn canonical_branch_inst(&self, dfg: &DataFlowGraph, block: Block) -> Option<Inst> {
// Basic blocks permit at most two terminal branch instructions.
// If two, the former is conditional and the latter is unconditional.
let last = self.last_inst(block)?;
if let Some(prev) = self.prev_inst(last) {
if dfg[prev].opcode().is_branch() {
return Some(prev);
}
}
Some(last)
}
/// Insert `inst` before the instruction `before` in the same block.
pub fn insert_inst(&mut self, inst: Inst, before: Inst) {
debug_assert_eq!(self.inst_block(inst), None);
let block = self
.inst_block(before)
.expect("Instruction before insertion point not in the layout");
let after = self.insts[before].prev;
{
let inst_node = &mut self.insts[inst];
inst_node.block = block.into();
inst_node.next = before.into();
inst_node.prev = after;
}
self.insts[before].prev = inst.into();
match after.expand() {
None => self.blocks[block].first_inst = inst.into(),
Some(a) => self.insts[a].next = inst.into(),
}
self.assign_inst_seq(inst);
}
/// Remove `inst` from the layout.
pub fn remove_inst(&mut self, inst: Inst) {
let block = self.inst_block(inst).expect("Instruction already removed.");
// Clear the `inst` node and extract links.
let prev;
let next;
{
let n = &mut self.insts[inst];
prev = n.prev;
next = n.next;
n.block = None.into();
n.prev = None.into();
n.next = None.into();
}
// Fix up links to `inst`.
match prev.expand() {
None => self.blocks[block].first_inst = next,
Some(p) => self.insts[p].next = next,
}
match next.expand() {
None => self.blocks[block].last_inst = prev,
Some(n) => self.insts[n].prev = prev,
}
}
/// Iterate over the instructions in `block` in layout order.
pub fn block_insts(&self, block: Block) -> Insts {
Insts {
layout: self,
head: self.blocks[block].first_inst.into(),
tail: self.blocks[block].last_inst.into(),
}
}
/// Iterate over a limited set of instructions which are likely the branches of `block` in layout
/// order. Any instruction not visited by this iterator is not a branch, but an instruction visited by this iterator may not be a branch.
pub fn block_likely_branches(&self, block: Block) -> Insts {
// Note: Checking whether an instruction is a branch or not while walking backward might add
// extra overhead. However, we know that the number of branches is limited to 2 at the end of
// each block, and therefore we can just iterate over the last 2 instructions.
let mut iter = self.block_insts(block);
let head = iter.head;
let tail = iter.tail;
iter.next_back();
let head = iter.next_back().or(head);
Insts {
layout: self,
head,
tail,
}
}
/// Split the block containing `before` in two.
///
/// Insert `new_block` after the old block and move `before` and the following instructions to
/// `new_block`:
///
/// ```text
/// old_block:
/// i1
/// i2
/// i3 << before
/// i4
/// ```
/// becomes:
///
/// ```text
/// old_block:
/// i1
/// i2
/// new_block:
/// i3 << before
/// i4
/// ```
pub fn split_block(&mut self, new_block: Block, before: Inst) {
let old_block = self
.inst_block(before)
.expect("The `before` instruction must be in the layout");
debug_assert!(!self.is_block_inserted(new_block));
// Insert new_block after old_block.
let next_block = self.blocks[old_block].next;
let last_inst = self.blocks[old_block].last_inst;
{
let node = &mut self.blocks[new_block];
node.prev = old_block.into();
node.next = next_block;
node.first_inst = before.into();
node.last_inst = last_inst;
}
self.blocks[old_block].next = new_block.into();
// Fix backwards link.
if Some(old_block) == self.last_block {
self.last_block = Some(new_block);
} else {
self.blocks[next_block.unwrap()].prev = new_block.into();
}
// Disconnect the instruction links.
let prev_inst = self.insts[before].prev;
self.insts[before].prev = None.into();
self.blocks[old_block].last_inst = prev_inst;
match prev_inst.expand() {
None => self.blocks[old_block].first_inst = None.into(),
Some(pi) => self.insts[pi].next = None.into(),
}
// Fix the instruction -> block pointers.
let mut opt_i = Some(before);
while let Some(i) = opt_i {
debug_assert_eq!(self.insts[i].block.expand(), Some(old_block));
self.insts[i].block = new_block.into();
opt_i = self.insts[i].next.into();
}
self.assign_block_seq(new_block);
}
pub fn append_block(&mut self, block: Block)
Insert block as the last block in the layout.
Examples found in repository?
fn insert_block(&mut self, new_block: ir::Block) {
use self::CursorPosition::*;
match self.position() {
At(inst) => {
self.layout_mut().split_block(new_block, inst);
// All other cases move to `After(block)`, but in this case we'll stay `At(inst)`.
return;
}
Nowhere => self.layout_mut().append_block(new_block),
Before(block) => self.layout_mut().insert_block(new_block, block),
After(block) => self.layout_mut().insert_block_after(new_block, block),
}
// For everything but `At(inst)` we end up appending to the new block.
self.set_position(After(new_block));
}
pub fn insert_block(&mut self, block: Block, before: Block)
Insert block in the layout before the existing block before.
Examples found in repository?
fn insert_block(&mut self, new_block: ir::Block) {
use self::CursorPosition::*;
match self.position() {
At(inst) => {
self.layout_mut().split_block(new_block, inst);
// All other cases move to `After(block)`, but in this case we'll stay `At(inst)`.
return;
}
Nowhere => self.layout_mut().append_block(new_block),
Before(block) => self.layout_mut().insert_block(new_block, block),
After(block) => self.layout_mut().insert_block_after(new_block, block),
}
// For everything but `At(inst)` we end up appending to the new block.
self.set_position(After(new_block));
}
pub fn insert_block_after(&mut self, block: Block, after: Block)
Insert block in the layout after the existing block after.
Examples found in repository?
fn insert_block(&mut self, new_block: ir::Block) {
use self::CursorPosition::*;
match self.position() {
At(inst) => {
self.layout_mut().split_block(new_block, inst);
// All other cases move to `After(block)`, but in this case we'll stay `At(inst)`.
return;
}
Nowhere => self.layout_mut().append_block(new_block),
Before(block) => self.layout_mut().insert_block(new_block, block),
After(block) => self.layout_mut().insert_block_after(new_block, block),
}
// For everything but `At(inst)` we end up appending to the new block.
self.set_position(After(new_block));
}
pub fn remove_block(&mut self, block: Block)
Remove block from the layout.
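Because a block can only be removed once it is empty, callers typically remove its instructions first (a minimal sketch assuming `func: &mut Function` and an inserted `block` in scope, mirroring the example below):

// Remove all instructions from `block`, then the block itself.
while let Some(inst) = func.layout.first_inst(block) {
    func.layout.remove_inst(inst);
}
func.layout.remove_block(block);
assert!(!func.layout.is_block_inserted(block));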
Examples found in repository?
pub fn eliminate_unreachable_code(
func: &mut ir::Function,
cfg: &mut ControlFlowGraph,
domtree: &DominatorTree,
) {
let _tt = timing::unreachable_code();
let mut pos = FuncCursor::new(func);
while let Some(block) = pos.next_block() {
if domtree.is_reachable(block) {
continue;
}
trace!("Eliminating unreachable {}", block);
// Move the cursor out of the way and make sure the next loop iteration goes to the right
// block.
pos.prev_block();
// Remove all instructions from `block`.
while let Some(inst) = pos.func.layout.first_inst(block) {
trace!(" - {}", pos.func.dfg.display_inst(inst));
pos.func.layout.remove_inst(inst);
}
// Once the block is completely empty, we can update the CFG which removes it from any
// predecessor lists.
cfg.recompute_block(pos.func, block);
// Finally, remove the block from the layout.
pos.func.layout.remove_block(block);
}
// Remove all jumptable block-list contents that refer to unreachable
// blocks; the jumptable itself must have been unused (or used only in an
// unreachable block) if so. Note that we are not necessarily removing *all*
// unused jumptables, because that would require computing their
// reachability as well; we are just removing enough to clean up references
// to deleted blocks.
for jt_data in func.jump_tables.values_mut() {
let invalid_ref = jt_data.iter().any(|block| !domtree.is_reachable(*block));
if invalid_ref {
jt_data.clear();
}
}
}
pub fn blocks(&self) -> Blocks<'_>
Return an iterator over all blocks in layout order.
Examples found in repository?
pub(crate) fn new(func: &Function, domtree: &DominatorTree) -> DomTreeWithChildren {
let mut nodes: SecondaryMap<Block, DomTreeNode> =
SecondaryMap::with_capacity(func.dfg.num_blocks());
for block in func.layout.blocks() {
let idom_inst = match domtree.idom(block) {
Some(idom_inst) => idom_inst,
None => continue,
};
let idom = func
.layout
.inst_block(idom_inst)
.expect("Dominating instruction should be part of a block");
nodes[block].next = nodes[idom].children;
nodes[idom].children = block.into();
}
let root = func.layout.entry_block().unwrap();
Self { nodes, root }
}
fn check(&mut self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
// List of blocks that need to be processed. blocks may be re-added to this list when we detect
// that one of their successor blocks needs a live-in flags value.
let mut worklist = EntitySet::with_capacity(self.func.layout.block_capacity());
for block in self.func.layout.blocks() {
worklist.insert(block);
}
while let Some(block) = worklist.pop() {
if let Some(value) = self.visit_block(block, errors)? {
// The block has live-in flags. Check if the value changed.
match self.livein[block].expand() {
// Revisit any predecessor blocks the first time we see a live-in for `block`.
None => {
self.livein[block] = value.into();
for BlockPredecessor { block: pred, .. } in self.cfg.pred_iter(block) {
worklist.insert(pred);
}
}
Some(old) if old != value => {
return errors.fatal((
block,
format!("conflicting live-in CPU flags: {} and {}", old, value),
));
}
x => assert_eq!(x, Some(value)),
}
} else {
// Existing live-in flags should never be able to disappear.
assert_eq!(self.livein[block].expand(), None);
}
}
Ok(())
}
fn compute_load_last_stores(
func: &Function,
block_input: SecondaryMap<Block, Option<LastStores>>,
) -> FxHashMap<Inst, PackedMemoryState> {
let mut load_mem_state = FxHashMap::default();
load_mem_state.reserve(func.dfg.num_insts() / 8);
for block in func.layout.blocks() {
let mut state = block_input[block].clone().unwrap();
for inst in func.layout.block_insts(block) {
trace!(
"alias analysis: scanning at {} with state {:?} ({:?})",
inst,
state,
func.dfg[inst],
);
// N.B.: we match `Load` specifically, and not any
// other kinds of loads (or any opcode such that
// `opcode.can_load()` returns true), because some
// "can load" instructions actually have very
// different semantics (are not just a load of a
// particularly-typed value). For example, atomic
// (load/store, RMW, CAS) instructions "can load" but
// definitely should not participate in store-to-load
// forwarding or redundant-load elimination. Our goal
// here is to provide a `MemoryState` just for plain
// old loads whose semantics we can completely reason
// about.
if let InstructionData::Load {
opcode: Opcode::Load,
flags,
..
} = func.dfg[inst]
{
let mem_state = *state.for_flags(flags);
trace!(
"alias analysis: at {}: load with mem_state {:?}",
inst,
mem_state,
);
load_mem_state.insert(inst, mem_state.into());
}
state.update(func, inst);
}
}
load_mem_state
}
fn domtree_integrity(
&self,
domtree: &DominatorTree,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
// We consider two `DominatorTree`s to be equal if they return the same immediate
// dominator for each block. Therefore the current domtree is valid if it matches the freshly
// computed one.
for block in self.func.layout.blocks() {
let expected = self.expected_domtree.idom(block);
let got = domtree.idom(block);
if got != expected {
return errors.fatal((
block,
format!(
"invalid domtree, expected idom({}) = {:?}, got {:?}",
block, expected, got
),
));
}
}
// We also verify if the postorder defined by `DominatorTree` is sane
if domtree.cfg_postorder().len() != self.expected_domtree.cfg_postorder().len() {
return errors.fatal((
AnyEntity::Function,
"incorrect number of Blocks in postorder traversal",
));
}
for (index, (&test_block, &true_block)) in domtree
.cfg_postorder()
.iter()
.zip(self.expected_domtree.cfg_postorder().iter())
.enumerate()
{
if test_block != true_block {
return errors.fatal((
test_block,
format!(
"invalid domtree, postorder block number {} should be {}, got {}",
index, true_block, test_block
),
));
}
}
// We verify rpo_cmp on pairs of adjacent blocks in the postorder
for (&prev_block, &next_block) in domtree.cfg_postorder().iter().adjacent_pairs() {
if self
.expected_domtree
.rpo_cmp(prev_block, next_block, &self.func.layout)
!= Ordering::Greater
{
return errors.fatal((
next_block,
format!(
"invalid domtree, rpo_cmp does not says {} is greater than {}",
prev_block, next_block
),
));
}
}
Ok(())
}
fn typecheck_entry_block_params(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
if let Some(block) = self.func.layout.entry_block() {
let expected_types = &self.func.signature.params;
let block_param_count = self.func.dfg.num_block_params(block);
if block_param_count != expected_types.len() {
return errors.fatal((
block,
format!(
"entry block parameters ({}) must match function signature ({})",
block_param_count,
expected_types.len()
),
));
}
for (i, &arg) in self.func.dfg.block_params(block).iter().enumerate() {
let arg_type = self.func.dfg.value_type(arg);
if arg_type != expected_types[i].value_type {
errors.report((
block,
format!(
"entry block parameter {} expected to have type {}, got {}",
i, expected_types[i], arg_type
),
));
}
}
}
errors.as_result()
}
fn check_entry_not_cold(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
if let Some(entry_block) = self.func.layout.entry_block() {
if self.func.layout.is_cold(entry_block) {
return errors
.fatal((entry_block, format!("entry block cannot be marked as cold")));
}
}
errors.as_result()
}
fn typecheck(&self, inst: Inst, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
let inst_data = &self.func.dfg[inst];
let constraints = inst_data.opcode().constraints();
let ctrl_type = if let Some(value_typeset) = constraints.ctrl_typeset() {
// For polymorphic opcodes, determine the controlling type variable first.
let ctrl_type = self.func.dfg.ctrl_typevar(inst);
if !value_typeset.contains(ctrl_type) {
errors.report((
inst,
self.context(inst),
format!("has an invalid controlling type {}", ctrl_type),
));
}
ctrl_type
} else {
// Non-polymorphic instructions don't check the controlling type variable, so `Option`
// is unnecessary and we can just make it `INVALID`.
types::INVALID
};
// Typechecking instructions is never fatal
let _ = self.typecheck_results(inst, ctrl_type, errors);
let _ = self.typecheck_fixed_args(inst, ctrl_type, errors);
let _ = self.typecheck_variable_args(inst, errors);
let _ = self.typecheck_return(inst, errors);
let _ = self.typecheck_special(inst, ctrl_type, errors);
Ok(())
}
fn typecheck_results(
&self,
inst: Inst,
ctrl_type: Type,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let mut i = 0;
for &result in self.func.dfg.inst_results(inst) {
let result_type = self.func.dfg.value_type(result);
let expected_type = self.func.dfg.compute_result_type(inst, i, ctrl_type);
if let Some(expected_type) = expected_type {
if result_type != expected_type {
errors.report((
inst,
self.context(inst),
format!(
"expected result {} ({}) to have type {}, found {}",
i, result, expected_type, result_type
),
));
}
} else {
return errors.nonfatal((
inst,
self.context(inst),
"has more result values than expected",
));
}
i += 1;
}
// There aren't any more result types left.
if self.func.dfg.compute_result_type(inst, i, ctrl_type) != None {
return errors.nonfatal((
inst,
self.context(inst),
"has fewer result values than expected",
));
}
Ok(())
}
fn typecheck_fixed_args(
&self,
inst: Inst,
ctrl_type: Type,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let constraints = self.func.dfg[inst].opcode().constraints();
for (i, &arg) in self.func.dfg.inst_fixed_args(inst).iter().enumerate() {
let arg_type = self.func.dfg.value_type(arg);
match constraints.value_argument_constraint(i, ctrl_type) {
ResolvedConstraint::Bound(expected_type) => {
if arg_type != expected_type {
errors.report((
inst,
self.context(inst),
format!(
"arg {} ({}) has type {}, expected {}",
i, arg, arg_type, expected_type
),
));
}
}
ResolvedConstraint::Free(type_set) => {
if !type_set.contains(arg_type) {
errors.report((
inst,
self.context(inst),
format!(
"arg {} ({}) with type {} failed to satisfy type set {:?}",
i, arg, arg_type, type_set
),
));
}
}
}
}
Ok(())
}
fn typecheck_variable_args(
&self,
inst: Inst,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
match self.func.dfg.analyze_branch(inst) {
BranchInfo::SingleDest(block, _) => {
let iter = self
.func
.dfg
.block_params(block)
.iter()
.map(|&v| self.func.dfg.value_type(v));
self.typecheck_variable_args_iterator(inst, iter, errors)?;
}
BranchInfo::Table(table, block) => {
if let Some(block) = block {
let arg_count = self.func.dfg.num_block_params(block);
if arg_count != 0 {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"takes no arguments, but had target {} with {} arguments",
block, arg_count,
),
));
}
}
for block in self.func.jump_tables[table].iter() {
let arg_count = self.func.dfg.num_block_params(*block);
if arg_count != 0 {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"takes no arguments, but had target {} with {} arguments",
block, arg_count,
),
));
}
}
}
BranchInfo::NotABranch => {}
}
match self.func.dfg[inst].analyze_call(&self.func.dfg.value_lists) {
CallInfo::Direct(func_ref, _) => {
let sig_ref = self.func.dfg.ext_funcs[func_ref].signature;
let arg_types = self.func.dfg.signatures[sig_ref]
.params
.iter()
.map(|a| a.value_type);
self.typecheck_variable_args_iterator(inst, arg_types, errors)?;
}
CallInfo::Indirect(sig_ref, _) => {
let arg_types = self.func.dfg.signatures[sig_ref]
.params
.iter()
.map(|a| a.value_type);
self.typecheck_variable_args_iterator(inst, arg_types, errors)?;
}
CallInfo::NotACall => {}
}
Ok(())
}
fn typecheck_variable_args_iterator<I: Iterator<Item = Type>>(
&self,
inst: Inst,
iter: I,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let variable_args = self.func.dfg.inst_variable_args(inst);
let mut i = 0;
for expected_type in iter {
if i >= variable_args.len() {
// Result count mismatch handled below, we want the full argument count first though
i += 1;
continue;
}
let arg = variable_args[i];
let arg_type = self.func.dfg.value_type(arg);
if expected_type != arg_type {
errors.report((
inst,
self.context(inst),
format!(
"arg {} ({}) has type {}, expected {}",
i, variable_args[i], arg_type, expected_type
),
));
}
i += 1;
}
if i != variable_args.len() {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"mismatched argument count for `{}`: got {}, expected {}",
self.func.dfg.display_inst(inst),
variable_args.len(),
i,
),
));
}
Ok(())
}
fn typecheck_return(&self, inst: Inst, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
if self.func.dfg[inst].opcode().is_return() {
let args = self.func.dfg.inst_variable_args(inst);
let expected_types = &self.func.signature.returns;
if args.len() != expected_types.len() {
return errors.nonfatal((
inst,
self.context(inst),
"arguments of return must match function signature",
));
}
for (i, (&arg, &expected_type)) in args.iter().zip(expected_types).enumerate() {
let arg_type = self.func.dfg.value_type(arg);
if arg_type != expected_type.value_type {
errors.report((
inst,
self.context(inst),
format!(
"arg {} ({}) has type {}, must match function signature of {}",
i, arg, arg_type, expected_type
),
));
}
}
}
Ok(())
}
// Check special-purpose type constraints that can't be expressed in the normal opcode
// constraints.
fn typecheck_special(
&self,
inst: Inst,
ctrl_type: Type,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
match self.func.dfg[inst] {
ir::InstructionData::Unary { opcode, arg } => {
let arg_type = self.func.dfg.value_type(arg);
match opcode {
Opcode::Uextend | Opcode::Sextend | Opcode::Fpromote => {
if arg_type.lane_count() != ctrl_type.lane_count() {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"input {} and output {} must have same number of lanes",
arg_type, ctrl_type,
),
));
}
if arg_type.lane_bits() >= ctrl_type.lane_bits() {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"input {} must be smaller than output {}",
arg_type, ctrl_type,
),
));
}
}
Opcode::Ireduce | Opcode::Fdemote => {
if arg_type.lane_count() != ctrl_type.lane_count() {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"input {} and output {} must have same number of lanes",
arg_type, ctrl_type,
),
));
}
if arg_type.lane_bits() <= ctrl_type.lane_bits() {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"input {} must be larger than output {}",
arg_type, ctrl_type,
),
));
}
}
_ => {}
}
}
ir::InstructionData::HeapAddr { heap, arg, .. } => {
let index_type = self.func.dfg.value_type(arg);
let heap_index_type = self.func.heaps[heap].index_type;
if index_type != heap_index_type {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"index type {} differs from heap index type {}",
index_type, heap_index_type,
),
));
}
}
ir::InstructionData::TableAddr { table, arg, .. } => {
let index_type = self.func.dfg.value_type(arg);
let table_index_type = self.func.tables[table].index_type;
if index_type != table_index_type {
return errors.nonfatal((
inst,
self.context(inst),
format!(
"index type {} differs from table index type {}",
index_type, table_index_type,
),
));
}
}
ir::InstructionData::UnaryGlobalValue { global_value, .. } => {
if let Some(isa) = self.isa {
let inst_type = self.func.dfg.value_type(self.func.dfg.first_result(inst));
let global_type = self.func.global_values[global_value].global_type(isa);
if inst_type != global_type {
return errors.nonfatal((
inst, self.context(inst),
format!(
"global_value instruction with type {} references global value with type {}",
inst_type, global_type
)),
);
}
}
}
_ => {}
}
Ok(())
}
fn cfg_integrity(
&self,
cfg: &ControlFlowGraph,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let mut expected_succs = BTreeSet::<Block>::new();
let mut got_succs = BTreeSet::<Block>::new();
let mut expected_preds = BTreeSet::<Inst>::new();
let mut got_preds = BTreeSet::<Inst>::new();
for block in self.func.layout.blocks() {
expected_succs.extend(self.expected_cfg.succ_iter(block));
got_succs.extend(cfg.succ_iter(block));
let missing_succs: Vec<Block> =
expected_succs.difference(&got_succs).cloned().collect();
if !missing_succs.is_empty() {
errors.report((
block,
format!("cfg lacked the following successor(s) {:?}", missing_succs),
));
continue;
}
let excess_succs: Vec<Block> = got_succs.difference(&expected_succs).cloned().collect();
if !excess_succs.is_empty() {
errors.report((
block,
format!("cfg had unexpected successor(s) {:?}", excess_succs),
));
continue;
}
expected_preds.extend(
self.expected_cfg
.pred_iter(block)
.map(|BlockPredecessor { inst, .. }| inst),
);
got_preds.extend(
cfg.pred_iter(block)
.map(|BlockPredecessor { inst, .. }| inst),
);
let missing_preds: Vec<Inst> = expected_preds.difference(&got_preds).cloned().collect();
if !missing_preds.is_empty() {
errors.report((
block,
format!(
"cfg lacked the following predecessor(s) {:?}",
missing_preds
),
));
continue;
}
let excess_preds: Vec<Inst> = got_preds.difference(&expected_preds).cloned().collect();
if !excess_preds.is_empty() {
errors.report((
block,
format!("cfg had unexpected predecessor(s) {:?}", excess_preds),
));
continue;
}
expected_succs.clear();
got_succs.clear();
expected_preds.clear();
got_preds.clear();
}
errors.as_result()
}
fn immediate_constraints(
&self,
inst: Inst,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let inst_data = &self.func.dfg[inst];
match *inst_data {
ir::InstructionData::Store { flags, .. } => {
if flags.readonly() {
errors.fatal((
inst,
self.context(inst),
"A store instruction cannot have the `readonly` MemFlag",
))
} else {
Ok(())
}
}
ir::InstructionData::BinaryImm8 {
opcode: ir::instructions::Opcode::Extractlane,
imm: lane,
arg,
..
}
| ir::InstructionData::TernaryImm8 {
opcode: ir::instructions::Opcode::Insertlane,
imm: lane,
args: [arg, _],
..
} => {
// We must be specific about the opcodes above because other instructions are using
// the same formats.
let ty = self.func.dfg.value_type(arg);
if lane as u32 >= ty.lane_count() {
errors.fatal((
inst,
self.context(inst),
format!("The lane {} does not index into the type {}", lane, ty,),
))
} else {
Ok(())
}
}
_ => Ok(()),
}
}
fn typecheck_function_signature(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
self.func
.signature
.params
.iter()
.enumerate()
.filter(|(_, &param)| param.value_type == types::INVALID)
.for_each(|(i, _)| {
errors.report((
AnyEntity::Function,
format!("Parameter at position {} has an invalid type", i),
));
});
self.func
.signature
.returns
.iter()
.enumerate()
.filter(|(_, &ret)| ret.value_type == types::INVALID)
.for_each(|(i, _)| {
errors.report((
AnyEntity::Function,
format!("Return value at position {} has an invalid type", i),
))
});
self.func
.signature
.returns
.iter()
.enumerate()
.for_each(|(i, ret)| {
if let ArgumentPurpose::StructArgument(_) = ret.purpose {
errors.report((
AnyEntity::Function,
format!("Return value at position {} can't be an struct argument", i),
))
}
});
if errors.has_error() {
Err(())
} else {
Ok(())
}
}
pub fn run(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
self.verify_global_values(errors)?;
self.verify_heaps(errors)?;
self.verify_tables(errors)?;
self.verify_jump_tables(errors)?;
self.typecheck_entry_block_params(errors)?;
self.check_entry_not_cold(errors)?;
self.typecheck_function_signature(errors)?;
for block in self.func.layout.blocks() {
if self.func.layout.first_inst(block).is_none() {
return errors.fatal((block, format!("{} cannot be empty", block)));
}
for inst in self.func.layout.block_insts(block) {
self.block_integrity(block, inst, errors)?;
self.instruction_integrity(inst, errors)?;
self.typecheck(inst, errors)?;
self.immediate_constraints(inst, errors)?;
}
self.encodable_as_bb(block, errors)?;
}
verify_flags(self.func, &self.expected_cfg, errors)?;
if !errors.is_empty() {
log::warn!(
"Found verifier errors in function:\n{}",
pretty_verifier_error(self.func, None, errors.clone())
);
}
Ok(())
}
pub fn new(
f: &'func Function,
flags: crate::settings::Flags,
machine_env: &MachineEnv,
abi: Callee<I::ABIMachineSpec>,
emit_info: I::Info,
block_order: BlockLoweringOrder,
sigs: SigSet,
) -> CodegenResult<Self> {
let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
let vcode = VCodeBuilder::new(
sigs,
abi,
emit_info,
block_order,
constants,
VCodeBuildDirection::Backward,
);
let mut vregs = VRegAllocator::new();
let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
// Assign a vreg to each block param and each inst result.
for bb in f.layout.blocks() {
for &param in f.dfg.block_params(bb) {
let ty = f.dfg.value_type(param);
if value_regs[param].is_invalid() {
let regs = vregs.alloc(ty)?;
value_regs[param] = regs;
trace!("bb {} param {}: regs {:?}", bb, param, regs);
}
}
for inst in f.layout.block_insts(bb) {
for &result in f.dfg.inst_results(inst) {
let ty = f.dfg.value_type(result);
if value_regs[result].is_invalid() && !ty.is_invalid() {
let regs = vregs.alloc(ty)?;
value_regs[result] = regs;
trace!(
"bb {} inst {} ({:?}): result {} regs {:?}",
bb,
inst,
f.dfg[inst],
result,
regs,
);
}
}
}
}
// Make a sret register, if one is needed.
let mut sret_reg = None;
for ret in &vcode.abi().signature().returns.clone() {
if ret.purpose == ArgumentPurpose::StructReturn {
assert!(sret_reg.is_none());
sret_reg = Some(vregs.alloc(ret.value_type)?);
}
}
// Compute instruction colors, find constant instructions, and find instructions with
// side-effects, in one combined pass.
let mut cur_color = 0;
let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
let mut side_effect_inst_entry_colors = FxHashMap::default();
let mut inst_constants = FxHashMap::default();
for bb in f.layout.blocks() {
cur_color += 1;
for inst in f.layout.block_insts(bb) {
let side_effect = has_lowering_side_effect(f, inst);
trace!("bb {} inst {} has color {}", bb, inst, cur_color);
if side_effect {
side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
trace!(" -> side-effecting; incrementing color for next inst");
cur_color += 1;
}
// Determine if this is a constant; if so, add to the table.
if let Some(c) = is_constant_64bit(f, inst) {
trace!(" -> constant: {}", c);
inst_constants.insert(inst, c);
}
}
block_end_colors[bb] = InstColor::new(cur_color);
}
let value_ir_uses = Self::compute_use_states(f);
Ok(Lower {
f,
flags,
allocatable: PRegSet::from(machine_env),
vcode,
vregs,
value_regs,
sret_reg,
block_end_colors,
side_effect_inst_entry_colors,
inst_constants,
value_ir_uses,
value_lowered_uses: SecondaryMap::default(),
inst_sunk: FxHashSet::default(),
cur_scan_entry_color: None,
cur_inst: None,
ir_insts: vec![],
pinned_reg: None,
})
}
pub fn sigs(&self) -> &SigSet {
self.vcode.sigs()
}
pub fn sigs_mut(&mut self) -> &mut SigSet {
self.vcode.sigs_mut()
}
/// Pre-analysis: compute `value_ir_uses`. See comment on
/// `ValueUseState` for a description of what this analysis
/// computes.
fn compute_use_states<'a>(f: &'a Function) -> SecondaryMap<Value, ValueUseState> {
// We perform the analysis without recursion, so we don't
// overflow the stack on long chains of ops in the input.
//
// This is sort of a hybrid of a "shallow use-count" pass and
// a DFS. We iterate over all instructions and mark their args
// as used. However when we increment a use-count to
// "Multiple" we push its args onto the stack and do a DFS,
// immediately marking the whole dependency tree as
// Multiple. Doing both (shallow use-counting over all insts,
// and deep Multiple propagation) lets us trim both
// traversals, stopping recursion when a node is already at
// the appropriate state.
//
// In particular, note that the *coarsening* into {Unused,
// Once, Multiple} is part of what makes this pass more
// efficient than a full indirect-use-counting pass.
let mut value_ir_uses: SecondaryMap<Value, ValueUseState> =
SecondaryMap::with_default(ValueUseState::Unused);
// Stack of iterators over Values as we do DFS to mark
// Multiple-state subtrees.
type StackVec<'a> = SmallVec<[std::slice::Iter<'a, Value>; 16]>;
let mut stack: StackVec = smallvec![];
// Push args for a given inst onto the DFS stack.
let push_args_on_stack = |stack: &mut StackVec<'a>, value| {
trace!(" -> pushing args for {} onto stack", value);
if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
stack.push(f.dfg.inst_args(src_inst).iter());
}
};
// Do a DFS through `value_ir_uses` to mark a subtree as
// Multiple.
let mark_all_uses_as_multiple =
|value_ir_uses: &mut SecondaryMap<Value, ValueUseState>, stack: &mut StackVec<'a>| {
while let Some(iter) = stack.last_mut() {
if let Some(&value) = iter.next() {
let value = f.dfg.resolve_aliases(value);
trace!(" -> DFS reaches {}", value);
if value_ir_uses[value] == ValueUseState::Multiple {
// Truncate DFS here: no need to go further,
// as whole subtree must already be Multiple.
#[cfg(debug_assertions)]
{
// With debug asserts, check one level
// of that invariant at least.
if let ValueDef::Result(src_inst, _) = f.dfg.value_def(value) {
debug_assert!(f.dfg.inst_args(src_inst).iter().all(|&arg| {
let arg = f.dfg.resolve_aliases(arg);
value_ir_uses[arg] == ValueUseState::Multiple
}));
}
}
continue;
}
value_ir_uses[value] = ValueUseState::Multiple;
trace!(" -> became Multiple");
push_args_on_stack(stack, value);
} else {
// Empty iterator, discard.
stack.pop();
}
}
};
for inst in f
.layout
.blocks()
.flat_map(|block| f.layout.block_insts(block))
{
// If this inst produces multiple values, we must mark all
// of its args as Multiple, because otherwise two uses
// could come in as Once on our two different results.
let force_multiple = f.dfg.inst_results(inst).len() > 1;
// Iterate over all args of all instructions, noting an
// additional use on each operand. If an operand transitions to Multiple,
// the DFS below propagates that state through its whole input tree.
for &arg in f.dfg.inst_args(inst) {
let arg = f.dfg.resolve_aliases(arg);
let old = value_ir_uses[arg];
if force_multiple {
trace!(
"forcing arg {} to Multiple because of multiple results of user inst",
arg
);
value_ir_uses[arg] = ValueUseState::Multiple;
} else {
value_ir_uses[arg].inc();
}
let new = value_ir_uses[arg];
trace!("arg {} used, old state {:?}, new {:?}", arg, old, new,);
// On transition to Multiple, do DFS.
if old != ValueUseState::Multiple && new == ValueUseState::Multiple {
push_args_on_stack(&mut stack, arg);
mark_all_uses_as_multiple(&mut value_ir_uses, &mut stack);
}
}
}
value_ir_uses
}
pub fn entry_block(&self) -> Option<Block>
Get the function’s entry block. This is simply the first block in the layout order.
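As a minimal sketch (not from the repository; the helper name is made up), the entry block is simply whatever block was appended to the layout first:
use cranelift_codegen::ir::Function;
fn entry_block_sketch() {
    let mut func = Function::new();
    // Block entities exist in the DFG but are not yet part of the layout.
    let b0 = func.dfg.make_block();
    let b1 = func.dfg.make_block();
    assert_eq!(func.layout.entry_block(), None);
    // Appending blocks defines the layout order; the first one becomes the entry block.
    func.layout.append_block(b0);
    func.layout.append_block(b1);
    assert_eq!(func.layout.entry_block(), Some(b0));
}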
Examples found in repository?
More examples
fn next_block(&mut self) -> Option<ir::Block> {
let next = if let Some(block) = self.current_block() {
self.layout().next_block(block)
} else {
self.layout().entry_block()
};
self.set_position(match next {
Some(block) => CursorPosition::Before(block),
None => CursorPosition::Nowhere,
});
next
}
fn verify_block(
&self,
loc: impl Into<AnyEntity>,
e: Block,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.block_is_valid(e) || !self.func.layout.is_block_inserted(e) {
return errors.fatal((loc, format!("invalid block reference {}", e)));
}
if let Some(entry_block) = self.func.layout.entry_block() {
if e == entry_block {
return errors.fatal((loc, format!("invalid reference to entry block {}", e)));
}
}
Ok(())
}
fn verify_sig_ref(
&self,
inst: Inst,
s: SigRef,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.signatures.is_valid(s) {
errors.fatal((
inst,
self.context(inst),
format!("invalid signature reference {}", s),
))
} else {
Ok(())
}
}
fn verify_func_ref(
&self,
inst: Inst,
f: FuncRef,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.ext_funcs.is_valid(f) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid function reference {}", f),
))
} else {
Ok(())
}
}
fn verify_stack_slot(
&self,
inst: Inst,
ss: StackSlot,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.sized_stack_slots.is_valid(ss) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid stack slot {}", ss),
))
} else {
Ok(())
}
}
fn verify_dynamic_stack_slot(
&self,
inst: Inst,
ss: DynamicStackSlot,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dynamic_stack_slots.is_valid(ss) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid dynamic stack slot {}", ss),
))
} else {
Ok(())
}
}
fn verify_global_value(
&self,
inst: Inst,
gv: GlobalValue,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.global_values.is_valid(gv) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid global value {}", gv),
))
} else {
Ok(())
}
}
fn verify_heap(
&self,
inst: Inst,
heap: ir::Heap,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.heaps.is_valid(heap) {
errors.nonfatal((inst, self.context(inst), format!("invalid heap {}", heap)))
} else {
Ok(())
}
}
fn verify_table(
&self,
inst: Inst,
table: ir::Table,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.tables.is_valid(table) {
errors.nonfatal((inst, self.context(inst), format!("invalid table {}", table)))
} else {
Ok(())
}
}
fn verify_value_list(
&self,
inst: Inst,
l: &ValueList,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !l.is_valid(&self.func.dfg.value_lists) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid value list reference {:?}", l),
))
} else {
Ok(())
}
}
fn verify_jump_table(
&self,
inst: Inst,
j: JumpTable,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.jump_tables.is_valid(j) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid jump table reference {}", j),
))
} else {
Ok(())
}
}
fn verify_value(
&self,
loc_inst: Inst,
v: Value,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let dfg = &self.func.dfg;
if !dfg.value_is_valid(v) {
errors.nonfatal((
loc_inst,
self.context(loc_inst),
format!("invalid value reference {}", v),
))
} else {
Ok(())
}
}
fn verify_inst_arg(
&self,
loc_inst: Inst,
v: Value,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
self.verify_value(loc_inst, v, errors)?;
let dfg = &self.func.dfg;
let loc_block = self.func.layout.pp_block(loc_inst);
let is_reachable = self.expected_domtree.is_reachable(loc_block);
// SSA form
match dfg.value_def(v) {
ValueDef::Result(def_inst, _) => {
// Value is defined by an instruction that exists.
if !dfg.inst_is_valid(def_inst) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by invalid instruction {}", v, def_inst),
));
}
// Defining instruction is inserted in a block.
if self.func.layout.inst_block(def_inst) == None {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which has no block", v, def_inst),
));
}
// Defining instruction dominates the instruction that uses the value.
if is_reachable {
if !self
.expected_domtree
.dominates(def_inst, loc_inst, &self.func.layout)
{
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value {} from non-dominating {}", v, def_inst),
));
}
if def_inst == loc_inst {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value {} from itself", v),
));
}
}
}
ValueDef::Param(block, _) => {
// Value is defined by an existing block.
if !dfg.block_is_valid(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by invalid block {}", v, block),
));
}
// Defining block is inserted in the layout
if !self.func.layout.is_block_inserted(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which is not in the layout", v, block),
));
}
// The defining block dominates the instruction using this value.
if is_reachable
&& !self
.expected_domtree
.dominates(block, loc_inst, &self.func.layout)
{
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value arg from non-dominating {}", block),
));
}
}
}
Ok(())
}
fn verify_inst_result(
&self,
loc_inst: Inst,
v: Value,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
self.verify_value(loc_inst, v, errors)?;
match self.func.dfg.value_def(v) {
ValueDef::Result(def_inst, _) => {
if def_inst != loc_inst {
errors.fatal((
loc_inst,
self.context(loc_inst),
format!("instruction result {} is not defined by the instruction", v),
))
} else {
Ok(())
}
}
ValueDef::Param(_, _) => errors.fatal((
loc_inst,
self.context(loc_inst),
format!("instruction result {} is not defined by the instruction", v),
)),
}
}
fn verify_bitcast(
&self,
inst: Inst,
flags: MemFlags,
arg: Value,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let typ = self.func.dfg.ctrl_typevar(inst);
let value_type = self.func.dfg.value_type(arg);
if typ.bits() != value_type.bits() {
errors.fatal((
inst,
format!(
"The bitcast argument {} has a type of {} bits, which doesn't match an expected type of {} bits",
arg,
value_type.bits(),
typ.bits()
),
))
} else if flags != MemFlags::new()
&& flags != MemFlags::new().with_endianness(ir::Endianness::Little)
&& flags != MemFlags::new().with_endianness(ir::Endianness::Big)
{
errors.fatal((
inst,
"The bitcast instruction only accepts the `big` or `little` memory flags",
))
} else if flags == MemFlags::new() && typ.lane_count() != value_type.lane_count() {
errors.fatal((
inst,
"Byte order specifier required for bitcast instruction changing lane count",
))
} else {
Ok(())
}
}
fn verify_constant_size(
&self,
inst: Inst,
constant: Constant,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let type_size = self.func.dfg.ctrl_typevar(inst).bytes() as usize;
let constant_size = self.func.dfg.constants.get(constant).len();
if type_size != constant_size {
errors.fatal((
inst,
format!(
"The instruction expects {} to have a size of {} bytes but it has {}",
constant, type_size, constant_size
),
))
} else {
Ok(())
}
}
fn domtree_integrity(
&self,
domtree: &DominatorTree,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
// We consider two `DominatorTree`s to be equal if they return the same immediate
// dominator for each block. Therefore the current domtree is valid if it matches the freshly
// computed one.
for block in self.func.layout.blocks() {
let expected = self.expected_domtree.idom(block);
let got = domtree.idom(block);
if got != expected {
return errors.fatal((
block,
format!(
"invalid domtree, expected idom({}) = {:?}, got {:?}",
block, expected, got
),
));
}
}
// We also verify if the postorder defined by `DominatorTree` is sane
if domtree.cfg_postorder().len() != self.expected_domtree.cfg_postorder().len() {
return errors.fatal((
AnyEntity::Function,
"incorrect number of Blocks in postorder traversal",
));
}
for (index, (&test_block, &true_block)) in domtree
.cfg_postorder()
.iter()
.zip(self.expected_domtree.cfg_postorder().iter())
.enumerate()
{
if test_block != true_block {
return errors.fatal((
test_block,
format!(
"invalid domtree, postorder block number {} should be {}, got {}",
index, true_block, test_block
),
));
}
}
// We verify rpo_cmp on pairs of adjacent blocks in the postorder
for (&prev_block, &next_block) in domtree.cfg_postorder().iter().adjacent_pairs() {
if self
.expected_domtree
.rpo_cmp(prev_block, next_block, &self.func.layout)
!= Ordering::Greater
{
return errors.fatal((
next_block,
format!(
"invalid domtree, rpo_cmp does not says {} is greater than {}",
prev_block, next_block
),
));
}
}
Ok(())
}
fn typecheck_entry_block_params(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
if let Some(block) = self.func.layout.entry_block() {
let expected_types = &self.func.signature.params;
let block_param_count = self.func.dfg.num_block_params(block);
if block_param_count != expected_types.len() {
return errors.fatal((
block,
format!(
"entry block parameters ({}) must match function signature ({})",
block_param_count,
expected_types.len()
),
));
}
for (i, &arg) in self.func.dfg.block_params(block).iter().enumerate() {
let arg_type = self.func.dfg.value_type(arg);
if arg_type != expected_types[i].value_type {
errors.report((
block,
format!(
"entry block parameter {} expected to have type {}, got {}",
i, expected_types[i], arg_type
),
));
}
}
}
errors.as_result()
}
fn check_entry_not_cold(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
if let Some(entry_block) = self.func.layout.entry_block() {
if self.func.layout.is_cold(entry_block) {
return errors
.fatal((entry_block, format!("entry block cannot be marked as cold")));
}
}
errors.as_result()
}
pub(crate) fn new(func: &Function, domtree: &DominatorTree) -> DomTreeWithChildren {
let mut nodes: SecondaryMap<Block, DomTreeNode> =
SecondaryMap::with_capacity(func.dfg.num_blocks());
for block in func.layout.blocks() {
let idom_inst = match domtree.idom(block) {
Some(idom_inst) => idom_inst,
None => continue,
};
let idom = func
.layout
.inst_block(idom_inst)
.expect("Dominating instruction should be part of a block");
nodes[block].next = nodes[idom].children;
nodes[idom].children = block.into();
}
let root = func.layout.entry_block().unwrap();
Self { nodes, root }
}
fn compute_block_input_states(
func: &Function,
cfg: &ControlFlowGraph,
) -> SecondaryMap<Block, Option<LastStores>> {
let mut block_input = SecondaryMap::with_capacity(func.dfg.num_blocks());
let mut worklist: SmallVec<[Block; 16]> = smallvec![];
let mut worklist_set = FxHashSet::default();
let entry = func.layout.entry_block().unwrap();
worklist.push(entry);
worklist_set.insert(entry);
block_input[entry] = Some(LastStores::default());
while let Some(block) = worklist.pop() {
worklist_set.remove(&block);
let state = block_input[block].clone().unwrap();
trace!("alias analysis: input to {} is {:?}", block, state);
let state = func
.layout
.block_insts(block)
.fold(state, |mut state, inst| {
state.update(func, inst);
trace!("after {}: state is {:?}", inst, state);
state
});
for succ in cfg.succ_iter(block) {
let succ_first_inst = func.layout.first_inst(succ).unwrap();
let succ_state = &mut block_input[succ];
let old = succ_state.clone();
if let Some(succ_state) = succ_state.as_mut() {
succ_state.meet_from(&state, succ_first_inst);
} else {
*succ_state = Some(state);
};
let updated = *succ_state != old;
if updated && worklist_set.insert(succ) {
worklist.push(succ);
}
}
}
block_input
}
pub fn last_block(&self) -> Option<Block>
Get the last block in the layout.
Examples found in repository?
fn prev_block(&mut self) -> Option<ir::Block> {
let prev = if let Some(block) = self.current_block() {
self.layout().prev_block(block)
} else {
self.layout().last_block()
};
self.set_position(match prev {
Some(block) => CursorPosition::After(block),
None => CursorPosition::Nowhere,
});
prev
}
pub fn prev_block(&self, block: Block) -> Option<Block>
Get the block preceding block in the layout order.
Examples found in repository?
fn prev_block(&mut self) -> Option<ir::Block> {
let prev = if let Some(block) = self.current_block() {
self.layout().prev_block(block)
} else {
self.layout().last_block()
};
self.set_position(match prev {
Some(block) => CursorPosition::After(block),
None => CursorPosition::Nowhere,
});
prev
}
pub fn next_block(&self, block: Block) -> Option<Block>
Get the block following block in the layout order.
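Outside of the cursor API shown above, the same forward and backward walks can be written directly against the layout. A minimal sketch (walk_layout is a made-up name):
use cranelift_codegen::ir::Function;
fn walk_layout(func: &Function) {
    // Forward over blocks, starting at the entry block.
    let mut next = func.layout.entry_block();
    while let Some(block) = next {
        // ... visit `block` ...
        next = func.layout.next_block(block);
    }
    // Backward, starting at the last block in the layout.
    let mut prev = func.layout.last_block();
    while let Some(block) = prev {
        // ... visit `block` ...
        prev = func.layout.prev_block(block);
    }
}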
Examples found in repository?
More examples
fn next_block(&mut self) -> Option<ir::Block> {
let next = if let Some(block) = self.current_block() {
self.layout().next_block(block)
} else {
self.layout().entry_block()
};
self.set_position(match next {
Some(block) => CursorPosition::Before(block),
None => CursorPosition::Nowhere,
});
next
}
fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, block: Block, inst: Inst) {
let (term_inst, term_inst_args, term_dest, cond_inst, cond_inst_args, cond_dest, kind) =
match pos.func.dfg[inst] {
InstructionData::Jump {
opcode: Opcode::Jump,
destination,
ref args,
} => {
let next_block = if let Some(next_block) = pos.func.layout.next_block(block) {
next_block
} else {
return;
};
if destination == next_block {
return;
}
let prev_inst = if let Some(prev_inst) = pos.func.layout.prev_inst(inst) {
prev_inst
} else {
return;
};
let prev_inst_data = &pos.func.dfg[prev_inst];
if let Some(prev_dest) = prev_inst_data.branch_destination() {
if prev_dest != next_block {
return;
}
} else {
return;
}
match prev_inst_data {
InstructionData::Branch {
opcode,
args: ref prev_args,
destination: cond_dest,
} => {
let cond_arg = {
let args = pos.func.dfg.inst_args(prev_inst);
args[0]
};
let kind = match opcode {
Opcode::Brz => BranchOrderKind::BrzToBrnz(cond_arg),
Opcode::Brnz => BranchOrderKind::BrnzToBrz(cond_arg),
_ => panic!("unexpected opcode"),
};
(
inst,
args.clone(),
destination,
prev_inst,
prev_args.clone(),
*cond_dest,
kind,
)
}
_ => return,
}
}
_ => return,
};
let cond_args = cond_inst_args.as_slice(&pos.func.dfg.value_lists).to_vec();
let term_args = term_inst_args.as_slice(&pos.func.dfg.value_lists).to_vec();
match kind {
BranchOrderKind::BrnzToBrz(cond_arg) => {
pos.func
.dfg
.replace(term_inst)
.jump(cond_dest, &cond_args[1..]);
pos.func
.dfg
.replace(cond_inst)
.brz(cond_arg, term_dest, &term_args);
}
BranchOrderKind::BrzToBrnz(cond_arg) => {
pos.func
.dfg
.replace(term_inst)
.jump(cond_dest, &cond_args[1..]);
pos.func
.dfg
.replace(cond_inst)
.brnz(cond_arg, term_dest, &term_args);
}
}
cfg.recompute_block(pos.func, block);
}
pub fn set_cold(&mut self, block: Block)
Mark a block as “cold”.
This will try to move it out of the ordinary path of execution when lowered to machine code.
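A minimal sketch of the intended usage pattern, mirroring expand_cond_trap below (the helper name is made up): a rarely executed block, such as a trap destination, is marked cold so block ordering can push it out of the hot path. Note that set_cold only records a hint; it does not move the block by itself.
use cranelift_codegen::ir::Function;
fn mark_trap_block_cold(func: &mut Function) {
    // A freshly created block for a rare path (e.g. an explicit trap).
    let trap_block = func.dfg.make_block();
    func.layout.append_block(trap_block);
    func.layout.set_cold(trap_block);
    assert!(func.layout.is_cold(trap_block));
}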
Examples found in repository?
fn expand_cond_trap(
inst: ir::Inst,
func: &mut ir::Function,
cfg: &mut ControlFlowGraph,
opcode: ir::Opcode,
arg: ir::Value,
code: ir::TrapCode,
) {
trace!(
"expanding conditional trap: {:?}: {}",
inst,
func.dfg.display_inst(inst)
);
// Parse the instruction.
let trapz = match opcode {
ir::Opcode::Trapz => true,
ir::Opcode::Trapnz | ir::Opcode::ResumableTrapnz => false,
_ => panic!("Expected cond trap: {}", func.dfg.display_inst(inst)),
};
// Split the block after `inst`:
//
// trapnz arg
// ..
//
// Becomes:
//
// brz arg, new_block_resume
// jump new_block_trap
//
// new_block_trap:
// trap
//
// new_block_resume:
// ..
let old_block = func.layout.pp_block(inst);
let new_block_trap = func.dfg.make_block();
let new_block_resume = func.dfg.make_block();
// Trapping is a rare event, mark the trapping block as cold.
func.layout.set_cold(new_block_trap);
// Replace trap instruction by the inverted condition.
if trapz {
func.dfg.replace(inst).brnz(arg, new_block_resume, &[]);
} else {
func.dfg.replace(inst).brz(arg, new_block_resume, &[]);
}
// Add jump instruction after the inverted branch.
let mut pos = FuncCursor::new(func).after_inst(inst);
pos.use_srcloc(inst);
pos.ins().jump(new_block_trap, &[]);
// Insert the new label and the unconditional trap terminator.
pos.insert_block(new_block_trap);
match opcode {
ir::Opcode::Trapz | ir::Opcode::Trapnz => {
pos.ins().trap(code);
}
ir::Opcode::ResumableTrapnz => {
pos.ins().resumable_trap(code);
pos.ins().jump(new_block_resume, &[]);
}
_ => unreachable!(),
}
// Insert the new label and resume the execution when the trap fails.
pos.insert_block(new_block_resume);
// Finally update the CFG.
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, new_block_resume);
cfg.recompute_block(pos.func, new_block_trap);
}
pub fn is_cold(&self, block: Block) -> bool
Is the given block cold?
Examples found in repository?
More examples
pub fn write_block_header(
w: &mut dyn Write,
func: &Function,
block: Block,
indent: usize,
) -> fmt::Result {
let cold = if func.layout.is_cold(block) {
" cold"
} else {
""
};
// The `indent` is the instruction indentation. block headers are 4 spaces out from that.
write!(w, "{1:0$}{2}", indent - 4, "", block)?;
let mut args = func.dfg.block_params(block).iter().cloned();
match args.next() {
None => return writeln!(w, "{}:", cold),
Some(arg) => {
write!(w, "(")?;
write_arg(w, func, arg)?;
}
}
// Remaining arguments.
for arg in args {
write!(w, ", ")?;
write_arg(w, func, arg)?;
}
writeln!(w, "){}:", cold)
}
pub fn new(f: &Function) -> BlockLoweringOrder {
trace!("BlockLoweringOrder: function body {:?}", f);
// Make sure that we have an entry block, and the entry block is
// not marked as cold. (The verifier ensures this as well, but
// the user may not have run the verifier, and this property is
// critical to avoid a miscompile, so we assert it here too.)
let entry = f.layout.entry_block().expect("Must have entry block");
assert!(!f.layout.is_cold(entry));
// Step 1: compute the in-edge and out-edge count of every block.
let mut block_in_count = SecondaryMap::with_default(0);
let mut block_out_count = SecondaryMap::with_default(0);
// Cache the block successors to avoid re-examining branches below.
let mut block_succs: SmallVec<[(Inst, usize, Block); 128]> = SmallVec::new();
let mut block_succ_range = SecondaryMap::with_default((0, 0));
let mut indirect_branch_target_clif_blocks = FxHashSet::default();
for block in f.layout.blocks() {
let block_succ_start = block_succs.len();
let mut succ_idx = 0;
visit_block_succs(f, block, |inst, succ, from_table| {
block_out_count[block] += 1;
block_in_count[succ] += 1;
block_succs.push((inst, succ_idx, succ));
succ_idx += 1;
if from_table {
indirect_branch_target_clif_blocks.insert(succ);
}
});
let block_succ_end = block_succs.len();
block_succ_range[block] = (block_succ_start, block_succ_end);
for inst in f.layout.block_likely_branches(block) {
if f.dfg[inst].opcode() == Opcode::Return {
// Implicit output edge for any return.
block_out_count[block] += 1;
}
}
}
// Implicit input edge for entry block.
block_in_count[entry] += 1;
// All blocks ending in conditional branches or br_tables must
// have edge-moves inserted at the top of successor blocks,
// not at the end of themselves. This is because the moves
// would have to be inserted prior to the branch's register
// use; but RA2's model is that the moves happen *on* the
// edge, after every def/use in the block. RA2 will check for
// "branch register use safety" and panic if such a problem
// occurs. To avoid this, we force the below algorithm to
// never merge the edge block onto the end of a block that
// ends in a conditional branch. We do this by "faking" more
// than one successor, even if there is only one.
//
// (One might ask, isn't that always the case already? It
// could not be, in cases of br_table with no table and just a
// default label, for example.)
for block in f.layout.blocks() {
for inst in f.layout.block_likely_branches(block) {
// If the block has a branch with any "fixed args"
// (not blockparam args) ...
if f.dfg[inst].opcode().is_branch() && f.dfg.inst_fixed_args(inst).len() > 0 {
// ... then force a minimum successor count of
// two, so the below algorithm cannot put
// edge-moves on the end of the block.
block_out_count[block] = std::cmp::max(2, block_out_count[block]);
}
}
}
// Here we define the implicit CLIF-plus-edges graph. There are
// conceptually two such graphs: the original, with every edge explicit,
// and the merged one, with blocks (represented by `LoweredBlock`
// values) that contain original CLIF blocks, edges, or both. This
// function returns a lowered block's successors as per the latter, with
// consideration to edge-block merging.
//
// Note that there is a property of the block-merging rules below
// that is very important to ensure we don't miss any lowered blocks:
// any block in the implicit CLIF-plus-edges graph will *only* be
// included in one block in the merged graph.
//
// This, combined with the property that every edge block is reachable
// only from one predecessor (and hence cannot be reached by a DFS
// backedge), means that it is sufficient in our DFS below to track
// visited-bits per original CLIF block only, not per edge. This greatly
// simplifies the data structures (no need to keep a sparse hash-set of
// (block, block) tuples).
let compute_lowered_succs = |ret: &mut Vec<(Inst, LoweredBlock)>, block: LoweredBlock| {
let start_idx = ret.len();
match block {
LoweredBlock::Orig { block } | LoweredBlock::EdgeAndOrig { block, .. } => {
// At an orig block; successors are always edge blocks,
// possibly with orig blocks following.
let range = block_succ_range[block];
for &(edge_inst, succ_idx, succ) in &block_succs[range.0..range.1] {
if block_in_count[succ] == 1 {
ret.push((
edge_inst,
LoweredBlock::EdgeAndOrig {
pred: block,
edge_inst,
succ_idx,
block: succ,
},
));
} else {
ret.push((
edge_inst,
LoweredBlock::Edge {
pred: block,
edge_inst,
succ_idx,
succ,
},
));
}
}
}
LoweredBlock::Edge {
succ, edge_inst, ..
}
| LoweredBlock::OrigAndEdge {
succ, edge_inst, ..
} => {
// At an edge block; successors are always orig blocks,
// possibly with edge blocks following.
if block_out_count[succ] == 1 {
let range = block_succ_range[succ];
// check if the one succ is a real CFG edge (vs.
// implicit return succ).
if range.1 - range.0 > 0 {
debug_assert!(range.1 - range.0 == 1);
let (succ_edge_inst, succ_succ_idx, succ_succ) = block_succs[range.0];
ret.push((
edge_inst,
LoweredBlock::OrigAndEdge {
block: succ,
edge_inst: succ_edge_inst,
succ_idx: succ_succ_idx,
succ: succ_succ,
},
));
} else {
ret.push((edge_inst, LoweredBlock::Orig { block: succ }));
}
} else {
ret.push((edge_inst, LoweredBlock::Orig { block: succ }));
}
}
}
let end_idx = ret.len();
(start_idx, end_idx)
};
// Build the explicit LoweredBlock-to-LoweredBlock successors list.
let mut lowered_succs = vec![];
let mut lowered_succ_indices = vec![];
// Step 2: Compute RPO traversal of the implicit CLIF-plus-edge-block graph. Use an
// explicit stack so we don't overflow the real stack with a deep DFS.
#[derive(Debug)]
struct StackEntry {
this: LoweredBlock,
succs: (usize, usize), // range in lowered_succs
cur_succ: usize, // index in lowered_succs
}
let mut stack: SmallVec<[StackEntry; 16]> = SmallVec::new();
let mut visited = FxHashSet::default();
let mut postorder = vec![];
// Add the entry block.
//
// FIXME(cfallin): we might be able to use OrigAndEdge. Find a
// way to not special-case the entry block here.
let block = LoweredBlock::Orig { block: entry };
visited.insert(block);
let range = compute_lowered_succs(&mut lowered_succs, block);
lowered_succ_indices.resize(lowered_succs.len(), 0);
stack.push(StackEntry {
this: block,
succs: range,
cur_succ: range.1,
});
while !stack.is_empty() {
let stack_entry = stack.last_mut().unwrap();
let range = stack_entry.succs;
if stack_entry.cur_succ == range.0 {
postorder.push((stack_entry.this, range));
stack.pop();
} else {
// Heuristic: chase the children in reverse. This puts the first
// successor block first in RPO, all other things being equal,
// which tends to prioritize loop backedges over out-edges,
// putting the edge-block closer to the loop body and minimizing
// live-ranges in linear instruction space.
let next = lowered_succs[stack_entry.cur_succ - 1].1;
stack_entry.cur_succ -= 1;
if visited.contains(&next) {
continue;
}
visited.insert(next);
let range = compute_lowered_succs(&mut lowered_succs, next);
lowered_succ_indices.resize(lowered_succs.len(), 0);
stack.push(StackEntry {
this: next,
succs: range,
cur_succ: range.1,
});
}
}
postorder.reverse();
let rpo = postorder;
// Step 3: now that we have RPO, build the BlockIndex/BB fwd/rev maps.
let mut lowered_order = vec![];
let mut cold_blocks = FxHashSet::default();
let mut lowered_succ_ranges = vec![];
let mut lb_to_bindex = FxHashMap::default();
let mut indirect_branch_targets = FxHashSet::default();
for (block, succ_range) in rpo.into_iter() {
let index = BlockIndex::new(lowered_order.len());
lb_to_bindex.insert(block, index);
lowered_order.push(block);
lowered_succ_ranges.push(succ_range);
match block {
LoweredBlock::Orig { block }
| LoweredBlock::OrigAndEdge { block, .. }
| LoweredBlock::EdgeAndOrig { block, .. } => {
if f.layout.is_cold(block) {
cold_blocks.insert(index);
}
if indirect_branch_target_clif_blocks.contains(&block) {
indirect_branch_targets.insert(index);
}
}
LoweredBlock::Edge { pred, succ, .. } => {
if f.layout.is_cold(pred) || f.layout.is_cold(succ) {
cold_blocks.insert(index);
}
if indirect_branch_target_clif_blocks.contains(&succ) {
indirect_branch_targets.insert(index);
}
}
}
}
let lowered_succ_indices = lowered_succs
.iter()
.map(|&(inst, succ)| (inst, lb_to_bindex.get(&succ).cloned().unwrap()))
.collect();
let mut orig_map = SecondaryMap::with_default(None);
for (i, lb) in lowered_order.iter().enumerate() {
let i = BlockIndex::new(i);
if let Some(b) = lb.orig_block() {
orig_map[b] = Some(i);
}
}
let result = BlockLoweringOrder {
lowered_order,
lowered_succs,
lowered_succ_indices,
lowered_succ_ranges,
orig_map,
cold_blocks,
indirect_branch_targets,
};
trace!("BlockLoweringOrder: {:?}", result);
result
}
impl Layout
Methods for arranging instructions.
An instruction starts out as not inserted in the layout. An instruction can be inserted into a block at a given position.
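A rough sketch of that flow (not from the repository; append_and_query is a made-up name). Instructions are usually created through a cursor's InstBuilder, which defines them in the DFG and inserts them into the layout at the cursor position in one step; the layout can then report which block contains them:
use cranelift_codegen::cursor::{Cursor, FuncCursor};
use cranelift_codegen::ir::{types, Function, InstBuilder};
fn append_and_query(func: &mut Function) {
    let block = func.dfg.make_block();
    func.layout.append_block(block);
    // The cursor appends at the bottom of `block`.
    let mut cur = FuncCursor::new(func).at_bottom(block);
    let v = cur.ins().iconst(types::I32, 42);
    // The defining instruction is now recorded as belonging to `block`.
    let inst = cur.func.dfg.value_def(v).unwrap_inst();
    assert_eq!(cur.func.layout.inst_block(inst), Some(block));
}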
pub fn inst_block(&self, inst: Inst) -> Option<Block>
Get the block containing inst, or None if inst is not inserted in the layout.
Examples found in repository?
fn current_block(&self) -> Option<ir::Block> {
use self::CursorPosition::*;
match self.position() {
Nowhere => None,
At(inst) => self.layout().inst_block(inst),
Before(block) | After(block) => Some(block),
}
}
/// Get the instruction corresponding to the current position, if any.
fn current_inst(&self) -> Option<ir::Inst> {
use self::CursorPosition::*;
match self.position() {
At(inst) => Some(inst),
_ => None,
}
}
/// Go to the position after a specific instruction, which must be inserted
/// in the layout. New instructions will be inserted after `inst`.
fn goto_after_inst(&mut self, inst: ir::Inst) {
debug_assert!(self.layout().inst_block(inst).is_some());
let new_pos = if let Some(next) = self.layout().next_inst(inst) {
CursorPosition::At(next)
} else {
CursorPosition::After(
self.layout()
.inst_block(inst)
.expect("current instruction removed?"),
)
};
self.set_position(new_pos);
}
/// Go to a specific instruction which must be inserted in the layout.
/// New instructions will be inserted before `inst`.
fn goto_inst(&mut self, inst: ir::Inst) {
debug_assert!(self.layout().inst_block(inst).is_some());
self.set_position(CursorPosition::At(inst));
}
/// Go to the position for inserting instructions at the beginning of `block`,
/// which unlike `goto_first_inst` doesn't assume that any instructions have
/// been inserted into `block` yet.
fn goto_first_insertion_point(&mut self, block: ir::Block) {
if let Some(inst) = self.layout().first_inst(block) {
self.goto_inst(inst);
} else {
self.goto_bottom(block);
}
}
/// Go to the first instruction in `block`.
fn goto_first_inst(&mut self, block: ir::Block) {
let inst = self.layout().first_inst(block).expect("Empty block");
self.goto_inst(inst);
}
/// Go to the last instruction in `block`.
fn goto_last_inst(&mut self, block: ir::Block) {
let inst = self.layout().last_inst(block).expect("Empty block");
self.goto_inst(inst);
}
/// Go to the top of `block` which must be inserted into the layout.
/// At this position, instructions cannot be inserted, but `next_inst()` will move to the first
/// instruction in `block`.
fn goto_top(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::Before(block));
}
/// Go to the bottom of `block` which must be inserted into the layout.
/// At this position, inserted instructions will be appended to `block`.
fn goto_bottom(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::After(block));
}
/// Go to the top of the next block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the top of the first block in the
/// function.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `next_block()` method is intended for iterating over the blocks in layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.next_block() {
/// // Edit block.
/// }
/// }
/// ```
fn next_block(&mut self) -> Option<ir::Block> {
let next = if let Some(block) = self.current_block() {
self.layout().next_block(block)
} else {
self.layout().entry_block()
};
self.set_position(match next {
Some(block) => CursorPosition::Before(block),
None => CursorPosition::Nowhere,
});
next
}
/// Go to the bottom of the previous block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the bottom of the last block in the
/// function.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `prev_block()` method is intended for iterating over the blocks in backwards layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.prev_block() {
/// // Edit block.
/// }
/// }
/// ```
fn prev_block(&mut self) -> Option<ir::Block> {
let prev = if let Some(block) = self.current_block() {
self.layout().prev_block(block)
} else {
self.layout().last_block()
};
self.set_position(match prev {
Some(block) => CursorPosition::After(block),
None => CursorPosition::Nowhere,
});
prev
}
/// Move to the next instruction in the same block and return it.
///
/// - If the cursor was positioned before a block, go to the first instruction in that block.
/// - If there are no more instructions in the block, go to the `After(block)` position and return
/// `None`.
/// - If the cursor wasn't pointing anywhere, keep doing that.
///
/// This method will never move the cursor to a different block.
///
/// # Examples
///
/// The `next_inst()` method is intended for iterating over the instructions in a block like
/// this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_block(func: &mut Function, block: Block) {
/// let mut cursor = FuncCursor::new(func).at_top(block);
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
/// }
/// ```
/// The loop body can insert and remove instructions via the cursor.
///
/// Iterating over all the instructions in a function looks like this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.next_block() {
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
/// }
/// }
/// ```
fn next_inst(&mut self) -> Option<ir::Inst> {
use self::CursorPosition::*;
match self.position() {
Nowhere | After(..) => None,
At(inst) => {
if let Some(next) = self.layout().next_inst(inst) {
self.set_position(At(next));
Some(next)
} else {
let pos = After(
self.layout()
.inst_block(inst)
.expect("current instruction removed?"),
);
self.set_position(pos);
None
}
}
Before(block) => {
if let Some(next) = self.layout().first_inst(block) {
self.set_position(At(next));
Some(next)
} else {
self.set_position(After(block));
None
}
}
}
}
/// Move to the previous instruction in the same block and return it.
///
/// - If the cursor was positioned after a block, go to the last instruction in that block.
/// - If there are no more instructions in the block, go to the `Before(block)` position and return
/// `None`.
/// - If the cursor wasn't pointing anywhere, keep doing that.
///
/// This method will never move the cursor to a different block.
///
/// # Examples
///
/// The `prev_inst()` method is intended for iterating backwards over the instructions in an
/// block like this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_block(func: &mut Function, block: Block) {
/// let mut cursor = FuncCursor::new(func).at_bottom(block);
/// while let Some(inst) = cursor.prev_inst() {
/// // Edit instructions...
/// }
/// }
/// ```
fn prev_inst(&mut self) -> Option<ir::Inst> {
use self::CursorPosition::*;
match self.position() {
Nowhere | Before(..) => None,
At(inst) => {
if let Some(prev) = self.layout().prev_inst(inst) {
self.set_position(At(prev));
Some(prev)
} else {
let pos = Before(
self.layout()
.inst_block(inst)
.expect("current instruction removed?"),
);
self.set_position(pos);
None
}
}
After(block) => {
if let Some(prev) = self.layout().last_inst(block) {
self.set_position(At(prev));
Some(prev)
} else {
self.set_position(Before(block));
None
}
}
}
}
More examples
pub(crate) fn new(func: &Function, domtree: &DominatorTree) -> DomTreeWithChildren {
let mut nodes: SecondaryMap<Block, DomTreeNode> =
SecondaryMap::with_capacity(func.dfg.num_blocks());
for block in func.layout.blocks() {
let idom_inst = match domtree.idom(block) {
Some(idom_inst) => idom_inst,
None => continue,
};
let idom = func
.layout
.inst_block(idom_inst)
.expect("Dominating instruction should be part of a block");
nodes[block].next = nodes[idom].children;
nodes[idom].children = block.into();
}
let root = func.layout.entry_block().unwrap();
Self { nodes, root }
}
pub fn dominates<A, B>(&self, a: A, b: B, layout: &Layout) -> bool
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>,
{
let a = a.into();
let b = b.into();
match a {
ExpandedProgramPoint::Block(block_a) => {
a == b || self.last_dominator(block_a, b, layout).is_some()
}
ExpandedProgramPoint::Inst(inst_a) => {
let block_a = layout
.inst_block(inst_a)
.expect("Instruction not in layout.");
match self.last_dominator(block_a, b, layout) {
Some(last) => layout.cmp(inst_a, last) != Ordering::Greater,
None => false,
}
}
}
}
/// Find the last instruction in `a` that dominates `b`.
/// If no instructions in `a` dominate `b`, return `None`.
pub fn last_dominator<B>(&self, a: Block, b: B, layout: &Layout) -> Option<Inst>
where
B: Into<ExpandedProgramPoint>,
{
let (mut block_b, mut inst_b) = match b.into() {
ExpandedProgramPoint::Block(block) => (block, None),
ExpandedProgramPoint::Inst(inst) => (
layout.inst_block(inst).expect("Instruction not in layout."),
Some(inst),
),
};
let rpo_a = self.nodes[a].rpo_number;
// Run a finger up the dominator tree from b until we see a.
// Do nothing if b is unreachable.
while rpo_a < self.nodes[block_b].rpo_number {
let idom = match self.idom(block_b) {
Some(idom) => idom,
None => return None, // a is unreachable, so we climbed past the entry
};
block_b = layout.inst_block(idom).expect("Dominator got removed.");
inst_b = Some(idom);
}
if a == block_b {
inst_b
} else {
None
}
}
/// Compute the common dominator of two basic blocks.
///
/// Both basic blocks are assumed to be reachable.
pub fn common_dominator(
&self,
mut a: BlockPredecessor,
mut b: BlockPredecessor,
layout: &Layout,
) -> BlockPredecessor {
loop {
match self.rpo_cmp_block(a.block, b.block) {
Ordering::Less => {
// `a` comes before `b` in the RPO. Move `b` up.
let idom = self.nodes[b.block].idom.expect("Unreachable basic block?");
b = BlockPredecessor::new(
layout.inst_block(idom).expect("Dangling idom instruction"),
idom,
);
}
Ordering::Greater => {
// `b` comes before `a` in the RPO. Move `a` up.
let idom = self.nodes[a.block].idom.expect("Unreachable basic block?");
a = BlockPredecessor::new(
layout.inst_block(idom).expect("Dangling idom instruction"),
idom,
);
}
Ordering::Equal => break,
}
}
debug_assert_eq!(
a.block, b.block,
"Unreachable block passed to common_dominator?"
);
// We're in the same block. The common dominator is the earlier instruction.
if layout.cmp(a.inst, b.inst) == Ordering::Less {
a
} else {
b
}
}
286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313
fn type_suffix(func: &Function, inst: Inst) -> Option<Type> {
let inst_data = &func.dfg[inst];
let constraints = inst_data.opcode().constraints();
if !constraints.is_polymorphic() {
return None;
}
// If the controlling type variable can be inferred from the type of the designated value input
// operand, we don't need the type suffix.
if constraints.use_typevar_operand() {
let ctrl_var = inst_data.typevar_operand(&func.dfg.value_lists).unwrap();
let def_block = match func.dfg.value_def(ctrl_var) {
ValueDef::Result(instr, _) => func.layout.inst_block(instr),
ValueDef::Param(block, _) => Some(block),
};
if def_block.is_some() && def_block == func.layout.inst_block(inst) {
return None;
}
}
let rtype = func.dfg.ctrl_typevar(inst);
assert!(
!rtype.is_invalid(),
"Polymorphic instruction must produce a result"
);
Some(rtype)
}
fn assign_inst_seq(&mut self, inst: Inst) {
let block = self
.inst_block(inst)
.expect("inst must be inserted before assigning an seq");
// Get the sequence number immediately before `inst`.
let prev_seq = match self.insts[inst].prev.expand() {
Some(prev_inst) => self.insts[prev_inst].seq,
None => self.blocks[block].seq,
};
// Get the sequence number immediately following `inst`.
let next_seq = if let Some(next_inst) = self.insts[inst].next.expand() {
self.insts[next_inst].seq
} else if let Some(next_block) = self.blocks[block].next.expand() {
self.blocks[next_block].seq
} else {
// There is nothing after `inst`. We can just use a major stride.
self.insts[inst].seq = prev_seq + MAJOR_STRIDE;
return;
};
// Check if there is room between these sequence numbers.
if let Some(seq) = midpoint(prev_seq, next_seq) {
self.insts[inst].seq = seq;
} else {
// No available integers between `prev_seq` and `next_seq`. We have to renumber.
self.renumber_from_inst(inst, prev_seq + MINOR_STRIDE, prev_seq + LOCAL_LIMIT);
}
}
/// Renumber instructions starting from `inst` until the end of the block or until numbers catch
/// up.
///
/// Return `None` if renumbering has caught up and the sequence is monotonic again. Otherwise
/// return the last used sequence number.
///
/// If sequence numbers exceed `limit`, switch to a full function renumbering and return `None`.
fn renumber_insts(
&mut self,
inst: Inst,
seq: SequenceNumber,
limit: SequenceNumber,
) -> Option<SequenceNumber> {
let mut inst = inst;
let mut seq = seq;
loop {
self.insts[inst].seq = seq;
// Next instruction.
inst = match self.insts[inst].next.expand() {
None => return Some(seq),
Some(next) => next,
};
if seq < self.insts[inst].seq {
// Sequence caught up.
return None;
}
if seq > limit {
// We're pushing too many instructions in front of us.
// Switch to a full function renumbering to make some space.
self.full_renumber();
return None;
}
seq += MINOR_STRIDE;
}
}
/// Renumber starting from `block` to `seq` and continuing until the sequence numbers are
/// monotonic again.
fn renumber_from_block(
&mut self,
block: Block,
first_seq: SequenceNumber,
limit: SequenceNumber,
) {
let mut block = block;
let mut seq = first_seq;
loop {
self.blocks[block].seq = seq;
// Renumber instructions in `block`. Stop when the numbers catch up.
if let Some(inst) = self.blocks[block].first_inst.expand() {
seq = match self.renumber_insts(inst, seq + MINOR_STRIDE, limit) {
Some(s) => s,
None => return,
}
}
// Advance to the next block.
block = match self.blocks[block].next.expand() {
Some(next) => next,
None => return,
};
// Stop renumbering once the numbers catch up.
if seq < self.blocks[block].seq {
return;
}
seq += MINOR_STRIDE;
}
}
/// Renumber starting from `inst` to `seq` and continuing until the sequence numbers are
/// monotonic again.
fn renumber_from_inst(&mut self, inst: Inst, first_seq: SequenceNumber, limit: SequenceNumber) {
if let Some(seq) = self.renumber_insts(inst, first_seq, limit) {
// Renumbering spills over into next block.
if let Some(next_block) = self.blocks[self.inst_block(inst).unwrap()].next.expand() {
self.renumber_from_block(next_block, seq + MINOR_STRIDE, limit);
}
}
}
/// Renumber all blocks and instructions in the layout.
///
/// This doesn't affect the position of anything, but it gives more room in the internal
/// sequence numbers for inserting instructions later.
pub(crate) fn full_renumber(&mut self) {
let _tt = timing::layout_renumber();
let mut seq = 0;
let mut next_block = self.first_block;
while let Some(block) = next_block {
self.blocks[block].seq = seq;
seq += MAJOR_STRIDE;
next_block = self.blocks[block].next.expand();
let mut next_inst = self.blocks[block].first_inst.expand();
while let Some(inst) = next_inst {
self.insts[inst].seq = seq;
seq += MAJOR_STRIDE;
next_inst = self.insts[inst].next.expand();
}
}
trace!("Renumbered {} program points", seq / MAJOR_STRIDE);
}
}
/// Methods for laying out blocks.
///
/// An unknown block starts out as *not inserted* in the block layout. The layout is a linear order of
/// inserted blocks. Once a block has been inserted in the layout, instructions can be added. A block
/// can only be removed from the layout when it is empty.
///
/// Since every block must end with a terminator instruction which cannot fall through, the layout of
/// blocks does not affect the semantics of the program.
///
impl Layout {
/// Is `block` currently part of the layout?
pub fn is_block_inserted(&self, block: Block) -> bool {
Some(block) == self.first_block || self.blocks[block].prev.is_some()
}
/// Insert `block` as the last block in the layout.
pub fn append_block(&mut self, block: Block) {
debug_assert!(
!self.is_block_inserted(block),
"Cannot append block that is already in the layout"
);
{
let node = &mut self.blocks[block];
debug_assert!(node.first_inst.is_none() && node.last_inst.is_none());
node.prev = self.last_block.into();
node.next = None.into();
}
if let Some(last) = self.last_block {
self.blocks[last].next = block.into();
} else {
self.first_block = Some(block);
}
self.last_block = Some(block);
self.assign_block_seq(block);
}
/// Insert `block` in the layout before the existing block `before`.
pub fn insert_block(&mut self, block: Block, before: Block) {
debug_assert!(
!self.is_block_inserted(block),
"Cannot insert block that is already in the layout"
);
debug_assert!(
self.is_block_inserted(before),
"block Insertion point not in the layout"
);
let after = self.blocks[before].prev;
{
let node = &mut self.blocks[block];
node.next = before.into();
node.prev = after;
}
self.blocks[before].prev = block.into();
match after.expand() {
None => self.first_block = Some(block),
Some(a) => self.blocks[a].next = block.into(),
}
self.assign_block_seq(block);
}
/// Insert `block` in the layout *after* the existing block `after`.
pub fn insert_block_after(&mut self, block: Block, after: Block) {
debug_assert!(
!self.is_block_inserted(block),
"Cannot insert block that is already in the layout"
);
debug_assert!(
self.is_block_inserted(after),
"block Insertion point not in the layout"
);
let before = self.blocks[after].next;
{
let node = &mut self.blocks[block];
node.next = before;
node.prev = after.into();
}
self.blocks[after].next = block.into();
match before.expand() {
None => self.last_block = Some(block),
Some(b) => self.blocks[b].prev = block.into(),
}
self.assign_block_seq(block);
}
/// Remove `block` from the layout.
pub fn remove_block(&mut self, block: Block) {
debug_assert!(self.is_block_inserted(block), "block not in the layout");
debug_assert!(self.first_inst(block).is_none(), "block must be empty.");
// Clear the `block` node and extract links.
let prev;
let next;
{
let n = &mut self.blocks[block];
prev = n.prev;
next = n.next;
n.prev = None.into();
n.next = None.into();
}
// Fix up links to `block`.
match prev.expand() {
None => self.first_block = next.expand(),
Some(p) => self.blocks[p].next = next,
}
match next.expand() {
None => self.last_block = prev.expand(),
Some(n) => self.blocks[n].prev = prev,
}
}
/// Return an iterator over all blocks in layout order.
pub fn blocks(&self) -> Blocks {
Blocks {
layout: self,
next: self.first_block,
}
}
/// Get the function's entry block.
/// This is simply the first block in the layout order.
pub fn entry_block(&self) -> Option<Block> {
self.first_block
}
/// Get the last block in the layout.
pub fn last_block(&self) -> Option<Block> {
self.last_block
}
/// Get the block preceding `block` in the layout order.
pub fn prev_block(&self, block: Block) -> Option<Block> {
self.blocks[block].prev.expand()
}
/// Get the block following `block` in the layout order.
pub fn next_block(&self, block: Block) -> Option<Block> {
self.blocks[block].next.expand()
}
/// Mark a block as "cold".
///
/// This will try to move it out of the ordinary path of execution
/// when lowered to machine code.
pub fn set_cold(&mut self, block: Block) {
self.blocks[block].cold = true;
}
/// Is the given block cold?
pub fn is_cold(&self, block: Block) -> bool {
self.blocks[block].cold
}
}
/// A single node in the linked-list of blocks.
// Whenever you add new fields here, don't forget to update the custom serializer for `Layout` too.
#[derive(Clone, Debug, Default, PartialEq, Hash)]
struct BlockNode {
prev: PackedOption<Block>,
next: PackedOption<Block>,
first_inst: PackedOption<Inst>,
last_inst: PackedOption<Inst>,
seq: SequenceNumber,
cold: bool,
}
/// Iterate over blocks in layout order. See [crate::ir::layout::Layout::blocks].
pub struct Blocks<'f> {
layout: &'f Layout,
next: Option<Block>,
}
impl<'f> Iterator for Blocks<'f> {
type Item = Block;
fn next(&mut self) -> Option<Block> {
match self.next {
Some(block) => {
self.next = self.layout.next_block(block);
Some(block)
}
None => None,
}
}
}
/// Use a layout reference in a for loop.
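/// A hedged sketch (assuming `layout` is a `&Layout` with blocks already inserted):
///
/// ```ignore
/// for block in layout {
///     // Blocks are visited in layout order, starting at the entry block.
/// }
/// ```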
impl<'f> IntoIterator for &'f Layout {
type Item = Block;
type IntoIter = Blocks<'f>;
fn into_iter(self) -> Blocks<'f> {
self.blocks()
}
}
/// Methods for arranging instructions.
///
/// An instruction starts out as *not inserted* in the layout. An instruction can be inserted into
/// a block at a given position.
impl Layout {
/// Get the block containing `inst`, or `None` if `inst` is not inserted in the layout.
pub fn inst_block(&self, inst: Inst) -> Option<Block> {
self.insts[inst].block.into()
}
/// Get the block containing the program point `pp`. Panic if `pp` is not in the layout.
pub fn pp_block<PP>(&self, pp: PP) -> Block
where
PP: Into<ExpandedProgramPoint>,
{
match pp.into() {
ExpandedProgramPoint::Block(block) => block,
ExpandedProgramPoint::Inst(inst) => {
self.inst_block(inst).expect("Program point not in layout")
}
}
}
/// Append `inst` to the end of `block`.
pub fn append_inst(&mut self, inst: Inst, block: Block) {
debug_assert_eq!(self.inst_block(inst), None);
debug_assert!(
self.is_block_inserted(block),
"Cannot append instructions to block not in layout"
);
{
let block_node = &mut self.blocks[block];
{
let inst_node = &mut self.insts[inst];
inst_node.block = block.into();
inst_node.prev = block_node.last_inst;
debug_assert!(inst_node.next.is_none());
}
if block_node.first_inst.is_none() {
block_node.first_inst = inst.into();
} else {
self.insts[block_node.last_inst.unwrap()].next = inst.into();
}
block_node.last_inst = inst.into();
}
self.assign_inst_seq(inst);
}
/// Fetch a block's first instruction.
pub fn first_inst(&self, block: Block) -> Option<Inst> {
self.blocks[block].first_inst.into()
}
/// Fetch a block's last instruction.
pub fn last_inst(&self, block: Block) -> Option<Inst> {
self.blocks[block].last_inst.into()
}
/// Fetch the instruction following `inst`.
pub fn next_inst(&self, inst: Inst) -> Option<Inst> {
self.insts[inst].next.expand()
}
/// Fetch the instruction preceding `inst`.
pub fn prev_inst(&self, inst: Inst) -> Option<Inst> {
self.insts[inst].prev.expand()
}
/// Fetch the first instruction in a block's terminal branch group.
pub fn canonical_branch_inst(&self, dfg: &DataFlowGraph, block: Block) -> Option<Inst> {
// Basic blocks permit at most two terminal branch instructions.
// If two, the former is conditional and the latter is unconditional.
let last = self.last_inst(block)?;
if let Some(prev) = self.prev_inst(last) {
if dfg[prev].opcode().is_branch() {
return Some(prev);
}
}
Some(last)
}
/// Insert `inst` before the instruction `before` in the same block.
pub fn insert_inst(&mut self, inst: Inst, before: Inst) {
debug_assert_eq!(self.inst_block(inst), None);
let block = self
.inst_block(before)
.expect("Instruction before insertion point not in the layout");
let after = self.insts[before].prev;
{
let inst_node = &mut self.insts[inst];
inst_node.block = block.into();
inst_node.next = before.into();
inst_node.prev = after;
}
self.insts[before].prev = inst.into();
match after.expand() {
None => self.blocks[block].first_inst = inst.into(),
Some(a) => self.insts[a].next = inst.into(),
}
self.assign_inst_seq(inst);
}
/// Remove `inst` from the layout.
pub fn remove_inst(&mut self, inst: Inst) {
let block = self.inst_block(inst).expect("Instruction already removed.");
// Clear the `inst` node and extract links.
let prev;
let next;
{
let n = &mut self.insts[inst];
prev = n.prev;
next = n.next;
n.block = None.into();
n.prev = None.into();
n.next = None.into();
}
// Fix up links to `inst`.
match prev.expand() {
None => self.blocks[block].first_inst = next,
Some(p) => self.insts[p].next = next,
}
match next.expand() {
None => self.blocks[block].last_inst = prev,
Some(n) => self.insts[n].prev = prev,
}
}
/// Iterate over the instructions in `block` in layout order.
pub fn block_insts(&self, block: Block) -> Insts {
Insts {
layout: self,
head: self.blocks[block].first_inst.into(),
tail: self.blocks[block].last_inst.into(),
}
}
/// Iterate over a limited set of instructions which are likely the branches of `block` in layout
/// order. Any instruction not visited by this iterator is not a branch, but an instruction visited by this iterator is not necessarily a branch.
pub fn block_likely_branches(&self, block: Block) -> Insts {
// Note: Checking whether an instruction is a branch or not while walking backward might add
// extra overhead. However, we know that the number of branches is limited to 2 at the end of
// each block, and therefore we can just iterate over the last 2 instructions.
let mut iter = self.block_insts(block);
let head = iter.head;
let tail = iter.tail;
iter.next_back();
let head = iter.next_back().or(head);
Insts {
layout: self,
head,
tail,
}
}
/// Split the block containing `before` in two.
///
/// Insert `new_block` after the old block and move `before` and the following instructions to
/// `new_block`:
///
/// ```text
/// old_block:
/// i1
/// i2
/// i3 << before
/// i4
/// ```
/// becomes:
///
/// ```text
/// old_block:
/// i1
/// i2
/// new_block:
/// i3 << before
/// i4
/// ```
pub fn split_block(&mut self, new_block: Block, before: Inst) {
let old_block = self
.inst_block(before)
.expect("The `before` instruction must be in the layout");
debug_assert!(!self.is_block_inserted(new_block));
// Insert new_block after old_block.
let next_block = self.blocks[old_block].next;
let last_inst = self.blocks[old_block].last_inst;
{
let node = &mut self.blocks[new_block];
node.prev = old_block.into();
node.next = next_block;
node.first_inst = before.into();
node.last_inst = last_inst;
}
self.blocks[old_block].next = new_block.into();
// Fix backwards link.
if Some(old_block) == self.last_block {
self.last_block = Some(new_block);
} else {
self.blocks[next_block.unwrap()].prev = new_block.into();
}
// Disconnect the instruction links.
let prev_inst = self.insts[before].prev;
self.insts[before].prev = None.into();
self.blocks[old_block].last_inst = prev_inst;
match prev_inst.expand() {
None => self.blocks[old_block].first_inst = None.into(),
Some(pi) => self.insts[pi].next = None.into(),
}
// Fix the instruction -> block pointers.
let mut opt_i = Some(before);
while let Some(i) = opt_i {
debug_assert_eq!(self.insts[i].block.expand(), Some(old_block));
self.insts[i].block = new_block.into();
opt_i = self.insts[i].next.into();
}
self.assign_block_seq(new_block);
}
fn block_integrity(
&self,
block: Block,
inst: Inst,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let is_terminator = self.func.dfg[inst].opcode().is_terminator();
let is_last_inst = self.func.layout.last_inst(block) == Some(inst);
if is_terminator && !is_last_inst {
// Terminating instructions only occur at the end of blocks.
return errors.fatal((
inst,
self.context(inst),
format!(
"a terminator instruction was encountered before the end of {}",
block
),
));
}
if is_last_inst && !is_terminator {
return errors.fatal((block, "block does not end in a terminator instruction"));
}
// Instructions belong to the correct block.
let inst_block = self.func.layout.inst_block(inst);
if inst_block != Some(block) {
return errors.fatal((
inst,
self.context(inst),
format!("should belong to {} not {:?}", block, inst_block),
));
}
// Parameters belong to the correct block.
for &arg in self.func.dfg.block_params(block) {
match self.func.dfg.value_def(arg) {
ValueDef::Param(arg_block, _) => {
if block != arg_block {
return errors.fatal((arg, format!("does not belong to {}", block)));
}
}
_ => {
return errors.fatal((arg, "expected an argument, found a result"));
}
}
}
Ok(())
}
fn instruction_integrity(
&self,
inst: Inst,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let inst_data = &self.func.dfg[inst];
let dfg = &self.func.dfg;
// The instruction format matches the opcode
if inst_data.opcode().format() != InstructionFormat::from(inst_data) {
return errors.fatal((
inst,
self.context(inst),
"instruction opcode doesn't match instruction format",
));
}
let num_fixed_results = inst_data.opcode().constraints().num_fixed_results();
// var_results is 0 if we aren't a call instruction
let var_results = dfg
.call_signature(inst)
.map_or(0, |sig| dfg.signatures[sig].returns.len());
let total_results = num_fixed_results + var_results;
// All result values for multi-valued instructions are created
let got_results = dfg.inst_results(inst).len();
if got_results != total_results {
return errors.fatal((
inst,
self.context(inst),
format!(
"expected {} result values, found {}",
total_results, got_results,
),
));
}
self.verify_entity_references(inst, errors)
}
fn verify_entity_references(
&self,
inst: Inst,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
use crate::ir::instructions::InstructionData::*;
for &arg in self.func.dfg.inst_args(inst) {
self.verify_inst_arg(inst, arg, errors)?;
// All used values must be attached to something.
let original = self.func.dfg.resolve_aliases(arg);
if !self.func.dfg.value_is_attached(original) {
errors.report((
inst,
self.context(inst),
format!("argument {} -> {} is not attached", arg, original),
));
}
}
for &res in self.func.dfg.inst_results(inst) {
self.verify_inst_result(inst, res, errors)?;
}
match self.func.dfg[inst] {
MultiAry { ref args, .. } => {
self.verify_value_list(inst, args, errors)?;
}
Jump {
destination,
ref args,
..
}
| Branch {
destination,
ref args,
..
} => {
self.verify_block(inst, destination, errors)?;
self.verify_value_list(inst, args, errors)?;
}
BranchTable {
table, destination, ..
} => {
self.verify_block(inst, destination, errors)?;
self.verify_jump_table(inst, table, errors)?;
}
Call {
func_ref, ref args, ..
} => {
self.verify_func_ref(inst, func_ref, errors)?;
self.verify_value_list(inst, args, errors)?;
}
CallIndirect {
sig_ref, ref args, ..
} => {
self.verify_sig_ref(inst, sig_ref, errors)?;
self.verify_value_list(inst, args, errors)?;
}
FuncAddr { func_ref, .. } => {
self.verify_func_ref(inst, func_ref, errors)?;
}
StackLoad { stack_slot, .. } | StackStore { stack_slot, .. } => {
self.verify_stack_slot(inst, stack_slot, errors)?;
}
DynamicStackLoad {
dynamic_stack_slot, ..
}
| DynamicStackStore {
dynamic_stack_slot, ..
} => {
self.verify_dynamic_stack_slot(inst, dynamic_stack_slot, errors)?;
}
UnaryGlobalValue { global_value, .. } => {
self.verify_global_value(inst, global_value, errors)?;
}
HeapLoad { heap_imm, .. } | HeapStore { heap_imm, .. } => {
let HeapImmData { heap, .. } = self.func.dfg.heap_imms[heap_imm];
self.verify_heap(inst, heap, errors)?;
}
HeapAddr { heap, .. } => {
self.verify_heap(inst, heap, errors)?;
}
TableAddr { table, .. } => {
self.verify_table(inst, table, errors)?;
}
NullAry {
opcode: Opcode::GetPinnedReg,
}
| Unary {
opcode: Opcode::SetPinnedReg,
..
} => {
if let Some(isa) = &self.isa {
if !isa.flags().enable_pinned_reg() {
return errors.fatal((
inst,
self.context(inst),
"GetPinnedReg/SetPinnedReg cannot be used without enable_pinned_reg",
));
}
} else {
return errors.fatal((
inst,
self.context(inst),
"GetPinnedReg/SetPinnedReg need an ISA!",
));
}
}
NullAry {
opcode: Opcode::GetFramePointer | Opcode::GetReturnAddress,
} => {
if let Some(isa) = &self.isa {
// Backends may already rely on this check implicitly, so do
// not relax it without verifying that it is safe to do so.
if !isa.flags().preserve_frame_pointers() {
return errors.fatal((
inst,
self.context(inst),
"`get_frame_pointer`/`get_return_address` cannot be used without \
enabling `preserve_frame_pointers`",
));
}
} else {
return errors.fatal((
inst,
self.context(inst),
"`get_frame_pointer`/`get_return_address` require an ISA!",
));
}
}
LoadNoOffset {
opcode: Opcode::Bitcast,
flags,
arg,
} => {
self.verify_bitcast(inst, flags, arg, errors)?;
}
UnaryConst {
opcode: Opcode::Vconst,
constant_handle,
..
} => {
self.verify_constant_size(inst, constant_handle, errors)?;
}
// Exhaustive list so we can't forget to add new formats
AtomicCas { .. }
| AtomicRmw { .. }
| LoadNoOffset { .. }
| StoreNoOffset { .. }
| Unary { .. }
| UnaryConst { .. }
| UnaryImm { .. }
| UnaryIeee32 { .. }
| UnaryIeee64 { .. }
| Binary { .. }
| BinaryImm8 { .. }
| BinaryImm64 { .. }
| Ternary { .. }
| TernaryImm8 { .. }
| Shuffle { .. }
| IntAddTrap { .. }
| IntCompare { .. }
| IntCompareImm { .. }
| FloatCompare { .. }
| Load { .. }
| Store { .. }
| Trap { .. }
| CondTrap { .. }
| NullAry { .. } => {}
}
Ok(())
}
fn verify_block(
&self,
loc: impl Into<AnyEntity>,
e: Block,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.block_is_valid(e) || !self.func.layout.is_block_inserted(e) {
return errors.fatal((loc, format!("invalid block reference {}", e)));
}
if let Some(entry_block) = self.func.layout.entry_block() {
if e == entry_block {
return errors.fatal((loc, format!("invalid reference to entry block {}", e)));
}
}
Ok(())
}
fn verify_sig_ref(
&self,
inst: Inst,
s: SigRef,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.signatures.is_valid(s) {
errors.fatal((
inst,
self.context(inst),
format!("invalid signature reference {}", s),
))
} else {
Ok(())
}
}
fn verify_func_ref(
&self,
inst: Inst,
f: FuncRef,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dfg.ext_funcs.is_valid(f) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid function reference {}", f),
))
} else {
Ok(())
}
}
fn verify_stack_slot(
&self,
inst: Inst,
ss: StackSlot,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.sized_stack_slots.is_valid(ss) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid stack slot {}", ss),
))
} else {
Ok(())
}
}
fn verify_dynamic_stack_slot(
&self,
inst: Inst,
ss: DynamicStackSlot,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.dynamic_stack_slots.is_valid(ss) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid dynamic stack slot {}", ss),
))
} else {
Ok(())
}
}
fn verify_global_value(
&self,
inst: Inst,
gv: GlobalValue,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.global_values.is_valid(gv) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid global value {}", gv),
))
} else {
Ok(())
}
}
fn verify_heap(
&self,
inst: Inst,
heap: ir::Heap,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.heaps.is_valid(heap) {
errors.nonfatal((inst, self.context(inst), format!("invalid heap {}", heap)))
} else {
Ok(())
}
}
fn verify_table(
&self,
inst: Inst,
table: ir::Table,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.tables.is_valid(table) {
errors.nonfatal((inst, self.context(inst), format!("invalid table {}", table)))
} else {
Ok(())
}
}
fn verify_value_list(
&self,
inst: Inst,
l: &ValueList,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !l.is_valid(&self.func.dfg.value_lists) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid value list reference {:?}", l),
))
} else {
Ok(())
}
}
fn verify_jump_table(
&self,
inst: Inst,
j: JumpTable,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
if !self.func.jump_tables.is_valid(j) {
errors.nonfatal((
inst,
self.context(inst),
format!("invalid jump table reference {}", j),
))
} else {
Ok(())
}
}
fn verify_value(
&self,
loc_inst: Inst,
v: Value,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let dfg = &self.func.dfg;
if !dfg.value_is_valid(v) {
errors.nonfatal((
loc_inst,
self.context(loc_inst),
format!("invalid value reference {}", v),
))
} else {
Ok(())
}
}
fn verify_inst_arg(
&self,
loc_inst: Inst,
v: Value,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
self.verify_value(loc_inst, v, errors)?;
let dfg = &self.func.dfg;
let loc_block = self.func.layout.pp_block(loc_inst);
let is_reachable = self.expected_domtree.is_reachable(loc_block);
// SSA form
match dfg.value_def(v) {
ValueDef::Result(def_inst, _) => {
// Value is defined by an instruction that exists.
if !dfg.inst_is_valid(def_inst) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by invalid instruction {}", v, def_inst),
));
}
// Defining instruction is inserted in a block.
if self.func.layout.inst_block(def_inst) == None {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which has no block", v, def_inst),
));
}
// Defining instruction dominates the instruction that uses the value.
if is_reachable {
if !self
.expected_domtree
.dominates(def_inst, loc_inst, &self.func.layout)
{
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value {} from non-dominating {}", v, def_inst),
));
}
if def_inst == loc_inst {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value {} from itself", v),
));
}
}
}
ValueDef::Param(block, _) => {
// Value is defined by an existing block.
if !dfg.block_is_valid(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by invalid block {}", v, block),
));
}
// Defining block is inserted in the layout
if !self.func.layout.is_block_inserted(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which is not in the layout", v, block),
));
}
// The defining block dominates the instruction using this value.
if is_reachable
&& !self
.expected_domtree
.dominates(block, loc_inst, &self.func.layout)
{
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value arg from non-dominating {}", block),
));
}
}
}
Ok(())
}
sourcepub fn pp_block<PP>(&self, pp: PP) -> Block where PP: Into<ExpandedProgramPoint>
pub fn pp_block<PP>(&self, pp: PP) -> Block where PP: Into<ExpandedProgramPoint>
Get the block containing the program point `pp`. Panic if `pp` is not in the layout.
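A hedged usage sketch (the helper `block_of` and its setup are illustrative, not part of the crate):

```
use cranelift_codegen::ir::{Block, Function, Inst};

fn block_of(func: &Function, inst: Inst) -> Block {
    // `pp_block` accepts anything convertible into an `ExpandedProgramPoint`,
    // so both instructions and blocks can be passed. It panics when `inst` is
    // not inserted in the layout; use `inst_block` instead when that is not
    // guaranteed.
    func.layout.pp_block(inst)
}
```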
Examples found in repository?
pub fn rpo_cmp<A, B>(&self, a: A, b: B, layout: &Layout) -> Ordering
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>,
{
let a = a.into();
let b = b.into();
self.rpo_cmp_block(layout.pp_block(a), layout.pp_block(b))
.then(layout.cmp(a, b))
}
/// Returns `true` if `a` dominates `b`.
///
/// This means that every control-flow path from the function entry to `b` must go through `a`.
///
/// Dominance is ill defined for unreachable blocks. This function can always determine
/// dominance for instructions in the same block, but otherwise returns `false` if either block
/// is unreachable.
///
/// An instruction is considered to dominate itself.
pub fn dominates<A, B>(&self, a: A, b: B, layout: &Layout) -> bool
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>,
{
let a = a.into();
let b = b.into();
match a {
ExpandedProgramPoint::Block(block_a) => {
a == b || self.last_dominator(block_a, b, layout).is_some()
}
ExpandedProgramPoint::Inst(inst_a) => {
let block_a = layout
.inst_block(inst_a)
.expect("Instruction not in layout.");
match self.last_dominator(block_a, b, layout) {
Some(last) => layout.cmp(inst_a, last) != Ordering::Greater,
None => false,
}
}
}
}
/// Find the last instruction in `a` that dominates `b`.
/// If no instructions in `a` dominate `b`, return `None`.
pub fn last_dominator<B>(&self, a: Block, b: B, layout: &Layout) -> Option<Inst>
where
B: Into<ExpandedProgramPoint>,
{
let (mut block_b, mut inst_b) = match b.into() {
ExpandedProgramPoint::Block(block) => (block, None),
ExpandedProgramPoint::Inst(inst) => (
layout.inst_block(inst).expect("Instruction not in layout."),
Some(inst),
),
};
let rpo_a = self.nodes[a].rpo_number;
// Run a finger up the dominator tree from b until we see a.
// Do nothing if b is unreachable.
while rpo_a < self.nodes[block_b].rpo_number {
let idom = match self.idom(block_b) {
Some(idom) => idom,
None => return None, // a is unreachable, so we climbed past the entry
};
block_b = layout.inst_block(idom).expect("Dominator got removed.");
inst_b = Some(idom);
}
if a == block_b {
inst_b
} else {
None
}
}
/// Compute the common dominator of two basic blocks.
///
/// Both basic blocks are assumed to be reachable.
pub fn common_dominator(
&self,
mut a: BlockPredecessor,
mut b: BlockPredecessor,
layout: &Layout,
) -> BlockPredecessor {
loop {
match self.rpo_cmp_block(a.block, b.block) {
Ordering::Less => {
// `a` comes before `b` in the RPO. Move `b` up.
let idom = self.nodes[b.block].idom.expect("Unreachable basic block?");
b = BlockPredecessor::new(
layout.inst_block(idom).expect("Dangling idom instruction"),
idom,
);
}
Ordering::Greater => {
// `b` comes before `a` in the RPO. Move `a` up.
let idom = self.nodes[a.block].idom.expect("Unreachable basic block?");
a = BlockPredecessor::new(
layout.inst_block(idom).expect("Dangling idom instruction"),
idom,
);
}
Ordering::Equal => break,
}
}
debug_assert_eq!(
a.block, b.block,
"Unreachable block passed to common_dominator?"
);
// We're in the same block. The common dominator is the earlier instruction.
if layout.cmp(a.inst, b.inst) == Ordering::Less {
a
} else {
b
}
}
}
impl DominatorTree {
/// Allocate a new blank dominator tree. Use `compute` to compute the dominator tree for a
/// function.
pub fn new() -> Self {
Self {
nodes: SecondaryMap::new(),
postorder: Vec::new(),
stack: Vec::new(),
valid: false,
}
}
/// Allocate and compute a dominator tree.
pub fn with_function(func: &Function, cfg: &ControlFlowGraph) -> Self {
let block_capacity = func.layout.block_capacity();
let mut domtree = Self {
nodes: SecondaryMap::with_capacity(block_capacity),
postorder: Vec::with_capacity(block_capacity),
stack: Vec::new(),
valid: false,
};
domtree.compute(func, cfg);
domtree
}
/// Reset and compute a CFG post-order and dominator tree.
pub fn compute(&mut self, func: &Function, cfg: &ControlFlowGraph) {
let _tt = timing::domtree();
debug_assert!(cfg.is_valid());
self.compute_postorder(func);
self.compute_domtree(func, cfg);
self.valid = true;
}
/// Clear the data structures used to represent the dominator tree. This will leave the tree in
/// a state where `is_valid()` returns false.
pub fn clear(&mut self) {
self.nodes.clear();
self.postorder.clear();
debug_assert!(self.stack.is_empty());
self.valid = false;
}
/// Check if the dominator tree is in a valid state.
///
/// Note that this doesn't perform any kind of validity checks. It simply checks if the
/// `compute()` method has been called since the last `clear()`. It does not check that the
/// dominator tree is consistent with the CFG.
pub fn is_valid(&self) -> bool {
self.valid
}
/// Reset all internal data structures and compute a post-order of the control flow graph.
///
/// This leaves `rpo_number == 1` for all reachable blocks, 0 for unreachable ones.
fn compute_postorder(&mut self, func: &Function) {
self.clear();
self.nodes.resize(func.dfg.num_blocks());
// This algorithm is a depth first traversal (DFT) of the control flow graph, computing a
// post-order of the blocks that are reachable from the entry block. A DFT post-order is not
// unique. The specific order we get is controlled by two factors:
//
// 1. The order each node's children are visited, and
// 2. The method used for pruning graph edges to get a tree.
//
// There are two ways of viewing the CFG as a graph:
//
// 1. Each block is a node, with outgoing edges for all the branches in the block.
// 2. Each basic block is a node, with outgoing edges for the single branch at the end of
// the BB. (A block is a linear sequence of basic blocks).
//
// The first graph is a contraction of the second one. We want to compute a block post-order
// that is compatible with both graph interpretations. That is, if you compute a BB post-order
// and then remove those BBs that do not correspond to block headers, you get a post-order of
// the block graph.
//
// Node child order:
//
// In the BB graph, we always go down the fall-through path first and follow the branch
// destination second.
//
// In the block graph, this is equivalent to visiting block successors in a bottom-up
// order, starting from the destination of the block's terminating jump, ending at the
// destination of the first branch in the block.
//
// Edge pruning:
//
// In the BB graph, we keep an edge to a block the first time we visit the *source* side
// of the edge. Any subsequent edges to the same block are pruned.
//
// The equivalent tree is reached in the block graph by keeping the first edge to a block
// in a top-down traversal of the successors. (And then visiting edges in a bottom-up
// order).
//
// This pruning method makes it possible to compute the DFT without storing lots of
// information about the progress through a block.
// During this algorithm only, use `rpo_number` to hold the following state:
//
// 0: block has not yet been reached in the pre-order.
// SEEN: block has been pushed on the stack but successors not yet pushed.
// DONE: Successors pushed.
match func.layout.entry_block() {
Some(block) => {
self.stack.push(block);
self.nodes[block].rpo_number = SEEN;
}
None => return,
}
while let Some(block) = self.stack.pop() {
match self.nodes[block].rpo_number {
SEEN => {
// This is the first time we pop the block, so we need to scan its successors and
// then revisit it.
self.nodes[block].rpo_number = DONE;
self.stack.push(block);
self.push_successors(func, block);
}
DONE => {
// This is the second time we pop the block, so all successors have been
// processed.
self.postorder.push(block);
}
_ => unreachable!(),
}
}
}
/// Push `block` successors onto `self.stack`, filtering out those that have already been seen.
///
/// The successors are pushed in program order which is important to get a split-invariant
/// post-order. Split-invariant means that if a block is split in two, we get the same
/// post-order except for the insertion of the new block header at the split point.
fn push_successors(&mut self, func: &Function, block: Block) {
for inst in func.layout.block_likely_branches(block) {
match func.dfg.analyze_branch(inst) {
BranchInfo::SingleDest(succ, _) => self.push_if_unseen(succ),
BranchInfo::Table(jt, dest) => {
for succ in func.jump_tables[jt].iter() {
self.push_if_unseen(*succ);
}
if let Some(dest) = dest {
self.push_if_unseen(dest);
}
}
BranchInfo::NotABranch => {}
}
}
}
/// Push `block` onto `self.stack` if it has not already been seen.
fn push_if_unseen(&mut self, block: Block) {
if self.nodes[block].rpo_number == 0 {
self.nodes[block].rpo_number = SEEN;
self.stack.push(block);
}
}
/// Build a dominator tree from a control flow graph using Keith D. Cooper's
/// "Simple, Fast Dominator Algorithm."
fn compute_domtree(&mut self, func: &Function, cfg: &ControlFlowGraph) {
// During this algorithm, `rpo_number` has the following values:
//
// 0: block is not reachable.
// 1: block is reachable, but has not yet been visited during the first pass. This is set by
// `compute_postorder`.
// 2+: block is reachable and has an assigned RPO number.
// We'll be iterating over a reverse post-order of the CFG, skipping the entry block.
let (entry_block, postorder) = match self.postorder.as_slice().split_last() {
Some((&eb, rest)) => (eb, rest),
None => return,
};
debug_assert_eq!(Some(entry_block), func.layout.entry_block());
// Do a first pass where we assign RPO numbers to all reachable nodes.
self.nodes[entry_block].rpo_number = 2 * STRIDE;
for (rpo_idx, &block) in postorder.iter().rev().enumerate() {
// Update the current node and give it an RPO number.
// The entry block got 2, the rest start at 3 by multiples of STRIDE to leave
// room for future dominator tree modifications.
//
// Since `compute_idom` will only look at nodes with an assigned RPO number, the
// function will never see an uninitialized predecessor.
//
// Due to the nature of the post-order traversal, every node we visit will have at
// least one predecessor that has previously been visited during this RPO.
self.nodes[block] = DomNode {
idom: self.compute_idom(block, cfg, &func.layout).into(),
rpo_number: (rpo_idx as u32 + 3) * STRIDE,
}
}
// Now that we have RPO numbers for everything and initial immediate dominator estimates,
// iterate until convergence.
//
// If the function is free of irreducible control flow, this will exit after one iteration.
let mut changed = true;
while changed {
changed = false;
for &block in postorder.iter().rev() {
let idom = self.compute_idom(block, cfg, &func.layout).into();
if self.nodes[block].idom != idom {
self.nodes[block].idom = idom;
changed = true;
}
}
}
}
// Compute the immediate dominator for `block` using the current `idom` states for the reachable
// nodes.
fn compute_idom(&self, block: Block, cfg: &ControlFlowGraph, layout: &Layout) -> Inst {
// Get an iterator with just the reachable, already visited predecessors to `block`.
// Note that during the first pass, `rpo_number` is 1 for reachable blocks that haven't
// been visited yet, 0 for unreachable blocks.
let mut reachable_preds = cfg
.pred_iter(block)
.filter(|&BlockPredecessor { block: pred, .. }| self.nodes[pred].rpo_number > 1);
// The RPO must visit at least one predecessor before this node.
let mut idom = reachable_preds
.next()
.expect("block node must have one reachable predecessor");
for pred in reachable_preds {
idom = self.common_dominator(idom, pred, layout);
}
idom.inst
}
}
/// Optional pre-order information that can be computed for a dominator tree.
///
/// This data structure is computed from a `DominatorTree` and provides:
///
/// - A forward traversable dominator tree through the `children()` iterator.
/// - An ordering of blocks according to a dominator tree pre-order.
/// - Constant time dominance checks at the block granularity.
///
/// The information in this auxiliary data structure is not easy to update when the control flow
/// graph changes, which is why it is kept separate.
pub struct DominatorTreePreorder {
nodes: SecondaryMap<Block, ExtraNode>,
// Scratch memory used by `compute_postorder()`.
stack: Vec<Block>,
}
#[derive(Default, Clone)]
struct ExtraNode {
/// First child node in the domtree.
child: PackedOption<Block>,
/// Next sibling node in the domtree. This linked list is ordered according to the CFG RPO.
sibling: PackedOption<Block>,
/// Sequence number for this node in a pre-order traversal of the dominator tree.
/// Unreachable blocks have number 0, the entry block is 1.
pre_number: u32,
/// Maximum `pre_number` for the sub-tree of the dominator tree that is rooted at this node.
/// This is always >= `pre_number`.
pre_max: u32,
}
/// Creating and computing the dominator tree pre-order.
impl DominatorTreePreorder {
/// Create a new blank `DominatorTreePreorder`.
pub fn new() -> Self {
Self {
nodes: SecondaryMap::new(),
stack: Vec::new(),
}
}
/// Recompute this data structure to match `domtree`.
pub fn compute(&mut self, domtree: &DominatorTree, layout: &Layout) {
self.nodes.clear();
debug_assert_eq!(self.stack.len(), 0);
// Step 1: Populate the child and sibling links.
//
// By following the CFG post-order and pushing to the front of the lists, we make sure that
// sibling lists are ordered according to the CFG reverse post-order.
for &block in domtree.cfg_postorder() {
if let Some(idom_inst) = domtree.idom(block) {
let idom = layout.pp_block(idom_inst);
let sib = mem::replace(&mut self.nodes[idom].child, block.into());
self.nodes[block].sibling = sib;
} else {
// The only block without an immediate dominator is the entry.
self.stack.push(block);
}
}
// Step 2. Assign pre-order numbers from a DFS of the dominator tree.
debug_assert!(self.stack.len() <= 1);
let mut n = 0;
while let Some(block) = self.stack.pop() {
n += 1;
let node = &mut self.nodes[block];
node.pre_number = n;
node.pre_max = n;
if let Some(n) = node.sibling.expand() {
self.stack.push(n);
}
if let Some(n) = node.child.expand() {
self.stack.push(n);
}
}
// Step 3. Propagate the `pre_max` numbers up the tree.
// The CFG post-order is topologically ordered w.r.t. dominance so a node comes after all
// its dominator tree children.
for &block in domtree.cfg_postorder() {
if let Some(idom_inst) = domtree.idom(block) {
let idom = layout.pp_block(idom_inst);
let pre_max = cmp::max(self.nodes[block].pre_max, self.nodes[idom].pre_max);
self.nodes[idom].pre_max = pre_max;
}
}
}
}
/// An iterator that enumerates the direct children of a block in the dominator tree.
pub struct ChildIter<'a> {
dtpo: &'a DominatorTreePreorder,
next: PackedOption<Block>,
}
impl<'a> Iterator for ChildIter<'a> {
type Item = Block;
fn next(&mut self) -> Option<Block> {
let n = self.next.expand();
if let Some(block) = n {
self.next = self.dtpo.nodes[block].sibling;
}
n
}
}
/// Query interface for the dominator tree pre-order.
impl DominatorTreePreorder {
/// Get an iterator over the direct children of `block` in the dominator tree.
///
/// These are the blocks whose immediate dominator is an instruction in `block`, ordered according
/// to the CFG reverse post-order.
pub fn children(&self, block: Block) -> ChildIter {
ChildIter {
dtpo: self,
next: self.nodes[block].child,
}
}
/// Fast, constant time dominance check with block granularity.
///
/// This computes the same result as `domtree.dominates(a, b)`, but in guaranteed fast constant
/// time. This is less general than the `DominatorTree` method because it only works with block
/// program points.
///
/// A block is considered to dominate itself.
pub fn dominates(&self, a: Block, b: Block) -> bool {
let na = &self.nodes[a];
let nb = &self.nodes[b];
na.pre_number <= nb.pre_number && na.pre_max >= nb.pre_max
}
/// Compare two blocks according to the dominator pre-order.
pub fn pre_cmp_block(&self, a: Block, b: Block) -> Ordering {
self.nodes[a].pre_number.cmp(&self.nodes[b].pre_number)
}
/// Compare two program points according to the dominator tree pre-order.
///
/// This ordering of program points has the property that given a program point, pp, all the
/// program points dominated by pp follow immediately and contiguously after pp in the order.
pub fn pre_cmp<A, B>(&self, a: A, b: B, layout: &Layout) -> Ordering
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>,
{
let a = a.into();
let b = b.into();
self.pre_cmp_block(layout.pp_block(a), layout.pp_block(b))
.then(layout.cmp(a, b))
}
More examples
fn expand_cond_trap(
inst: ir::Inst,
func: &mut ir::Function,
cfg: &mut ControlFlowGraph,
opcode: ir::Opcode,
arg: ir::Value,
code: ir::TrapCode,
) {
trace!(
"expanding conditional trap: {:?}: {}",
inst,
func.dfg.display_inst(inst)
);
// Parse the instruction.
let trapz = match opcode {
ir::Opcode::Trapz => true,
ir::Opcode::Trapnz | ir::Opcode::ResumableTrapnz => false,
_ => panic!("Expected cond trap: {}", func.dfg.display_inst(inst)),
};
// Split the block after `inst`:
//
// trapnz arg
// ..
//
// Becomes:
//
// brz arg, new_block_resume
// jump new_block_trap
//
// new_block_trap:
// trap
//
// new_block_resume:
// ..
let old_block = func.layout.pp_block(inst);
let new_block_trap = func.dfg.make_block();
let new_block_resume = func.dfg.make_block();
// Trapping is a rare event, so mark the trapping block as cold.
func.layout.set_cold(new_block_trap);
// Replace trap instruction by the inverted condition.
if trapz {
func.dfg.replace(inst).brnz(arg, new_block_resume, &[]);
} else {
func.dfg.replace(inst).brz(arg, new_block_resume, &[]);
}
// Add jump instruction after the inverted branch.
let mut pos = FuncCursor::new(func).after_inst(inst);
pos.use_srcloc(inst);
pos.ins().jump(new_block_trap, &[]);
// Insert the new label and the unconditional trap terminator.
pos.insert_block(new_block_trap);
match opcode {
ir::Opcode::Trapz | ir::Opcode::Trapnz => {
pos.ins().trap(code);
}
ir::Opcode::ResumableTrapnz => {
pos.ins().resumable_trap(code);
pos.ins().jump(new_block_resume, &[]);
}
_ => unreachable!(),
}
// Insert the new label and resume the execution when the trap fails.
pos.insert_block(new_block_resume);
// Finally update the CFG.
cfg.recompute_block(pos.func, old_block);
cfg.recompute_block(pos.func, new_block_resume);
cfg.recompute_block(pos.func, new_block_trap);
}
fn verify_inst_arg(
&self,
loc_inst: Inst,
v: Value,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
self.verify_value(loc_inst, v, errors)?;
let dfg = &self.func.dfg;
let loc_block = self.func.layout.pp_block(loc_inst);
let is_reachable = self.expected_domtree.is_reachable(loc_block);
// SSA form
match dfg.value_def(v) {
ValueDef::Result(def_inst, _) => {
// Value is defined by an instruction that exists.
if !dfg.inst_is_valid(def_inst) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by invalid instruction {}", v, def_inst),
));
}
// Defining instruction is inserted in a block.
if self.func.layout.inst_block(def_inst) == None {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which has no block", v, def_inst),
));
}
// Defining instruction dominates the instruction that uses the value.
if is_reachable {
if !self
.expected_domtree
.dominates(def_inst, loc_inst, &self.func.layout)
{
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value {} from non-dominating {}", v, def_inst),
));
}
if def_inst == loc_inst {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value {} from itself", v),
));
}
}
}
ValueDef::Param(block, _) => {
// Value is defined by an existing block.
if !dfg.block_is_valid(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by invalid block {}", v, block),
));
}
// Defining block is inserted in the layout
if !self.func.layout.is_block_inserted(block) {
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("{} is defined by {} which is not in the layout", v, block),
));
}
// The defining block dominates the instruction using this value.
if is_reachable
&& !self
.expected_domtree
.dominates(block, loc_inst, &self.func.layout)
{
return errors.fatal((
loc_inst,
self.context(loc_inst),
format!("uses value arg from non-dominating {}", block),
));
}
}
}
Ok(())
}
sourcepub fn append_inst(&mut self, inst: Inst, block: Block)
pub fn append_inst(&mut self, inst: Inst, block: Block)
Append `inst` to the end of `block`.
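A hedged sketch (the helper `place_at_end` is illustrative; it assumes `block` is already inserted in the layout and `inst` has been created in the DFG but not yet placed anywhere):

```
use cranelift_codegen::ir::{Block, Function, Inst};

fn place_at_end(func: &mut Function, inst: Inst, block: Block) {
    // `append_inst` requires that `inst` is not yet in the layout and that
    // `block` is; both conditions are checked by debug assertions in the
    // method itself.
    func.layout.append_inst(inst, block);
    // `inst` is now the last instruction of `block`.
    debug_assert_eq!(func.layout.last_inst(block), Some(inst));
}
```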
Examples found in repository?
More examples
fn add_node(&mut self, node: &Node, args: &[Value], to_block: Block) -> ValueList {
let (instdata, result_ty, arity) = match node {
Node::Pure { op, ty, arity, .. } | Node::Inst { op, ty, arity, .. } => (
op.with_args(args, &mut self.func.dfg.value_lists),
*ty,
*arity,
),
Node::Load { op, ty, .. } => {
(op.with_args(args, &mut self.func.dfg.value_lists), *ty, 1)
}
_ => panic!("Cannot `add_node()` on block param or projection"),
};
let srcloc = match node {
Node::Inst { srcloc, .. } | Node::Load { srcloc, .. } => *srcloc,
_ => RelSourceLoc::default(),
};
let opcode = instdata.opcode();
// Is this instruction either an actual terminator (an
// instruction that must end the block), or at least in the
// group of branches at the end (including conditional
// branches that may be followed by an actual terminator)? We
// call this the "terminator group", and we record the first
// inst in this group (`first_branch` below) so that we do not
// insert instructions needed only by args of later
// instructions in the terminator group in the middle of the
// terminator group.
//
// E.g., for the original sequence
// v1 = op ...
// brnz vCond, block1
// jump block2(v1)
//
// elaboration would naively produce
//
// brnz vCond, block1
// v1 = op ...
// jump block2(v1)
//
// but we use the `first_branch` mechanism below to ensure
// that once we've emitted at least one branch, all other
// elaborated insts have to go before that. So we emit brnz
// first, then as we elaborate the jump, we find we need the
// `op`; we `insert_inst` it *before* the brnz (which is the
// `first_branch`).
let is_terminator_group_inst =
opcode.is_branch() || opcode.is_return() || opcode == Opcode::Trap;
let inst = self.func.dfg.make_inst(instdata);
self.func.srclocs[inst] = srcloc;
if arity == 1 {
self.func.dfg.append_result(inst, result_ty);
} else {
for _ in 0..arity {
self.func.dfg.append_result(inst, crate::ir::types::INVALID);
}
}
if is_terminator_group_inst {
self.func.layout.append_inst(inst, to_block);
if self.first_branch[to_block].is_none() {
self.first_branch[to_block] = Some(inst).into();
}
} else if let Some(branch) = self.first_branch[to_block].into() {
self.func.layout.insert_inst(inst, branch);
} else {
self.func.layout.append_inst(inst, to_block);
}
self.func.dfg.inst_results_list(inst)
}
sourcepub fn first_inst(&self, block: Block) -> Option<Inst>
pub fn first_inst(&self, block: Block) -> Option<Inst>
Fetch a block’s first instruction.
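For illustration, a small sketch (the helper `count_insts` is assumed, not from the crate) that walks a block with `first_inst`/`next_inst`; iterating `block_insts(block)` yields the same sequence:

```
use cranelift_codegen::ir::{Block, Function};

fn count_insts(func: &Function, block: Block) -> usize {
    let mut count = 0;
    // Start at the block's first instruction and follow the `next` links.
    let mut cur = func.layout.first_inst(block);
    while let Some(inst) = cur {
        count += 1;
        cur = func.layout.next_inst(inst);
    }
    count
}
```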
Examples found in repository?
fn goto_first_insertion_point(&mut self, block: ir::Block) {
if let Some(inst) = self.layout().first_inst(block) {
self.goto_inst(inst);
} else {
self.goto_bottom(block);
}
}
/// Go to the first instruction in `block`.
fn goto_first_inst(&mut self, block: ir::Block) {
let inst = self.layout().first_inst(block).expect("Empty block");
self.goto_inst(inst);
}
/// Go to the last instruction in `block`.
fn goto_last_inst(&mut self, block: ir::Block) {
let inst = self.layout().last_inst(block).expect("Empty block");
self.goto_inst(inst);
}
/// Go to the top of `block` which must be inserted into the layout.
/// At this position, instructions cannot be inserted, but `next_inst()` will move to the first
/// instruction in `block`.
fn goto_top(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::Before(block));
}
/// Go to the bottom of `block` which must be inserted into the layout.
/// At this position, inserted instructions will be appended to `block`.
fn goto_bottom(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::After(block));
}
/// Go to the top of the next block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the top of the first block in the
/// function.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `next_block()` method is intended for iterating over the blocks in layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.next_block() {
/// // Edit block.
/// }
/// }
/// ```
fn next_block(&mut self) -> Option<ir::Block> {
let next = if let Some(block) = self.current_block() {
self.layout().next_block(block)
} else {
self.layout().entry_block()
};
self.set_position(match next {
Some(block) => CursorPosition::Before(block),
None => CursorPosition::Nowhere,
});
next
}
/// Go to the bottom of the previous block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the bottom of the last block in the
/// function.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `prev_block()` method is intended for iterating over the blocks in backwards layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.prev_block() {
/// // Edit block.
/// }
/// }
/// ```
fn prev_block(&mut self) -> Option<ir::Block> {
let prev = if let Some(block) = self.current_block() {
self.layout().prev_block(block)
} else {
self.layout().last_block()
};
self.set_position(match prev {
Some(block) => CursorPosition::After(block),
None => CursorPosition::Nowhere,
});
prev
}
/// Move to the next instruction in the same block and return it.
///
/// - If the cursor was positioned before a block, go to the first instruction in that block.
/// - If there are no more instructions in the block, go to the `After(block)` position and return
/// `None`.
/// - If the cursor wasn't pointing anywhere, keep doing that.
///
/// This method will never move the cursor to a different block.
///
/// # Examples
///
/// The `next_inst()` method is intended for iterating over the instructions in a block like
/// this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_block(func: &mut Function, block: Block) {
/// let mut cursor = FuncCursor::new(func).at_top(block);
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
/// }
/// ```
/// The loop body can insert and remove instructions via the cursor.
///
/// Iterating over all the instructions in a function looks like this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.next_block() {
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
/// }
/// }
/// ```
fn next_inst(&mut self) -> Option<ir::Inst> {
use self::CursorPosition::*;
match self.position() {
Nowhere | After(..) => None,
At(inst) => {
if let Some(next) = self.layout().next_inst(inst) {
self.set_position(At(next));
Some(next)
} else {
let pos = After(
self.layout()
.inst_block(inst)
.expect("current instruction removed?"),
);
self.set_position(pos);
None
}
}
Before(block) => {
if let Some(next) = self.layout().first_inst(block) {
self.set_position(At(next));
Some(next)
} else {
self.set_position(After(block));
None
}
}
}
}
More examples
pub fn remove_block(&mut self, block: Block) {
debug_assert!(self.is_block_inserted(block), "block not in the layout");
debug_assert!(self.first_inst(block).is_none(), "block must be empty.");
// Clear the `block` node and extract links.
let prev;
let next;
{
let n = &mut self.blocks[block];
prev = n.prev;
next = n.next;
n.prev = None.into();
n.next = None.into();
}
// Fix up links to `block`.
match prev.expand() {
None => self.first_block = next.expand(),
Some(p) => self.blocks[p].next = next,
}
match next.expand() {
None => self.last_block = prev.expand(),
Some(n) => self.blocks[n].prev = prev,
}
}
pub fn run(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
self.verify_global_values(errors)?;
self.verify_heaps(errors)?;
self.verify_tables(errors)?;
self.verify_jump_tables(errors)?;
self.typecheck_entry_block_params(errors)?;
self.check_entry_not_cold(errors)?;
self.typecheck_function_signature(errors)?;
for block in self.func.layout.blocks() {
if self.func.layout.first_inst(block).is_none() {
return errors.fatal((block, format!("{} cannot be empty", block)));
}
for inst in self.func.layout.block_insts(block) {
self.block_integrity(block, inst, errors)?;
self.instruction_integrity(inst, errors)?;
self.typecheck(inst, errors)?;
self.immediate_constraints(inst, errors)?;
}
self.encodable_as_bb(block, errors)?;
}
verify_flags(self.func, &self.expected_cfg, errors)?;
if !errors.is_empty() {
log::warn!(
"Found verifier errors in function:\n{}",
pretty_verifier_error(self.func, None, errors.clone())
);
}
Ok(())
}
pub fn eliminate_unreachable_code(
func: &mut ir::Function,
cfg: &mut ControlFlowGraph,
domtree: &DominatorTree,
) {
let _tt = timing::unreachable_code();
let mut pos = FuncCursor::new(func);
while let Some(block) = pos.next_block() {
if domtree.is_reachable(block) {
continue;
}
trace!("Eliminating unreachable {}", block);
        // Move the cursor out of the way and make sure the next loop iteration goes to the right
// block.
pos.prev_block();
// Remove all instructions from `block`.
while let Some(inst) = pos.func.layout.first_inst(block) {
trace!(" - {}", pos.func.dfg.display_inst(inst));
pos.func.layout.remove_inst(inst);
}
// Once the block is completely empty, we can update the CFG which removes it from any
// predecessor lists.
cfg.recompute_block(pos.func, block);
// Finally, remove the block from the layout.
pos.func.layout.remove_block(block);
}
// Remove all jumptable block-list contents that refer to unreachable
// blocks; the jumptable itself must have been unused (or used only in an
// unreachable block) if so. Note that we are not necessarily removing *all*
// unused jumptables, because that would require computing their
// reachability as well; we are just removing enough to clean up references
// to deleted blocks.
for jt_data in func.jump_tables.values_mut() {
let invalid_ref = jt_data.iter().any(|block| !domtree.is_reachable(*block));
if invalid_ref {
jt_data.clear();
}
}
}
fn compute_block_input_states(
func: &Function,
cfg: &ControlFlowGraph,
) -> SecondaryMap<Block, Option<LastStores>> {
let mut block_input = SecondaryMap::with_capacity(func.dfg.num_blocks());
let mut worklist: SmallVec<[Block; 16]> = smallvec![];
let mut worklist_set = FxHashSet::default();
let entry = func.layout.entry_block().unwrap();
worklist.push(entry);
worklist_set.insert(entry);
block_input[entry] = Some(LastStores::default());
while let Some(block) = worklist.pop() {
worklist_set.remove(&block);
let state = block_input[block].clone().unwrap();
trace!("alias analysis: input to {} is {:?}", block, state);
let state = func
.layout
.block_insts(block)
.fold(state, |mut state, inst| {
state.update(func, inst);
trace!("after {}: state is {:?}", inst, state);
state
});
for succ in cfg.succ_iter(block) {
let succ_first_inst = func.layout.first_inst(succ).unwrap();
let succ_state = &mut block_input[succ];
let old = succ_state.clone();
if let Some(succ_state) = succ_state.as_mut() {
succ_state.meet_from(&state, succ_first_inst);
} else {
*succ_state = Some(state);
};
let updated = *succ_state != old;
if updated && worklist_set.insert(succ) {
worklist.push(succ);
}
}
}
block_input
}
pub fn do_simple_gvn(func: &mut Function, domtree: &mut DominatorTree) {
let _tt = timing::gvn();
debug_assert!(domtree.is_valid());
// Visit blocks in a reverse post-order.
//
// The RefCell here is a bit ugly since the HashKeys in the ScopedHashMap
// need a reference to the function.
let pos = RefCell::new(FuncCursor::new(func));
let mut visible_values: ScopedHashMap<HashKey, Inst> = ScopedHashMap::new();
let mut scope_stack: Vec<Inst> = Vec::new();
for &block in domtree.cfg_postorder().iter().rev() {
{
// Pop any scopes that we just exited.
let layout = &pos.borrow().func.layout;
loop {
if let Some(current) = scope_stack.last() {
if domtree.dominates(*current, block, layout) {
break;
}
} else {
break;
}
scope_stack.pop();
visible_values.decrement_depth();
}
// Push a scope for the current block.
scope_stack.push(layout.first_inst(block).unwrap());
visible_values.increment_depth();
}
pos.borrow_mut().goto_top(block);
while let Some(inst) = {
let mut pos = pos.borrow_mut();
pos.next_inst()
} {
// Resolve aliases, particularly aliases we created earlier.
pos.borrow_mut().func.dfg.resolve_aliases_in_arguments(inst);
let func = Ref::map(pos.borrow(), |pos| &pos.func);
let opcode = func.dfg[inst].opcode();
if opcode.is_branch() && !opcode.is_terminator() {
scope_stack.push(func.layout.next_inst(inst).unwrap());
visible_values.increment_depth();
}
if trivially_unsafe_for_gvn(opcode) {
continue;
}
// These are split up to separate concerns.
if is_load_and_not_readonly(&func.dfg[inst]) {
continue;
}
let ctrl_typevar = func.dfg.ctrl_typevar(inst);
let key = HashKey {
inst: func.dfg[inst],
ty: ctrl_typevar,
pos: &pos,
};
use crate::scoped_hash_map::Entry::*;
match visible_values.entry(key) {
Occupied(entry) => {
#[allow(clippy::debug_assert_with_mut_call)]
{
// Clippy incorrectly believes `&func.layout` should not be used here:
// https://github.com/rust-lang/rust-clippy/issues/4737
debug_assert!(domtree.dominates(*entry.get(), inst, &func.layout));
}
// If the redundant instruction is representing the current
// scope, pick a new representative.
let old = scope_stack.last_mut().unwrap();
if *old == inst {
*old = func.layout.next_inst(inst).unwrap();
}
// Replace the redundant instruction and remove it.
drop(func);
let mut pos = pos.borrow_mut();
pos.func.dfg.replace_with_aliases(inst, *entry.get());
pos.remove_inst_and_step_back();
}
Vacant(entry) => {
entry.insert(inst);
}
}
}
}
}
pub fn last_inst(&self, block: Block) -> Option<Inst>
Fetch a block’s last instruction.
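As an illustrative sketch (not taken from the repository; the function and block are assumed to already exist and the block to be non-empty in the layout), `last_inst` can be used to look up a block's terminator:

use cranelift_codegen::ir::{Block, Function, Inst};

fn block_terminator(func: &Function, block: Block) -> Option<Inst> {
    // For a fully built block, the last instruction in layout order is the
    // terminator; an empty or not-yet-finished block yields `None`.
    func.layout.last_inst(block)
}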
Examples found in repository?
fn goto_last_inst(&mut self, block: ir::Block) {
let inst = self.layout().last_inst(block).expect("Empty block");
self.goto_inst(inst);
}
/// Go to the top of `block` which must be inserted into the layout.
/// At this position, instructions cannot be inserted, but `next_inst()` will move to the first
/// instruction in `block`.
fn goto_top(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::Before(block));
}
/// Go to the bottom of `block` which must be inserted into the layout.
/// At this position, inserted instructions will be appended to `block`.
fn goto_bottom(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::After(block));
}
/// Go to the top of the next block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the top of the first block in the
/// function.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `next_block()` method is intended for iterating over the blocks in layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.next_block() {
/// // Edit block.
/// }
/// }
/// ```
fn next_block(&mut self) -> Option<ir::Block> {
let next = if let Some(block) = self.current_block() {
self.layout().next_block(block)
} else {
self.layout().entry_block()
};
self.set_position(match next {
Some(block) => CursorPosition::Before(block),
None => CursorPosition::Nowhere,
});
next
}
/// Go to the bottom of the previous block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the bottom of the last block in the
/// function.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `prev_block()` method is intended for iterating over the blocks in backwards layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.prev_block() {
/// // Edit block.
/// }
/// }
/// ```
fn prev_block(&mut self) -> Option<ir::Block> {
let prev = if let Some(block) = self.current_block() {
self.layout().prev_block(block)
} else {
self.layout().last_block()
};
self.set_position(match prev {
Some(block) => CursorPosition::After(block),
None => CursorPosition::Nowhere,
});
prev
}
/// Move to the next instruction in the same block and return it.
///
/// - If the cursor was positioned before a block, go to the first instruction in that block.
/// - If there are no more instructions in the block, go to the `After(block)` position and return
/// `None`.
/// - If the cursor wasn't pointing anywhere, keep doing that.
///
/// This method will never move the cursor to a different block.
///
/// # Examples
///
/// The `next_inst()` method is intended for iterating over the instructions in a block like
/// this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_block(func: &mut Function, block: Block) {
/// let mut cursor = FuncCursor::new(func).at_top(block);
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
/// }
/// ```
/// The loop body can insert and remove instructions via the cursor.
///
/// Iterating over all the instructions in a function looks like this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.next_block() {
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
/// }
/// }
/// ```
fn next_inst(&mut self) -> Option<ir::Inst> {
use self::CursorPosition::*;
match self.position() {
Nowhere | After(..) => None,
At(inst) => {
if let Some(next) = self.layout().next_inst(inst) {
self.set_position(At(next));
Some(next)
} else {
let pos = After(
self.layout()
.inst_block(inst)
.expect("current instruction removed?"),
);
self.set_position(pos);
None
}
}
Before(block) => {
if let Some(next) = self.layout().first_inst(block) {
self.set_position(At(next));
Some(next)
} else {
self.set_position(After(block));
None
}
}
}
}
/// Move to the previous instruction in the same block and return it.
///
/// - If the cursor was positioned after a block, go to the last instruction in that block.
/// - If there are no more instructions in the block, go to the `Before(block)` position and return
/// `None`.
/// - If the cursor wasn't pointing anywhere, keep doing that.
///
/// This method will never move the cursor to a different block.
///
/// # Examples
///
    /// The `prev_inst()` method is intended for iterating backwards over the instructions in a
/// block like this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_block(func: &mut Function, block: Block) {
/// let mut cursor = FuncCursor::new(func).at_bottom(block);
/// while let Some(inst) = cursor.prev_inst() {
/// // Edit instructions...
/// }
/// }
/// ```
fn prev_inst(&mut self) -> Option<ir::Inst> {
use self::CursorPosition::*;
match self.position() {
Nowhere | Before(..) => None,
At(inst) => {
if let Some(prev) = self.layout().prev_inst(inst) {
self.set_position(At(prev));
Some(prev)
} else {
let pos = Before(
self.layout()
.inst_block(inst)
.expect("current instruction removed?"),
);
self.set_position(pos);
None
}
}
After(block) => {
if let Some(prev) = self.layout().last_inst(block) {
self.set_position(At(prev));
Some(prev)
} else {
self.set_position(Before(block));
None
}
}
}
}
More examples
fn has_pre_header(
layout: &Layout,
cfg: &ControlFlowGraph,
domtree: &DominatorTree,
header: Block,
) -> Option<(Block, Inst)> {
let mut result = None;
for BlockPredecessor {
block: pred_block,
inst: branch_inst,
} in cfg.pred_iter(header)
{
// We only count normal edges (not the back edges)
if !domtree.dominates(header, branch_inst, layout) {
if result.is_some() {
// We have already found one, there are more than one
return None;
}
if branch_inst != layout.last_inst(pred_block).unwrap()
|| cfg.succ_iter(pred_block).nth(1).is_some()
{
// It's along a critical edge, so don't use it.
return None;
}
result = Some((pred_block, branch_inst));
}
}
result
}
fn block_integrity(
&self,
block: Block,
inst: Inst,
errors: &mut VerifierErrors,
) -> VerifierStepResult<()> {
let is_terminator = self.func.dfg[inst].opcode().is_terminator();
let is_last_inst = self.func.layout.last_inst(block) == Some(inst);
if is_terminator && !is_last_inst {
// Terminating instructions only occur at the end of blocks.
return errors.fatal((
inst,
self.context(inst),
format!(
"a terminator instruction was encountered before the end of {}",
block
),
));
}
if is_last_inst && !is_terminator {
return errors.fatal((block, "block does not end in a terminator instruction"));
}
// Instructions belong to the correct block.
let inst_block = self.func.layout.inst_block(inst);
if inst_block != Some(block) {
return errors.fatal((
inst,
self.context(inst),
format!("should belong to {} not {:?}", block, inst_block),
));
}
// Parameters belong to the correct block.
for &arg in self.func.dfg.block_params(block) {
match self.func.dfg.value_def(arg) {
ValueDef::Param(arg_block, _) => {
if block != arg_block {
return errors.fatal((arg, format!("does not belong to {}", block)));
}
}
_ => {
return errors.fatal((arg, "expected an argument, found a result"));
}
}
}
Ok(())
}
pub fn next_inst(&self, inst: Inst) -> Option<Inst>
Fetch the instruction following `inst`.
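A minimal sketch (hypothetical helper, not from the repository) of walking a block forward using only the layout, assuming `block` is inserted:

use cranelift_codegen::ir::{Block, Function};

fn count_insts(func: &Function, block: Block) -> usize {
    let mut n = 0;
    // Start at the first instruction and follow `next_inst` until the end
    // of the block is reached.
    let mut cur = func.layout.first_inst(block);
    while let Some(inst) = cur {
        n += 1;
        cur = func.layout.next_inst(inst);
    }
    n
}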
Examples found in repository?
fn goto_after_inst(&mut self, inst: ir::Inst) {
debug_assert!(self.layout().inst_block(inst).is_some());
let new_pos = if let Some(next) = self.layout().next_inst(inst) {
CursorPosition::At(next)
} else {
CursorPosition::After(
self.layout()
.inst_block(inst)
.expect("current instruction removed?"),
)
};
self.set_position(new_pos);
}
/// Go to a specific instruction which must be inserted in the layout.
/// New instructions will be inserted before `inst`.
fn goto_inst(&mut self, inst: ir::Inst) {
debug_assert!(self.layout().inst_block(inst).is_some());
self.set_position(CursorPosition::At(inst));
}
/// Go to the position for inserting instructions at the beginning of `block`,
/// which unlike `goto_first_inst` doesn't assume that any instructions have
/// been inserted into `block` yet.
fn goto_first_insertion_point(&mut self, block: ir::Block) {
if let Some(inst) = self.layout().first_inst(block) {
self.goto_inst(inst);
} else {
self.goto_bottom(block);
}
}
/// Go to the first instruction in `block`.
fn goto_first_inst(&mut self, block: ir::Block) {
let inst = self.layout().first_inst(block).expect("Empty block");
self.goto_inst(inst);
}
/// Go to the last instruction in `block`.
fn goto_last_inst(&mut self, block: ir::Block) {
let inst = self.layout().last_inst(block).expect("Empty block");
self.goto_inst(inst);
}
/// Go to the top of `block` which must be inserted into the layout.
/// At this position, instructions cannot be inserted, but `next_inst()` will move to the first
/// instruction in `block`.
fn goto_top(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::Before(block));
}
/// Go to the bottom of `block` which must be inserted into the layout.
/// At this position, inserted instructions will be appended to `block`.
fn goto_bottom(&mut self, block: ir::Block) {
debug_assert!(self.layout().is_block_inserted(block));
self.set_position(CursorPosition::After(block));
}
/// Go to the top of the next block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the top of the first block in the
/// function.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `next_block()` method is intended for iterating over the blocks in layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.next_block() {
/// // Edit block.
/// }
/// }
/// ```
fn next_block(&mut self) -> Option<ir::Block> {
let next = if let Some(block) = self.current_block() {
self.layout().next_block(block)
} else {
self.layout().entry_block()
};
self.set_position(match next {
Some(block) => CursorPosition::Before(block),
None => CursorPosition::Nowhere,
});
next
}
/// Go to the bottom of the previous block in layout order and return it.
///
/// - If the cursor wasn't pointing at anything, go to the bottom of the last block in the
/// function.
/// - If there are no more blocks, leave the cursor pointing at nothing and return `None`.
///
/// # Examples
///
/// The `prev_block()` method is intended for iterating over the blocks in backwards layout order:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.prev_block() {
/// // Edit block.
/// }
/// }
/// ```
fn prev_block(&mut self) -> Option<ir::Block> {
let prev = if let Some(block) = self.current_block() {
self.layout().prev_block(block)
} else {
self.layout().last_block()
};
self.set_position(match prev {
Some(block) => CursorPosition::After(block),
None => CursorPosition::Nowhere,
});
prev
}
/// Move to the next instruction in the same block and return it.
///
/// - If the cursor was positioned before a block, go to the first instruction in that block.
/// - If there are no more instructions in the block, go to the `After(block)` position and return
/// `None`.
/// - If the cursor wasn't pointing anywhere, keep doing that.
///
/// This method will never move the cursor to a different block.
///
/// # Examples
///
/// The `next_inst()` method is intended for iterating over the instructions in a block like
/// this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_block(func: &mut Function, block: Block) {
/// let mut cursor = FuncCursor::new(func).at_top(block);
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
/// }
/// ```
/// The loop body can insert and remove instructions via the cursor.
///
/// Iterating over all the instructions in a function looks like this:
///
/// ```
/// # use cranelift_codegen::ir::{Function, Block};
/// # use cranelift_codegen::cursor::{Cursor, FuncCursor};
/// fn edit_func(func: &mut Function) {
/// let mut cursor = FuncCursor::new(func);
/// while let Some(block) = cursor.next_block() {
/// while let Some(inst) = cursor.next_inst() {
/// // Edit instructions...
/// }
/// }
/// }
/// ```
fn next_inst(&mut self) -> Option<ir::Inst> {
use self::CursorPosition::*;
match self.position() {
Nowhere | After(..) => None,
At(inst) => {
if let Some(next) = self.layout().next_inst(inst) {
self.set_position(At(next));
Some(next)
} else {
let pos = After(
self.layout()
.inst_block(inst)
.expect("current instruction removed?"),
);
self.set_position(pos);
None
}
}
Before(block) => {
if let Some(next) = self.layout().first_inst(block) {
self.set_position(At(next));
Some(next)
} else {
self.set_position(After(block));
None
}
}
}
}
More examples
pub fn do_simple_gvn(func: &mut Function, domtree: &mut DominatorTree) {
let _tt = timing::gvn();
debug_assert!(domtree.is_valid());
// Visit blocks in a reverse post-order.
//
// The RefCell here is a bit ugly since the HashKeys in the ScopedHashMap
// need a reference to the function.
let pos = RefCell::new(FuncCursor::new(func));
let mut visible_values: ScopedHashMap<HashKey, Inst> = ScopedHashMap::new();
let mut scope_stack: Vec<Inst> = Vec::new();
for &block in domtree.cfg_postorder().iter().rev() {
{
// Pop any scopes that we just exited.
let layout = &pos.borrow().func.layout;
loop {
if let Some(current) = scope_stack.last() {
if domtree.dominates(*current, block, layout) {
break;
}
} else {
break;
}
scope_stack.pop();
visible_values.decrement_depth();
}
// Push a scope for the current block.
scope_stack.push(layout.first_inst(block).unwrap());
visible_values.increment_depth();
}
pos.borrow_mut().goto_top(block);
while let Some(inst) = {
let mut pos = pos.borrow_mut();
pos.next_inst()
} {
// Resolve aliases, particularly aliases we created earlier.
pos.borrow_mut().func.dfg.resolve_aliases_in_arguments(inst);
let func = Ref::map(pos.borrow(), |pos| &pos.func);
let opcode = func.dfg[inst].opcode();
if opcode.is_branch() && !opcode.is_terminator() {
scope_stack.push(func.layout.next_inst(inst).unwrap());
visible_values.increment_depth();
}
if trivially_unsafe_for_gvn(opcode) {
continue;
}
// These are split up to separate concerns.
if is_load_and_not_readonly(&func.dfg[inst]) {
continue;
}
let ctrl_typevar = func.dfg.ctrl_typevar(inst);
let key = HashKey {
inst: func.dfg[inst],
ty: ctrl_typevar,
pos: &pos,
};
use crate::scoped_hash_map::Entry::*;
match visible_values.entry(key) {
Occupied(entry) => {
#[allow(clippy::debug_assert_with_mut_call)]
{
// Clippy incorrectly believes `&func.layout` should not be used here:
// https://github.com/rust-lang/rust-clippy/issues/4737
debug_assert!(domtree.dominates(*entry.get(), inst, &func.layout));
}
// If the redundant instruction is representing the current
// scope, pick a new representative.
let old = scope_stack.last_mut().unwrap();
if *old == inst {
*old = func.layout.next_inst(inst).unwrap();
}
// Replace the redundant instruction and remove it.
drop(func);
let mut pos = pos.borrow_mut();
pos.func.dfg.replace_with_aliases(inst, *entry.get());
pos.remove_inst_and_step_back();
}
Vacant(entry) => {
entry.insert(inst);
}
}
}
}
}
pub fn prev_inst(&self, inst: Inst) -> Option<Inst>
Fetch the instruction preceding `inst`.
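The backward direction mirrors the forward walk above; a sketch (hypothetical helper) that visits a block's instructions last-to-first:

use cranelift_codegen::ir::{Block, Function, Inst};

fn insts_reversed(func: &Function, block: Block) -> Vec<Inst> {
    let mut out = Vec::new();
    // Start at the last instruction and follow `prev_inst` back to the top.
    let mut cur = func.layout.last_inst(block);
    while let Some(inst) = cur {
        out.push(inst);
        cur = func.layout.prev_inst(inst);
    }
    out
}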
Examples found in repository?
More examples
fn prev_inst(&mut self) -> Option<ir::Inst> {
use self::CursorPosition::*;
match self.position() {
Nowhere | Before(..) => None,
At(inst) => {
if let Some(prev) = self.layout().prev_inst(inst) {
self.set_position(At(prev));
Some(prev)
} else {
let pos = Before(
self.layout()
.inst_block(inst)
.expect("current instruction removed?"),
);
self.set_position(pos);
None
}
}
After(block) => {
if let Some(prev) = self.layout().last_inst(block) {
self.set_position(At(prev));
Some(prev)
} else {
self.set_position(Before(block));
None
}
}
}
}
/// Insert an instruction at the current position.
///
/// - If pointing at an instruction, the new instruction is inserted before the current
/// instruction.
/// - If pointing at the bottom of a block, the new instruction is appended to the block.
/// - Otherwise panic.
///
    /// In either case, the cursor is not moved, such that repeated calls to `insert_inst()` cause
/// instructions to appear in insertion order in the block.
fn insert_inst(&mut self, inst: ir::Inst) {
use self::CursorPosition::*;
match self.position() {
Nowhere | Before(..) => panic!("Invalid insert_inst position"),
At(cur) => self.layout_mut().insert_inst(inst, cur),
After(block) => self.layout_mut().append_inst(inst, block),
}
}
/// Remove the instruction under the cursor.
///
/// The cursor is left pointing at the position following the current instruction.
///
/// Return the instruction that was removed.
fn remove_inst(&mut self) -> ir::Inst {
let inst = self.current_inst().expect("No instruction to remove");
self.next_inst();
self.layout_mut().remove_inst(inst);
inst
}
/// Remove the instruction under the cursor.
///
/// The cursor is left pointing at the position preceding the current instruction.
///
/// Return the instruction that was removed.
fn remove_inst_and_step_back(&mut self) -> ir::Inst {
let inst = self.current_inst().expect("No instruction to remove");
self.prev_inst();
self.layout_mut().remove_inst(inst);
inst
}
/// Insert a block at the current position and switch to it.
///
/// As far as possible, this method behaves as if the block header were an instruction inserted
/// at the current position.
///
/// - If the cursor is pointing at an existing instruction, *the current block is split in two*
/// and the current instruction becomes the first instruction in the inserted block.
/// - If the cursor points at the bottom of a block, the new block is inserted after the current
    /// one, and the cursor is moved to the bottom of the new block where instructions can be appended.
/// - If the cursor points to the top of a block, the new block is inserted above the current one.
/// - If the cursor is not pointing at anything, the new block is placed last in the layout.
///
/// This means that it is always valid to call this method, and it always leaves the cursor in
/// a state that will insert instructions into the new block.
fn insert_block(&mut self, new_block: ir::Block) {
use self::CursorPosition::*;
match self.position() {
At(inst) => {
self.layout_mut().split_block(new_block, inst);
// All other cases move to `After(block)`, but in this case we'll stay `At(inst)`.
return;
}
Nowhere => self.layout_mut().append_block(new_block),
Before(block) => self.layout_mut().insert_block(new_block, block),
After(block) => self.layout_mut().insert_block_after(new_block, block),
}
// For everything but `At(inst)` we end up appending to the new block.
self.set_position(After(new_block));
}
}
/// Function cursor.
///
/// A `FuncCursor` holds a mutable reference to a whole `ir::Function` while keeping a position
/// too. The function can be re-borrowed by accessing the public `cur.func` member.
///
/// This cursor is for use before legalization. The inserted instructions are not given an
/// encoding.
pub struct FuncCursor<'f> {
pos: CursorPosition,
srcloc: ir::SourceLoc,
/// The referenced function.
pub func: &'f mut ir::Function,
}
impl<'f> FuncCursor<'f> {
/// Create a new `FuncCursor` pointing nowhere.
pub fn new(func: &'f mut ir::Function) -> Self {
Self {
pos: CursorPosition::Nowhere,
srcloc: Default::default(),
func,
}
}
/// Use the source location of `inst` for future instructions.
pub fn use_srcloc(&mut self, inst: ir::Inst) {
self.srcloc = self.func.srcloc(inst);
}
/// Create an instruction builder that inserts an instruction at the current position.
pub fn ins(&mut self) -> ir::InsertBuilder<&mut FuncCursor<'f>> {
ir::InsertBuilder::new(self)
}
}
impl<'f> Cursor for FuncCursor<'f> {
fn position(&self) -> CursorPosition {
self.pos
}
fn set_position(&mut self, pos: CursorPosition) {
self.pos = pos
}
fn srcloc(&self) -> ir::SourceLoc {
self.srcloc
}
fn set_srcloc(&mut self, srcloc: ir::SourceLoc) {
self.func.params.ensure_base_srcloc(srcloc);
self.srcloc = srcloc;
}
fn layout(&self) -> &ir::Layout {
&self.func.layout
}
fn layout_mut(&mut self) -> &mut ir::Layout {
&mut self.func.layout
}
}
impl<'c, 'f> ir::InstInserterBase<'c> for &'c mut FuncCursor<'f> {
fn data_flow_graph(&self) -> &ir::DataFlowGraph {
&self.func.dfg
}
fn data_flow_graph_mut(&mut self) -> &mut ir::DataFlowGraph {
&mut self.func.dfg
}
fn insert_built_inst(self, inst: ir::Inst) -> &'c mut ir::DataFlowGraph {
// TODO: Remove this assertion once #796 is fixed.
#[cfg(debug_assertions)]
{
if let CursorPosition::At(_) = self.position() {
if let Some(curr) = self.current_inst() {
if let Some(prev) = self.layout().prev_inst(curr) {
let prev_op = self.data_flow_graph()[prev].opcode();
let inst_op = self.data_flow_graph()[inst].opcode();
let curr_op = self.data_flow_graph()[curr].opcode();
if prev_op.is_branch()
&& !prev_op.is_terminator()
&& !inst_op.is_terminator()
{
panic!(
"Inserting instruction {} after {}, and before {}",
inst_op, prev_op, curr_op
)
}
};
};
};
}
self.insert_inst(inst);
if !self.srcloc.is_default() {
self.func.set_srcloc(inst, self.srcloc);
}
&mut self.func.dfg
}
fn branch_order(pos: &mut FuncCursor, cfg: &mut ControlFlowGraph, block: Block, inst: Inst) {
let (term_inst, term_inst_args, term_dest, cond_inst, cond_inst_args, cond_dest, kind) =
match pos.func.dfg[inst] {
InstructionData::Jump {
opcode: Opcode::Jump,
destination,
ref args,
} => {
let next_block = if let Some(next_block) = pos.func.layout.next_block(block) {
next_block
} else {
return;
};
if destination == next_block {
return;
}
let prev_inst = if let Some(prev_inst) = pos.func.layout.prev_inst(inst) {
prev_inst
} else {
return;
};
let prev_inst_data = &pos.func.dfg[prev_inst];
if let Some(prev_dest) = prev_inst_data.branch_destination() {
if prev_dest != next_block {
return;
}
} else {
return;
}
match prev_inst_data {
InstructionData::Branch {
opcode,
args: ref prev_args,
destination: cond_dest,
} => {
let cond_arg = {
let args = pos.func.dfg.inst_args(prev_inst);
args[0]
};
let kind = match opcode {
Opcode::Brz => BranchOrderKind::BrzToBrnz(cond_arg),
Opcode::Brnz => BranchOrderKind::BrnzToBrz(cond_arg),
_ => panic!("unexpected opcode"),
};
(
inst,
args.clone(),
destination,
prev_inst,
prev_args.clone(),
*cond_dest,
kind,
)
}
_ => return,
}
}
_ => return,
};
let cond_args = cond_inst_args.as_slice(&pos.func.dfg.value_lists).to_vec();
let term_args = term_inst_args.as_slice(&pos.func.dfg.value_lists).to_vec();
match kind {
BranchOrderKind::BrnzToBrz(cond_arg) => {
pos.func
.dfg
.replace(term_inst)
.jump(cond_dest, &cond_args[1..]);
pos.func
.dfg
.replace(cond_inst)
.brz(cond_arg, term_dest, &term_args);
}
BranchOrderKind::BrzToBrnz(cond_arg) => {
pos.func
.dfg
.replace(term_inst)
.jump(cond_dest, &cond_args[1..]);
pos.func
.dfg
.replace(cond_inst)
.brnz(cond_arg, term_dest, &term_args);
}
}
cfg.recompute_block(pos.func, block);
}
pub fn canonical_branch_inst(&self, dfg: &DataFlowGraph, block: Block) -> Option<Inst>
Fetch the first instruction in a block’s terminal branch group.
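No repository example is shown for this method; as a rough sketch (hypothetical helper, assuming `func` exists and `block` is inserted), the returned instruction marks where the terminal branch group begins, so code that must execute unconditionally belongs before it:

use cranelift_codegen::ir::{Block, Function, Inst};

fn insertion_point_before_branches(func: &Function, block: Block) -> Option<Inst> {
    // If the block ends in a branch group, new non-branch instructions should
    // be placed before its first member; otherwise they can simply be
    // appended to the block.
    func.layout.canonical_branch_inst(&func.dfg, block)
}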
pub fn insert_inst(&mut self, inst: Inst, before: Inst)
Insert `inst` before the instruction `before` in the same block.
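For illustration, a sketch (the instruction `new_inst` is assumed to have been created with `DataFlowGraph::make_inst` and not yet placed in the layout) that puts it directly before a block's terminator:

use cranelift_codegen::ir::{Block, Function, Inst};

fn insert_before_terminator(func: &mut Function, block: Block, new_inst: Inst) {
    // `new_inst` must not already be inserted in the layout, and `block` must
    // be non-empty so that it has a terminator to insert in front of.
    let term = func.layout.last_inst(block).expect("block must be non-empty");
    func.layout.insert_inst(new_inst, term);
}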
Examples found in repository?
More examples
fn add_node(&mut self, node: &Node, args: &[Value], to_block: Block) -> ValueList {
let (instdata, result_ty, arity) = match node {
Node::Pure { op, ty, arity, .. } | Node::Inst { op, ty, arity, .. } => (
op.with_args(args, &mut self.func.dfg.value_lists),
*ty,
*arity,
),
Node::Load { op, ty, .. } => {
(op.with_args(args, &mut self.func.dfg.value_lists), *ty, 1)
}
_ => panic!("Cannot `add_node()` on block param or projection"),
};
let srcloc = match node {
Node::Inst { srcloc, .. } | Node::Load { srcloc, .. } => *srcloc,
_ => RelSourceLoc::default(),
};
let opcode = instdata.opcode();
// Is this instruction either an actual terminator (an
// instruction that must end the block), or at least in the
// group of branches at the end (including conditional
// branches that may be followed by an actual terminator)? We
// call this the "terminator group", and we record the first
// inst in this group (`first_branch` below) so that we do not
// insert instructions needed only by args of later
// instructions in the terminator group in the middle of the
// terminator group.
//
// E.g., for the original sequence
// v1 = op ...
// brnz vCond, block1
// jump block2(v1)
//
// elaboration would naively produce
//
// brnz vCond, block1
// v1 = op ...
// jump block2(v1)
//
// but we use the `first_branch` mechanism below to ensure
// that once we've emitted at least one branch, all other
// elaborated insts have to go before that. So we emit brnz
// first, then as we elaborate the jump, we find we need the
// `op`; we `insert_inst` it *before* the brnz (which is the
// `first_branch`).
let is_terminator_group_inst =
opcode.is_branch() || opcode.is_return() || opcode == Opcode::Trap;
let inst = self.func.dfg.make_inst(instdata);
self.func.srclocs[inst] = srcloc;
if arity == 1 {
self.func.dfg.append_result(inst, result_ty);
} else {
for _ in 0..arity {
self.func.dfg.append_result(inst, crate::ir::types::INVALID);
}
}
if is_terminator_group_inst {
self.func.layout.append_inst(inst, to_block);
if self.first_branch[to_block].is_none() {
self.first_branch[to_block] = Some(inst).into();
}
} else if let Some(branch) = self.first_branch[to_block].into() {
self.func.layout.insert_inst(inst, branch);
} else {
self.func.layout.append_inst(inst, to_block);
}
self.func.dfg.inst_results_list(inst)
}
pub fn remove_inst(&mut self, inst: Inst)
Remove `inst` from the layout.
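A small sketch (hypothetical helper): removing an instruction only detaches it from the layout; its definition and results remain in the data flow graph, which is why the repository examples below rewrite results to aliases before removal.

use cranelift_codegen::ir::{Function, Inst};

fn detach(func: &mut Function, inst: Inst) {
    // After removal the instruction no longer belongs to any block, but its
    // data is still present in `func.dfg` until it is otherwise rewritten.
    func.layout.remove_inst(inst);
    debug_assert!(func.layout.inst_block(inst).is_none());
}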
Examples found in repository?
fn remove_inst(&mut self) -> ir::Inst {
let inst = self.current_inst().expect("No instruction to remove");
self.next_inst();
self.layout_mut().remove_inst(inst);
inst
}
/// Remove the instruction under the cursor.
///
/// The cursor is left pointing at the position preceding the current instruction.
///
/// Return the instruction that was removed.
fn remove_inst_and_step_back(&mut self) -> ir::Inst {
let inst = self.current_inst().expect("No instruction to remove");
self.prev_inst();
self.layout_mut().remove_inst(inst);
inst
}
More examples
fn vmctx_addr(inst: ir::Inst, func: &mut ir::Function) {
// Get the value representing the `vmctx` argument.
let vmctx = func
.special_param(ir::ArgumentPurpose::VMContext)
.expect("Missing vmctx parameter");
// Replace the `global_value` instruction's value with an alias to the vmctx arg.
let result = func.dfg.first_result(inst);
func.dfg.clear_results(inst);
func.dfg.change_to_alias(result, vmctx);
func.layout.remove_inst(inst);
}
pub fn transplant_inst(&mut self, dst: Inst, src: Inst) {
debug_assert_eq!(
self.dfg.inst_results(dst).len(),
self.dfg.inst_results(src).len()
);
debug_assert!(self
.dfg
.inst_results(dst)
.iter()
.zip(self.dfg.inst_results(src))
.all(|(a, b)| self.dfg.value_type(*a) == self.dfg.value_type(*b)));
self.dfg[dst] = self.dfg[src];
self.layout.remove_inst(src);
}
pub fn expand_heap_addr(
inst: ir::Inst,
func: &mut ir::Function,
cfg: &mut ControlFlowGraph,
isa: &dyn TargetIsa,
heap: ir::Heap,
index: ir::Value,
offset: Uimm32,
access_size: Uimm8,
) {
let mut pos = FuncCursor::new(func).at_inst(inst);
pos.use_srcloc(inst);
let addr =
bounds_check_and_compute_addr(&mut pos, cfg, isa, heap, index, offset.into(), access_size);
// Replace the `heap_addr` and its result value with the legalized native
// address.
let addr_inst = pos.func.dfg.value_def(addr).unwrap_inst();
pos.func.dfg.replace_with_aliases(inst, addr_inst);
pos.func.layout.remove_inst(inst);
}
pub fn eliminate_unreachable_code(
func: &mut ir::Function,
cfg: &mut ControlFlowGraph,
domtree: &DominatorTree,
) {
let _tt = timing::unreachable_code();
let mut pos = FuncCursor::new(func);
while let Some(block) = pos.next_block() {
if domtree.is_reachable(block) {
continue;
}
trace!("Eliminating unreachable {}", block);
        // Move the cursor out of the way and make sure the next loop iteration goes to the right
// block.
pos.prev_block();
// Remove all instructions from `block`.
while let Some(inst) = pos.func.layout.first_inst(block) {
trace!(" - {}", pos.func.dfg.display_inst(inst));
pos.func.layout.remove_inst(inst);
}
// Once the block is completely empty, we can update the CFG which removes it from any
// predecessor lists.
cfg.recompute_block(pos.func, block);
// Finally, remove the block from the layout.
pos.func.layout.remove_block(block);
}
// Remove all jumptable block-list contents that refer to unreachable
// blocks; the jumptable itself must have been unused (or used only in an
// unreachable block) if so. Note that we are not necessarily removing *all*
// unused jumptables, because that would require computing their
// reachability as well; we are just removing enough to clean up references
// to deleted blocks.
for jt_data in func.jump_tables.values_mut() {
let invalid_ref = jt_data.iter().any(|block| !domtree.is_reachable(*block));
if invalid_ref {
jt_data.clear();
}
}
}
pub fn block_insts(&self, block: Block) -> Insts<'_>
Iterate over the instructions in `block` in layout order.
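A minimal sketch (hypothetical helper) that pairs the iterator with the data flow graph to inspect opcodes:

use cranelift_codegen::ir::{Block, Function};

fn count_branches(func: &Function, block: Block) -> usize {
    // Walk the block in layout order and count the branch instructions.
    func.layout
        .block_insts(block)
        .filter(|&inst| func.dfg[inst].opcode().is_branch())
        .count()
}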
Examples found in repository?
fn decorate_block<FW: FuncWriter>(
func_w: &mut FW,
w: &mut dyn Write,
func: &Function,
aliases: &SecondaryMap<Value, Vec<Value>>,
block: Block,
) -> fmt::Result {
// Indent all instructions if any srclocs are present.
let indent = if func.rel_srclocs().is_empty() { 4 } else { 36 };
func_w.write_block_header(w, func, block, indent)?;
for a in func.dfg.block_params(block).iter().cloned() {
write_value_aliases(w, aliases, a, indent)?;
}
for inst in func.layout.block_insts(block) {
func_w.write_instruction(w, func, aliases, inst, indent)?;
}
Ok(())
}
More examples
pub fn block_likely_branches(&self, block: Block) -> Insts {
// Note: Checking whether an instruction is a branch or not while walking backward might add
// extra overhead. However, we know that the number of branches is limited to 2 at the end of
// each block, and therefore we can just iterate over the last 2 instructions.
let mut iter = self.block_insts(block);
let head = iter.head;
let tail = iter.tail;
iter.next_back();
let head = iter.next_back().or(head);
Insts {
layout: self,
head,
tail,
}
}
pub fn is_block_basic(&self, block: Block) -> Result<(), (Inst, &'static str)> {
let dfg = &self.dfg;
let inst_iter = self.layout.block_insts(block);
// Ignore all instructions prior to the first branch.
let mut inst_iter = inst_iter.skip_while(|&inst| !dfg[inst].opcode().is_branch());
// A conditional branch is permitted in a basic block only when followed
// by a terminal jump instruction.
if let Some(_branch) = inst_iter.next() {
if let Some(next) = inst_iter.next() {
match dfg[next].opcode() {
Opcode::Jump => (),
_ => return Err((next, "post-branch instruction not jump")),
}
}
}
Ok(())
}
pub fn run(&self, errors: &mut VerifierErrors) -> VerifierStepResult<()> {
self.verify_global_values(errors)?;
self.verify_heaps(errors)?;
self.verify_tables(errors)?;
self.verify_jump_tables(errors)?;
self.typecheck_entry_block_params(errors)?;
self.check_entry_not_cold(errors)?;
self.typecheck_function_signature(errors)?;
for block in self.func.layout.blocks() {
if self.func.layout.first_inst(block).is_none() {
return errors.fatal((block, format!("{} cannot be empty", block)));
}
for inst in self.func.layout.block_insts(block) {
self.block_integrity(block, inst, errors)?;
self.instruction_integrity(inst, errors)?;
self.typecheck(inst, errors)?;
self.immediate_constraints(inst, errors)?;
}
self.encodable_as_bb(block, errors)?;
}
verify_flags(self.func, &self.expected_cfg, errors)?;
if !errors.is_empty() {
log::warn!(
"Found verifier errors in function:\n{}",
pretty_verifier_error(self.func, None, errors.clone())
);
}
Ok(())
}
fn compute_block_input_states(
func: &Function,
cfg: &ControlFlowGraph,
) -> SecondaryMap<Block, Option<LastStores>> {
let mut block_input = SecondaryMap::with_capacity(func.dfg.num_blocks());
let mut worklist: SmallVec<[Block; 16]> = smallvec![];
let mut worklist_set = FxHashSet::default();
let entry = func.layout.entry_block().unwrap();
worklist.push(entry);
worklist_set.insert(entry);
block_input[entry] = Some(LastStores::default());
while let Some(block) = worklist.pop() {
worklist_set.remove(&block);
let state = block_input[block].clone().unwrap();
trace!("alias analysis: input to {} is {:?}", block, state);
let state = func
.layout
.block_insts(block)
.fold(state, |mut state, inst| {
state.update(func, inst);
trace!("after {}: state is {:?}", inst, state);
state
});
for succ in cfg.succ_iter(block) {
let succ_first_inst = func.layout.first_inst(succ).unwrap();
let succ_state = &mut block_input[succ];
let old = succ_state.clone();
if let Some(succ_state) = succ_state.as_mut() {
succ_state.meet_from(&state, succ_first_inst);
} else {
*succ_state = Some(state);
};
let updated = *succ_state != old;
if updated && worklist_set.insert(succ) {
worklist.push(succ);
}
}
}
block_input
}
fn compute_load_last_stores(
func: &Function,
block_input: SecondaryMap<Block, Option<LastStores>>,
) -> FxHashMap<Inst, PackedMemoryState> {
let mut load_mem_state = FxHashMap::default();
load_mem_state.reserve(func.dfg.num_insts() / 8);
for block in func.layout.blocks() {
let mut state = block_input[block].clone().unwrap();
for inst in func.layout.block_insts(block) {
trace!(
"alias analysis: scanning at {} with state {:?} ({:?})",
inst,
state,
func.dfg[inst],
);
// N.B.: we match `Load` specifically, and not any
// other kinds of loads (or any opcode such that
// `opcode.can_load()` returns true), because some
// "can load" instructions actually have very
// different semantics (are not just a load of a
// particularly-typed value). For example, atomic
// (load/store, RMW, CAS) instructions "can load" but
// definitely should not participate in store-to-load
// forwarding or redundant-load elimination. Our goal
// here is to provide a `MemoryState` just for plain
// old loads whose semantics we can completely reason
// about.
if let InstructionData::Load {
opcode: Opcode::Load,
flags,
..
} = func.dfg[inst]
{
let mem_state = *state.for_flags(flags);
trace!(
"alias analysis: at {}: load with mem_state {:?}",
inst,
mem_state,
);
load_mem_state.insert(inst, mem_state.into());
}
state.update(func, inst);
}
}
load_mem_state
}
fn compute_block_input_states(&mut self) {
let mut queue = vec![];
let mut queue_set = FxHashSet::default();
let entry = self.func.layout.entry_block().unwrap();
queue.push(entry);
queue_set.insert(entry);
while let Some(block) = queue.pop() {
queue_set.remove(&block);
let mut state = self
.block_input
.entry(block)
.or_insert_with(|| LastStores::default())
.clone();
trace!(
"alias analysis: input to block{} is {:?}",
block.index(),
state
);
for inst in self.func.layout.block_insts(block) {
state.update(self.func, inst);
trace!("after inst{}: state is {:?}", inst.index(), state);
}
visit_block_succs(self.func, block, |_inst, succ, _from_table| {
let succ_first_inst = self
.func
.layout
.block_insts(succ)
.into_iter()
.next()
.unwrap();
let updated = match self.block_input.get_mut(&succ) {
Some(succ_state) => {
let old = succ_state.clone();
succ_state.meet_from(&state, succ_first_inst);
*succ_state != old
}
None => {
self.block_input.insert(succ, state.clone());
true
}
};
if updated && queue_set.insert(succ) {
queue.push(succ);
}
});
}
}
pub fn block_likely_branches(&self, block: Block) -> Insts<'_>
Iterate over a limited set of instructions which are likely to be the branches of `block`, in layout order. Any instruction not visited by this iterator is guaranteed not to be a branch, but an instruction that is visited may still not be a branch.
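A sketch of the intended usage pattern (hypothetical helper, mirroring the repository examples below): visit only the trailing candidates and filter with an opcode check, since a visited instruction may still be a non-branch.

use cranelift_codegen::ir::{Block, Function, Inst};

fn trailing_branches(func: &Function, block: Block) -> Vec<Inst> {
    // Only the last couple of instructions are visited, so this is cheaper
    // than scanning the whole block.
    func.layout
        .block_likely_branches(block)
        .filter(|&inst| func.dfg[inst].opcode().is_branch())
        .collect()
}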
Examples found in repository?
More examples
fn push_successors(&mut self, func: &Function, block: Block) {
for inst in func.layout.block_likely_branches(block) {
match func.dfg.analyze_branch(inst) {
BranchInfo::SingleDest(succ, _) => self.push_if_unseen(succ),
BranchInfo::Table(jt, dest) => {
for succ in func.jump_tables[jt].iter() {
self.push_if_unseen(*succ);
}
if let Some(dest) = dest {
self.push_if_unseen(dest);
}
}
BranchInfo::NotABranch => {}
}
}
}
fn compute_block(&mut self, func: &Function, block: Block) {
for inst in func.layout.block_likely_branches(block) {
match func.dfg.analyze_branch(inst) {
BranchInfo::SingleDest(dest, _) => {
self.add_edge(block, inst, dest);
}
BranchInfo::Table(jt, dest) => {
if let Some(dest) = dest {
self.add_edge(block, inst, dest);
}
for dest in func.jump_tables[jt].iter() {
self.add_edge(block, inst, *dest);
}
}
BranchInfo::NotABranch => {}
}
}
}
fn block_nodes(&self, w: &mut dyn Write) -> Result {
let mut aliases = SecondaryMap::<_, Vec<_>>::new();
for v in self.func.dfg.values() {
// VADFS returns the immediate target of an alias
if let Some(k) = self.func.dfg.value_alias_dest_for_serialization(v) {
aliases[k].push(v);
}
}
for block in &self.func.layout {
write!(w, " {} [shape=record, label=\"{{", block)?;
crate::write::write_block_header(w, self.func, block, 4)?;
// Add all outgoing branch instructions to the label.
for inst in self.func.layout.block_likely_branches(block) {
write!(w, " | <{}>", inst)?;
PlainWriter.write_instruction(w, self.func, &aliases, inst, 0)?;
}
writeln!(w, "}}\"]")?
}
Ok(())
}
pub fn new(f: &Function) -> BlockLoweringOrder {
trace!("BlockLoweringOrder: function body {:?}", f);
// Make sure that we have an entry block, and the entry block is
// not marked as cold. (The verifier ensures this as well, but
// the user may not have run the verifier, and this property is
// critical to avoid a miscompile, so we assert it here too.)
let entry = f.layout.entry_block().expect("Must have entry block");
assert!(!f.layout.is_cold(entry));
// Step 1: compute the in-edge and out-edge count of every block.
let mut block_in_count = SecondaryMap::with_default(0);
let mut block_out_count = SecondaryMap::with_default(0);
// Cache the block successors to avoid re-examining branches below.
let mut block_succs: SmallVec<[(Inst, usize, Block); 128]> = SmallVec::new();
let mut block_succ_range = SecondaryMap::with_default((0, 0));
let mut indirect_branch_target_clif_blocks = FxHashSet::default();
for block in f.layout.blocks() {
let block_succ_start = block_succs.len();
let mut succ_idx = 0;
visit_block_succs(f, block, |inst, succ, from_table| {
block_out_count[block] += 1;
block_in_count[succ] += 1;
block_succs.push((inst, succ_idx, succ));
succ_idx += 1;
if from_table {
indirect_branch_target_clif_blocks.insert(succ);
}
});
let block_succ_end = block_succs.len();
block_succ_range[block] = (block_succ_start, block_succ_end);
for inst in f.layout.block_likely_branches(block) {
if f.dfg[inst].opcode() == Opcode::Return {
// Implicit output edge for any return.
block_out_count[block] += 1;
}
}
}
// Implicit input edge for entry block.
block_in_count[entry] += 1;
// All blocks ending in conditional branches or br_tables must
// have edge-moves inserted at the top of successor blocks,
// not at the end of themselves. This is because the moves
// would have to be inserted prior to the branch's register
// use; but RA2's model is that the moves happen *on* the
// edge, after every def/use in the block. RA2 will check for
// "branch register use safety" and panic if such a problem
// occurs. To avoid this, we force the below algorithm to
// never merge the edge block onto the end of a block that
// ends in a conditional branch. We do this by "faking" more
// than one successor, even if there is only one.
//
// (One might ask, isn't that always the case already? It
// could not be, in cases of br_table with no table and just a
// default label, for example.)
for block in f.layout.blocks() {
for inst in f.layout.block_likely_branches(block) {
// If the block has a branch with any "fixed args"
// (not blockparam args) ...
if f.dfg[inst].opcode().is_branch() && f.dfg.inst_fixed_args(inst).len() > 0 {
// ... then force a minimum successor count of
// two, so the below algorithm cannot put
// edge-moves on the end of the block.
block_out_count[block] = std::cmp::max(2, block_out_count[block]);
}
}
}
// Here we define the implicit CLIF-plus-edges graph. There are
// conceptually two such graphs: the original, with every edge explicit,
// and the merged one, with blocks (represented by `LoweredBlock`
// values) that contain original CLIF blocks, edges, or both. This
// function returns a lowered block's successors as per the latter, with
// consideration to edge-block merging.
//
// Note that there is a property of the block-merging rules below
// that is very important to ensure we don't miss any lowered blocks:
// any block in the implicit CLIF-plus-edges graph will *only* be
// included in one block in the merged graph.
//
// This, combined with the property that every edge block is reachable
// only from one predecessor (and hence cannot be reached by a DFS
// backedge), means that it is sufficient in our DFS below to track
// visited-bits per original CLIF block only, not per edge. This greatly
// simplifies the data structures (no need to keep a sparse hash-set of
// (block, block) tuples).
let compute_lowered_succs = |ret: &mut Vec<(Inst, LoweredBlock)>, block: LoweredBlock| {
let start_idx = ret.len();
match block {
LoweredBlock::Orig { block } | LoweredBlock::EdgeAndOrig { block, .. } => {
// At an orig block; successors are always edge blocks,
// possibly with orig blocks following.
let range = block_succ_range[block];
for &(edge_inst, succ_idx, succ) in &block_succs[range.0..range.1] {
if block_in_count[succ] == 1 {
ret.push((
edge_inst,
LoweredBlock::EdgeAndOrig {
pred: block,
edge_inst,
succ_idx,
block: succ,
},
));
} else {
ret.push((
edge_inst,
LoweredBlock::Edge {
pred: block,
edge_inst,
succ_idx,
succ,
},
));
}
}
}
LoweredBlock::Edge {
succ, edge_inst, ..
}
| LoweredBlock::OrigAndEdge {
succ, edge_inst, ..
} => {
// At an edge block; successors are always orig blocks,
// possibly with edge blocks following.
if block_out_count[succ] == 1 {
let range = block_succ_range[succ];
// check if the one succ is a real CFG edge (vs.
// implicit return succ).
if range.1 - range.0 > 0 {
debug_assert!(range.1 - range.0 == 1);
let (succ_edge_inst, succ_succ_idx, succ_succ) = block_succs[range.0];
ret.push((
edge_inst,
LoweredBlock::OrigAndEdge {
block: succ,
edge_inst: succ_edge_inst,
succ_idx: succ_succ_idx,
succ: succ_succ,
},
));
} else {
ret.push((edge_inst, LoweredBlock::Orig { block: succ }));
}
} else {
ret.push((edge_inst, LoweredBlock::Orig { block: succ }));
}
}
}
let end_idx = ret.len();
(start_idx, end_idx)
};
// Build the explicit LoweredBlock-to-LoweredBlock successors list.
let mut lowered_succs = vec![];
let mut lowered_succ_indices = vec![];
// Step 2: Compute RPO traversal of the implicit CLIF-plus-edge-block graph. Use an
// explicit stack so we don't overflow the real stack with a deep DFS.
#[derive(Debug)]
struct StackEntry {
this: LoweredBlock,
succs: (usize, usize), // range in lowered_succs
cur_succ: usize, // index in lowered_succs
}
let mut stack: SmallVec<[StackEntry; 16]> = SmallVec::new();
let mut visited = FxHashSet::default();
let mut postorder = vec![];
// Add the entry block.
//
// FIXME(cfallin): we might be able to use OrigAndEdge. Find a
// way to not special-case the entry block here.
let block = LoweredBlock::Orig { block: entry };
visited.insert(block);
let range = compute_lowered_succs(&mut lowered_succs, block);
lowered_succ_indices.resize(lowered_succs.len(), 0);
stack.push(StackEntry {
this: block,
succs: range,
cur_succ: range.1,
});
while !stack.is_empty() {
let stack_entry = stack.last_mut().unwrap();
let range = stack_entry.succs;
if stack_entry.cur_succ == range.0 {
postorder.push((stack_entry.this, range));
stack.pop();
} else {
// Heuristic: chase the children in reverse. This puts the first
// successor block first in RPO, all other things being equal,
// which tends to prioritize loop backedges over out-edges,
// putting the edge-block closer to the loop body and minimizing
// live-ranges in linear instruction space.
let next = lowered_succs[stack_entry.cur_succ - 1].1;
stack_entry.cur_succ -= 1;
if visited.contains(&next) {
continue;
}
visited.insert(next);
let range = compute_lowered_succs(&mut lowered_succs, next);
lowered_succ_indices.resize(lowered_succs.len(), 0);
stack.push(StackEntry {
this: next,
succs: range,
cur_succ: range.1,
});
}
}
postorder.reverse();
let rpo = postorder;
// Step 3: now that we have RPO, build the BlockIndex/BB fwd/rev maps.
let mut lowered_order = vec![];
let mut cold_blocks = FxHashSet::default();
let mut lowered_succ_ranges = vec![];
let mut lb_to_bindex = FxHashMap::default();
let mut indirect_branch_targets = FxHashSet::default();
for (block, succ_range) in rpo.into_iter() {
let index = BlockIndex::new(lowered_order.len());
lb_to_bindex.insert(block, index);
lowered_order.push(block);
lowered_succ_ranges.push(succ_range);
match block {
LoweredBlock::Orig { block }
| LoweredBlock::OrigAndEdge { block, .. }
| LoweredBlock::EdgeAndOrig { block, .. } => {
if f.layout.is_cold(block) {
cold_blocks.insert(index);
}
if indirect_branch_target_clif_blocks.contains(&block) {
indirect_branch_targets.insert(index);
}
}
LoweredBlock::Edge { pred, succ, .. } => {
if f.layout.is_cold(pred) || f.layout.is_cold(succ) {
cold_blocks.insert(index);
}
if indirect_branch_target_clif_blocks.contains(&succ) {
indirect_branch_targets.insert(index);
}
}
}
}
let lowered_succ_indices = lowered_succs
.iter()
.map(|&(inst, succ)| (inst, lb_to_bindex.get(&succ).cloned().unwrap()))
.collect();
let mut orig_map = SecondaryMap::with_default(None);
for (i, lb) in lowered_order.iter().enumerate() {
let i = BlockIndex::new(i);
if let Some(b) = lb.orig_block() {
orig_map[b] = Some(i);
}
}
let result = BlockLoweringOrder {
lowered_order,
lowered_succs,
lowered_succ_indices,
lowered_succ_ranges,
orig_map,
cold_blocks,
indirect_branch_targets,
};
trace!("BlockLoweringOrder: {:?}", result);
result
}
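The traversal in the example above computes a reverse postorder with an explicit work stack: a block is emitted into the postorder only once its successor range has been exhausted, and successors are chased from the back of the list so that the first successor lands earliest in the RPO. The snippet below is a minimal, self-contained sketch of that explicit-stack RPO idea over a plain adjacency-list graph; the rpo function and the succs vector are illustrative assumptions for this sketch, not Cranelift APIs.
// Minimal, self-contained sketch (not Cranelift code): reverse postorder over
// an adjacency-list graph using an explicit stack, mirroring the shape of the
// traversal in the example above. `succs[b]` lists the successors of block `b`.
fn rpo(entry: usize, succs: &[Vec<usize>]) -> Vec<usize> {
    let mut visited = vec![false; succs.len()];
    let mut postorder = Vec::new();
    // Each stack entry is (block, number of successors still to visit).
    // Successors are taken from the back of the list, as above, so the first
    // successor ends up earliest in the resulting RPO.
    let mut stack = vec![(entry, succs[entry].len())];
    visited[entry] = true;

    while !stack.is_empty() {
        let (block, remaining) = stack.last_mut().unwrap();
        let block = *block;
        if *remaining == 0 {
            // All successors processed: emit in postorder and pop.
            postorder.push(block);
            stack.pop();
        } else {
            *remaining -= 1;
            let succ = succs[block][*remaining];
            if !visited[succ] {
                visited[succ] = true;
                stack.push((succ, succs[succ].len()));
            }
        }
    }

    postorder.reverse();
    postorder
}

fn main() {
    // 0 -> 1, 1 -> {2, 3}, 2 -> 1 (back-edge), 3 has no successors.
    let succs = vec![vec![1], vec![2, 3], vec![1], vec![]];
    assert_eq!(rpo(0, &succs), vec![0, 1, 2, 3]);
}
Because a block is pushed exactly once and popped exactly once, this visits each edge a constant number of times without risking stack overflow on deep graphs, which is the same reason the lowering code above uses an explicit stack.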
sourcepub fn split_block(&mut self, new_block: Block, before: Inst)
pub fn split_block(&mut self, new_block: Block, before: Inst)
Split the block containing before in two.
Insert new_block after the old block and move before and the following instructions to new_block:
old_block:
    i1
    i2
    i3 << before
    i4
becomes:
old_block:
    i1
    i2
new_block:
    i3 << before
    i4
Examples found in repository?
548 549 550 551 552 553 554 555 556 557 558 559 560 561 562
fn insert_block(&mut self, new_block: ir::Block) {
use self::CursorPosition::*;
match self.position() {
At(inst) => {
self.layout_mut().split_block(new_block, inst);
// All other cases move to `After(block)`, but in this case we'll stay `At(inst)`.
return;
}
Nowhere => self.layout_mut().append_block(new_block),
Before(block) => self.layout_mut().insert_block(new_block, block),
After(block) => self.layout_mut().insert_block_after(new_block, block),
}
// For everything but `At(inst)` we end up appending to the new block.
self.set_position(After(new_block));
}
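Beyond the cursor-based example above, here is a minimal sketch of calling split_block directly on a Layout. The entity references are created by hand purely for illustration (assuming the EntityRef re-export at cranelift_codegen::entity); in real code Blocks and Insts come from a function's DataFlowGraph.
// A minimal sketch, not production code: build a block of four instructions,
// then split it before the third one.
use cranelift_codegen::entity::EntityRef;
use cranelift_codegen::ir::layout::Layout;
use cranelift_codegen::ir::{Block, Inst};

fn main() {
    let mut layout = Layout::new();
    let old_block = Block::new(0);
    let new_block = Block::new(1);
    let insts: Vec<Inst> = (0..4).map(Inst::new).collect();

    // old_block: inst0 inst1 inst2 inst3
    layout.append_block(old_block);
    for &inst in &insts {
        layout.append_inst(inst, old_block);
    }

    // Split before inst2: old_block keeps inst0/inst1, new_block takes the rest.
    layout.split_block(new_block, insts[2]);

    assert_eq!(layout.inst_block(insts[1]), Some(old_block));
    assert_eq!(layout.inst_block(insts[2]), Some(new_block));
    assert_eq!(layout.inst_block(insts[3]), Some(new_block));
}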
Trait Implementations§
source§impl<'f> IntoIterator for &'f Layout
impl<'f> IntoIterator for &'f Layout
Use a layout reference in a for loop.
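As a hedged fragment (assuming layout is a populated Layout, such as the one built in the split_block sketch above), iterating over a &Layout yields its blocks in layout order:
// Iterate blocks in layout order via the IntoIterator impl for &Layout.
for block in &layout {
    println!("{} comes next in layout order", block);
}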