/* Splits superblocks.  Clear the BB_SUPERBLOCK flag from every marked
   basic block, collect those blocks in a bitmap, and — if any were
   found — rebuild jump labels and let find_many_sub_basic_blocks
   rediscover the sub-basic-block structure inside them.  */

static void
break_superblocks (void)
{
  sbitmap superblocks;
  int i, need;

  superblocks = sbitmap_alloc (n_basic_blocks);
  sbitmap_zero (superblocks);

  need = 0;
  for (i = 0; i < n_basic_blocks; i++)
    if (BASIC_BLOCK (i)->flags & BB_SUPERBLOCK)
      {
	BASIC_BLOCK (i)->flags &= ~BB_SUPERBLOCK;
	SET_BIT (superblocks, i);
	need = 1;
      }

  if (need)
    {
      rebuild_jump_labels (get_insns ());
      find_many_sub_basic_blocks (superblocks);
    }

  /* Release through the sbitmap interface, matching sbitmap_alloc
     above; plain free () bypasses the sbitmap allocator pairing.  */
  sbitmap_free (superblocks);
}
static void make_reorder_chain () { basic_block prev = NULL; int nbb_m1 = n_basic_blocks - 1; basic_block next; /* Loop until we've placed every block. */ do { int i; next = NULL; /* Find the next unplaced block. */ /* ??? Get rid of this loop, and track which blocks are not yet placed more directly, so as to avoid the O(N^2) worst case. Perhaps keep a doubly-linked list of all to-be-placed blocks; remove from the list as we place. The head of that list is what we're looking for here. */ for (i = 0; i <= nbb_m1 && !next; ++i) { basic_block bb = BASIC_BLOCK (i); if (! RBI (bb)->visited) next = bb; } if (next) prev = make_reorder_chain_1 (next, prev); } while (next); RBI (prev)->next = NULL; }
/* Dump a slim representation of the basic block with index N.  */

DEBUG_FUNCTION void
debug_bb_n_slim (int n)
{
  debug_bb_slim (BASIC_BLOCK (n));
}
/* Find the natural loops in the CFG and record them in LOOPS, building
   the loop tree rooted at a dummy loop that contains the whole
   function.  FLAGS must include LOOP_TREE.  Resets loop_depth on every
   header considered and sets bb->loop_father for all blocks.  Returns
   the number of loops recorded (including the dummy root loop).  */

int
flow_loops_find (struct loops *loops, int flags)
{
  int i;
  int b;
  int num_loops;
  edge e;
  sbitmap headers;
  int *dfs_order;
  int *rc_order;
  basic_block header;
  basic_block bb;

  /* This function cannot be repeatedly called with different
     flags to build up the loop information.  The loop tree
     must always be built if this function is called.  */
  if (! (flags & LOOP_TREE))
    abort ();

  memset (loops, 0, sizeof *loops);

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == 0)
    return 0;

  dfs_order = NULL;
  rc_order = NULL;

  /* Join loops with shared headers.  */
  canonicalize_loop_headers ();

  /* Compute the dominators.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* Count the number of loop headers.  This should be the
     same as the number of natural loops.  */
  headers = sbitmap_alloc (last_basic_block);
  sbitmap_zero (headers);

  num_loops = 0;
  FOR_EACH_BB (header)
    {
      int more_latches = 0;

      header->loop_depth = 0;

      /* If we have an abnormal predecessor, do not consider the
	 loop (not worth the problems).  */
      for (e = header->pred; e; e = e->pred_next)
	if (e->flags & EDGE_ABNORMAL)
	  break;
      if (e)
	continue;

      for (e = header->pred; e; e = e->pred_next)
	{
	  basic_block latch = e->src;

	  /* Abnormal predecessors were skipped above, so none should
	     remain at this point.  */
	  if (e->flags & EDGE_ABNORMAL)
	    abort ();

	  /* Look for back edges where a predecessor is dominated
	     by this block.  A natural loop has a single entry
	     node (header) that dominates all the nodes in the
	     loop.  It also has single back edge to the header
	     from a latch node.  */
	  if (latch != ENTRY_BLOCK_PTR
	      && dominated_by_p (CDI_DOMINATORS, latch, header))
	    {
	      /* Shared headers should be eliminated by now.  */
	      if (more_latches)
		abort ();
	      more_latches = 1;
	      SET_BIT (headers, header->index);
	      num_loops++;
	    }
	}
    }

  /* Allocate loop structures.  One extra slot holds the dummy
     whole-function loop at index 0.  */
  loops->parray = xcalloc (num_loops + 1, sizeof (struct loop *));

  /* Dummy loop containing whole function.  Spans from the entry block
     to the exit block, hence n_basic_blocks + 2 nodes.  */
  loops->parray[0] = xcalloc (1, sizeof (struct loop));
  loops->parray[0]->next = NULL;
  loops->parray[0]->inner = NULL;
  loops->parray[0]->outer = NULL;
  loops->parray[0]->depth = 0;
  loops->parray[0]->pred = NULL;
  loops->parray[0]->num_nodes = n_basic_blocks + 2;
  loops->parray[0]->latch = EXIT_BLOCK_PTR;
  loops->parray[0]->header = ENTRY_BLOCK_PTR;
  ENTRY_BLOCK_PTR->loop_father = loops->parray[0];
  EXIT_BLOCK_PTR->loop_father = loops->parray[0];

  loops->tree_root = loops->parray[0];

  /* Find and record information about all the natural loops
     in the CFG.  */
  loops->num = 1;
  FOR_EACH_BB (bb)
    bb->loop_father = loops->tree_root;

  if (num_loops)
    {
      /* Compute depth first search order of the CFG so that outer
	 natural loops will be found before inner natural loops.  */
      dfs_order = xmalloc (n_basic_blocks * sizeof (int));
      rc_order = xmalloc (n_basic_blocks * sizeof (int));
      flow_depth_first_order_compute (dfs_order, rc_order);

      /* Save CFG derived information to avoid recomputing it.  */
      loops->cfg.dfs_order = dfs_order;
      loops->cfg.rc_order = rc_order;

      num_loops = 1;

      for (b = 0; b < n_basic_blocks; b++)
	{
	  struct loop *loop;

	  /* Search the nodes of the CFG in reverse completion order
	     so that we can find outer loops first.  */
	  if (!TEST_BIT (headers, rc_order[b]))
	    continue;

	  header = BASIC_BLOCK (rc_order[b]);

	  loop = loops->parray[num_loops] = xcalloc (1, sizeof (struct loop));
	  loop->header = header;
	  loop->num = num_loops;
	  num_loops++;

	  /* Look for the latch for this header block.  The back edge
	     from a dominated predecessor identifies it.  */
	  for (e = header->pred; e; e = e->pred_next)
	    {
	      basic_block latch = e->src;

	      if (latch != ENTRY_BLOCK_PTR
		  && dominated_by_p (CDI_DOMINATORS, latch, header))
		{
		  loop->latch = latch;
		  break;
		}
	    }

	  /* Hook the new loop into the tree under the loop currently
	     owning its header, then collect its member blocks.  */
	  flow_loop_tree_node_add (header->loop_father, loop);
	  loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
	}

      /* Assign the loop nesting depth and enclosed loop level for
	 each loop.  */
      loops->levels = flow_loops_level_compute (loops);

      /* Scan the loops.  */
      for (i = 1; i < num_loops; i++)
	flow_loop_scan (loops->parray[i], flags);

      loops->num = num_loops;
    }
  else
    {
      /* No loops found: the dominator info is not needed further.  */
      free_dominance_info (CDI_DOMINATORS);
    }

  sbitmap_free (headers);

  loops->state = 0;
#ifdef ENABLE_CHECKING
  verify_flow_info ();
  verify_loop_structure (loops);
#endif

  return loops->num;
}
/* Print a slim dump of the basic block whose index is N.  */

void
debug_bb_n_slim (int n)
{
  struct basic_block_def *block;

  block = BASIC_BLOCK (n);
  debug_bb_slim (block);
}
static void fixup_reorder_chain (void) { basic_block bb, prev_bb; int index; rtx insn = NULL; if (cfg_layout_function_header) { set_first_insn (cfg_layout_function_header); insn = cfg_layout_function_header; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); } /* First do the bulk reordering -- rechain the blocks without regard to the needed changes to jumps and labels. */ for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; bb != 0; bb = bb->rbi->next, index++) { if (bb->rbi->header) { if (insn) NEXT_INSN (insn) = bb->rbi->header; else set_first_insn (bb->rbi->header); PREV_INSN (bb->rbi->header) = insn; insn = bb->rbi->header; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); } if (insn) NEXT_INSN (insn) = BB_HEAD (bb); else set_first_insn (BB_HEAD (bb)); PREV_INSN (BB_HEAD (bb)) = insn; insn = BB_END (bb); if (bb->rbi->footer) { NEXT_INSN (insn) = bb->rbi->footer; PREV_INSN (bb->rbi->footer) = insn; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); } } if (index != n_basic_blocks) abort (); NEXT_INSN (insn) = cfg_layout_function_footer; if (cfg_layout_function_footer) PREV_INSN (cfg_layout_function_footer) = insn; while (NEXT_INSN (insn)) insn = NEXT_INSN (insn); set_last_insn (insn); #ifdef ENABLE_CHECKING verify_insn_chain (); #endif delete_dead_jumptables (); /* Now add jumps and labels as needed to match the blocks new outgoing edges. */ for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = bb->rbi->next) { edge e_fall, e_taken, e; rtx bb_end_insn; basic_block nb; if (bb->succ == NULL) continue; /* Find the old fallthru edge, and another non-EH edge for a taken jump. */ e_taken = e_fall = NULL; for (e = bb->succ; e ; e = e->succ_next) if (e->flags & EDGE_FALLTHRU) e_fall = e; else if (! (e->flags & EDGE_EH)) e_taken = e; bb_end_insn = BB_END (bb); if (GET_CODE (bb_end_insn) == JUMP_INSN) { if (any_condjump_p (bb_end_insn)) { /* If the old fallthru is still next, nothing to do. 
*/ if (bb->rbi->next == e_fall->dest || (!bb->rbi->next && e_fall->dest == EXIT_BLOCK_PTR)) continue; /* The degenerated case of conditional jump jumping to the next instruction can happen on target having jumps with side effects. Create temporarily the duplicated edge representing branch. It will get unidentified by force_nonfallthru_and_redirect that would otherwise get confused by fallthru edge not pointing to the next basic block. */ if (!e_taken) { rtx note; edge e_fake; e_fake = unchecked_make_edge (bb, e_fall->dest, 0); if (!redirect_jump (BB_END (bb), block_label (bb), 0)) abort (); note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX); if (note) { int prob = INTVAL (XEXP (note, 0)); e_fake->probability = prob; e_fake->count = e_fall->count * prob / REG_BR_PROB_BASE; e_fall->probability -= e_fall->probability; e_fall->count -= e_fake->count; if (e_fall->probability < 0) e_fall->probability = 0; if (e_fall->count < 0) e_fall->count = 0; } } /* There is one special case: if *neither* block is next, such as happens at the very end of a function, then we'll need to add a new unconditional jump. Choose the taken edge based on known or assumed probability. */ else if (bb->rbi->next != e_taken->dest) { rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0); if (note && INTVAL (XEXP (note, 0)) < REG_BR_PROB_BASE / 2 && invert_jump (bb_end_insn, label_for_bb (e_fall->dest), 0)) { e_fall->flags &= ~EDGE_FALLTHRU; e_taken->flags |= EDGE_FALLTHRU; update_br_prob_note (bb); e = e_fall, e_fall = e_taken, e_taken = e; } } /* Otherwise we can try to invert the jump. This will basically never fail, however, keep up the pretense. */ else if (invert_jump (bb_end_insn, label_for_bb (e_fall->dest), 0)) { e_fall->flags &= ~EDGE_FALLTHRU; e_taken->flags |= EDGE_FALLTHRU; update_br_prob_note (bb); continue; } } else if (returnjump_p (bb_end_insn)) continue; else { /* Otherwise we have some switch or computed jump. In the 99% case, there should not have been a fallthru edge. 
*/ if (! e_fall) continue; #ifdef CASE_DROPS_THROUGH /* Except for VAX. Since we didn't have predication for the tablejump, the fallthru block should not have moved. */ if (bb->rbi->next == e_fall->dest) continue; bb_end_insn = skip_insns_after_block (bb); #else abort (); #endif } } else { /* No fallthru implies a noreturn function with EH edges, or something similarly bizarre. In any case, we don't need to do anything. */ if (! e_fall) continue; /* If the fallthru block is still next, nothing to do. */ if (bb->rbi->next == e_fall->dest) continue; /* A fallthru to exit block. */ if (!bb->rbi->next && e_fall->dest == EXIT_BLOCK_PTR) continue; } /* We got here if we need to add a new jump insn. */ nb = force_nonfallthru (e_fall); if (nb) { cfg_layout_initialize_rbi (nb); nb->rbi->visited = 1; nb->rbi->next = bb->rbi->next; bb->rbi->next = nb; /* Don't process this new block. */ bb = nb; } } /* Put basic_block_info in the new order. */ if (rtl_dump_file) { fprintf (rtl_dump_file, "Reordered sequence:\n"); for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0; bb; bb = bb->rbi->next, index ++) { fprintf (rtl_dump_file, " %i ", index); if (bb->rbi->original) fprintf (rtl_dump_file, "duplicate of %i ", bb->rbi->original->index); else if (forwarder_block_p (bb) && GET_CODE (BB_HEAD (bb)) != CODE_LABEL) fprintf (rtl_dump_file, "compensation "); else fprintf (rtl_dump_file, "bb %i ", bb->index); fprintf (rtl_dump_file, " [%i]\n", bb->frequency); } } prev_bb = ENTRY_BLOCK_PTR; bb = ENTRY_BLOCK_PTR->next_bb; index = 0; for (; bb; prev_bb = bb, bb = bb->rbi->next, index ++) { bb->index = index; BASIC_BLOCK (index) = bb; bb->prev_bb = prev_bb; prev_bb->next_bb = bb; } prev_bb->next_bb = EXIT_BLOCK_PTR; EXIT_BLOCK_PTR->prev_bb = prev_bb; /* Annoying special case - jump around dead jumptables left in the code. */ FOR_EACH_BB (bb) { edge e; for (e = bb->succ; e && !(e->flags & EDGE_FALLTHRU); e = e->succ_next) continue; if (e && !can_fallthru (e->src, e->dest)) force_nonfallthru (e); } }