/* Return true if we should ignore the basic block for purposes of tracing. */ static bool ignore_bb_p (const_basic_block bb) { if (bb->index < NUM_FIXED_BLOCKS) return true; if (optimize_bb_for_size_p (bb)) return true; if (gimple *g = last_stmt (CONST_CAST_BB (bb))) { /* A transaction is a single entry multiple exit region. It must be duplicated in its entirety or not at all. */ if (gimple_code (g) == GIMPLE_TRANSACTION) return true; /* An IFN_UNIQUE call must be duplicated as part of its group, or not at all. */ if (is_gimple_call (g) && gimple_call_internal_p (g) && gimple_call_internal_unique_p (g)) return true; } return false; }
static unsigned int tree_optimize_tail_calls_1 (bool opt_tailcalls) { edge e; bool phis_constructed = false; struct tailcall *tailcalls = NULL, *act, *next; bool changed = false; basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); tree param; gimple stmt; edge_iterator ei; if (!suitable_for_tail_opt_p ()) return 0; if (opt_tailcalls) opt_tailcalls = suitable_for_tail_call_opt_p (); FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) { /* Only traverse the normal exits, i.e. those that end with return statement. */ stmt = last_stmt (e->src); if (stmt && gimple_code (stmt) == GIMPLE_RETURN) find_tail_calls (e->src, &tailcalls); }
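For context, a minimal source-level sketch (function names invented) of the kind of return find_tail_calls looks for in those normal exit blocks: a call whose result is returned directly. Whether it is actually rewritten depends on the suitability checks above and on the accumulator handling elsewhere in the pass.

/* Tail-recursive form: the recursive call is the last action before the
   return, so the pass can consider turning it into a jump.  */
static int sum_acc (int n, int acc)
{
  if (n == 0)
    return acc;
  return sum_acc (n - 1, acc + n);   /* candidate tail call */
}

/* Roughly what tail-recursion elimination produces.  */
static int sum_loop (int n, int acc)
{
  for (; n != 0; n--)
    acc += n;
  return acc;
}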
static tree tree_may_unswitch_on (basic_block bb, struct loop *loop) { gimple stmt, def; tree cond, use; basic_block def_bb; ssa_op_iter iter; /* BB must end in a simple conditional jump. */ stmt = last_stmt (bb); if (!stmt || gimple_code (stmt) != GIMPLE_COND) return NULL_TREE; /* To keep things simple, we do not remove the conditions directly, but just replace the tests with 0 != 0 or 1 != 0, respectively. Prevent the infinite loop where we would unswitch again on such a condition. */ if (gimple_cond_true_p (stmt) || gimple_cond_false_p (stmt)) return NULL_TREE; /* The condition must be invariant. */ FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE) { def = SSA_NAME_DEF_STMT (use); def_bb = gimple_bb (def); if (def_bb && flow_bb_inside_loop_p (loop, def_bb)) return NULL_TREE; }
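As a rough source-level picture of what tree_may_unswitch_on accepts (names invented): the block ends in a conditional whose operands are all defined outside the loop, so a later step can duplicate the loop and hoist the test.

void scale (double *a, int n, int use_offset, double off)
{
  for (int i = 0; i < n; i++)
    {
      if (use_offset)            /* loop-invariant condition */
        a[i] = a[i] * 2.0 + off;
      else
        a[i] = a[i] * 2.0;
    }
}

/* Conceptually unswitched into two specialized loops:
     if (use_offset)
       for (i = 0; i < n; i++) a[i] = a[i] * 2.0 + off;
     else
       for (i = 0; i < n; i++) a[i] = a[i] * 2.0;  */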
static bool do_while_loop_p (struct loop *loop) { gimple *stmt = last_stmt (loop->latch); /* If the latch of the loop is not empty, it is not a do-while loop. */ if (stmt && gimple_code (stmt) != GIMPLE_LABEL) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Loop %i is not a do-while loop: latch is not empty.\n", loop->num); return false; } /* If the header contains just a condition, it is not a do-while loop. */ stmt = last_and_only_stmt (loop->header); if (stmt && gimple_code (stmt) == GIMPLE_COND) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Loop %i is not a do-while loop: " "header contains just a condition.\n", loop->num); return false; } if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Loop %i is a do-while loop\n", loop->num); return true; }
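As a rough source-level illustration (assuming the usual lowering, with invented function names), these are the two shapes the predicate distinguishes: a bottom-tested loop has an empty latch and a header with real work, while a top-tested loop leaves only the exit condition in its header.

/* Bottom-tested: do_while_loop_p is expected to return true.  */
int count_down (int n)
{
  int s = 0;
  do
    s += n--;
  while (n > 0);
  return s;
}

/* Top-tested: after lowering, the header contains just the condition,
   so do_while_loop_p is expected to return false.  */
int count_up (int n)
{
  int s = 0;
  while (n > 0)
    s += n--;
  return s;
}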
static unsigned int tree_ssa_ifcombine (void) { basic_block *bbs; bool cfg_changed = false; int i; bbs = single_pred_before_succ_order (); calculate_dominance_info (CDI_DOMINATORS); /* Search every basic block for a COND_EXPR we may be able to optimize. We walk the blocks in an order that guarantees that a block with a single predecessor is processed after the predecessor. This ensures that we collapse outer ifs before visiting the inner ones, and also that we do not try to visit a removed block. This is the opposite of PHI-OPT, because we cascade the combining rather than cascading PHIs. */ for (i = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--) { basic_block bb = bbs[i]; gimple stmt = last_stmt (bb); if (stmt && gimple_code (stmt) == GIMPLE_COND) cfg_changed |= tree_ssa_ifcombine_bb (bb); } free (bbs); return cfg_changed ? TODO_cleanup_cfg : 0; }
static bool candidate_bb_for_phi_optimization (basic_block bb, basic_block *cond_block_p, basic_block *other_block_p) { tree last0, last1; basic_block cond_block, other_block; /* One of the alternatives must come from a block ending with a COND_EXPR. */ last0 = last_stmt (EDGE_PRED (bb, 0)->src); last1 = last_stmt (EDGE_PRED (bb, 1)->src); if (last0 && TREE_CODE (last0) == COND_EXPR) { cond_block = EDGE_PRED (bb, 0)->src; other_block = EDGE_PRED (bb, 1)->src; } else if (last1 && TREE_CODE (last1) == COND_EXPR) { other_block = EDGE_PRED (bb, 0)->src; cond_block = EDGE_PRED (bb, 1)->src; } else return false; /* COND_BLOCK must have precisely two successors. We indirectly verify that those successors are BB and OTHER_BLOCK. */ if (EDGE_COUNT (cond_block->succs) != 2 || (EDGE_SUCC (cond_block, 0)->flags & EDGE_ABNORMAL) != 0 || (EDGE_SUCC (cond_block, 1)->flags & EDGE_ABNORMAL) != 0) return false; /* OTHER_BLOCK must have a single predecessor which is COND_BLOCK, OTHER_BLOCK must have a single successor which is BB and OTHER_BLOCK must have no PHI nodes. */ if (EDGE_COUNT (other_block->preds) != 1 || EDGE_PRED (other_block, 0)->src != cond_block || EDGE_COUNT (other_block->succs) != 1 || EDGE_SUCC (other_block, 0)->dest != bb || phi_nodes (other_block)) return false; *cond_block_p = cond_block; *other_block_p = other_block; /* Everything looks OK. */ return true; }
static bool should_duplicate_loop_header_p (basic_block header, struct loop *loop, int *limit) { gimple_stmt_iterator bsi; gimple last; /* Do not copy one block more than once (we do not really want to do loop peeling here). */ if (header->aux) return false; /* Loop header copying usually increases size of the code. This used not to be true, since quite often it is possible to verify that the condition is satisfied in the first iteration and therefore to eliminate it. Jump threading handles these cases now. */ if (optimize_loop_for_size_p (loop)) return false; gcc_assert (EDGE_COUNT (header->succs) > 0); if (single_succ_p (header)) return false; if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest) && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest)) return false; /* If this is not the original loop header, we want it to have just one predecessor in order to match the && pattern. */ if (header != loop->header && !single_pred_p (header)) return false; last = last_stmt (header); if (gimple_code (last) != GIMPLE_COND) return false; /* Approximately copy the conditions that used to be used in jump.c -- at most 20 insns and no calls. */ for (bsi = gsi_start_bb (header); !gsi_end_p (bsi); gsi_next (&bsi)) { last = gsi_stmt (bsi); if (gimple_code (last) == GIMPLE_LABEL) continue; if (is_gimple_debug (last)) continue; if (is_gimple_call (last)) return false; *limit -= estimate_num_insns (last, &eni_size_weights); if (*limit < 0) return false; } return true; }
static void tree_ssa_forward_propagate_single_use_vars (void) { basic_block bb; varray_type vars_worklist, cond_worklist; vars = BITMAP_ALLOC (NULL); VARRAY_TREE_INIT (vars_worklist, 10, "VARS worklist"); VARRAY_TREE_INIT (cond_worklist, 10, "COND worklist"); /* Prime the COND_EXPR worklist by placing all the COND_EXPRs on the worklist. */ FOR_EACH_BB (bb) { tree last = last_stmt (bb); if (last && TREE_CODE (last) == COND_EXPR) VARRAY_PUSH_TREE (cond_worklist, last); } while (VARRAY_ACTIVE_SIZE (cond_worklist) > 0) { /* First get a list of all the interesting COND_EXPRs and potential single use variables which feed those COND_EXPRs. This will drain COND_WORKLIST and initialize VARS_WORKLIST. */ record_single_argument_cond_exprs (cond_worklist, &vars_worklist, vars); if (VARRAY_ACTIVE_SIZE (vars_worklist) > 0) { /* Now compute immediate uses for all the variables we care about. */ compute_immediate_uses (TDFA_USE_OPS, need_imm_uses_for); /* We've computed immediate uses, so we can/must clear the VARS bitmap for the next iteration. */ bitmap_clear (vars); /* And optimize. This will drain VARS_WORKLIST and initialize COND_WORKLIST for the next iteration. */ substitute_single_use_vars (&cond_worklist, vars_worklist); /* We do not incrementally update the dataflow information so we must free it here and recompute the necessary bits on the next iteration. If this turns out to be expensive, methods for incrementally updating the dataflow are known. */ free_df (); } } /* All done. Clean up. */ BITMAP_FREE (vars); }
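A sketch of the pattern this early forward-propagation pass targets (names invented): a single-use temporary holding a comparison feeds a COND_EXPR, and the comparison is substituted into the condition so the temporary dies.

int before (int a)
{
  int t = (a == 0);   /* single-use variable feeding the condition */
  if (t)
    return 1;
  return 2;
}

int after (int a)
{
  if (a == 0)         /* comparison propagated into the COND_EXPR */
    return 1;
  return 2;
}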
static bool should_duplicate_loop_header_p (basic_block header, struct loop *loop, int *limit) { block_stmt_iterator bsi; tree last; /* Do not copy one block more than once (we do not really want to do loop peeling here). */ if (header->aux) return false; gcc_assert (EDGE_COUNT (header->succs) > 0); if (EDGE_COUNT (header->succs) == 1) return false; if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest) && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest)) return false; /* If this is not the original loop header, we want it to have just one predecessor in order to match the && pattern. */ if (header != loop->header && EDGE_COUNT (header->preds) >= 2) return false; last = last_stmt (header); if (TREE_CODE (last) != COND_EXPR) return false; /* Approximately copy the conditions that used to be used in jump.c -- at most 20 insns and no calls. */ for (bsi = bsi_start (header); !bsi_end_p (bsi); bsi_next (&bsi)) { last = bsi_stmt (bsi); if (TREE_CODE (last) == LABEL_EXPR) continue; if (get_call_expr_in (last)) return false; *limit -= estimate_num_insns (last); if (*limit < 0) return false; } return true; }
static bool do_while_loop_p (struct loop *loop) { tree stmt = last_stmt (loop->latch); /* If the latch of the loop is not empty, it is not a do-while loop. */ if (stmt && TREE_CODE (stmt) != LABEL_EXPR) return false; /* If the header contains just a condition, it is not a do-while loop. */ stmt = last_and_only_stmt (loop->header); if (stmt && TREE_CODE (stmt) == COND_EXPR) return false; return true; }
/* Return true if we should ignore the basic block for purposes of tracing. */ static bool ignore_bb_p (const_basic_block bb) { gimple g; if (bb->index < NUM_FIXED_BLOCKS) return true; if (optimize_bb_for_size_p (bb)) return true; /* A transaction is a single entry multiple exit region. It must be duplicated in its entirety or not at all. */ g = last_stmt (CONST_CAST_BB (bb)); if (g && gimple_code (g) == GIMPLE_TRANSACTION) return true; return false; }
static bool do_while_loop_p (struct loop *loop) { gimple stmt = last_stmt (loop->latch); /* If the latch of the loop is not empty, it is not a do-while loop. */ if (stmt && gimple_code (stmt) != GIMPLE_LABEL) return false; /* If the header contains just a condition, it is not a do-while loop. */ stmt = last_and_only_stmt (loop->header); if (stmt && gimple_code (stmt) == GIMPLE_COND) return false; return true; }
static void create_canonical_iv (struct loop *loop, edge exit, tree niter) { edge in; tree type, var; gcond *cond; gimple_stmt_iterator incr_at; enum tree_code cmp; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num); print_generic_expr (dump_file, niter, TDF_SLIM); fprintf (dump_file, " iterations.\n"); } cond = as_a <gcond *> (last_stmt (exit->src)); in = EDGE_SUCC (exit->src, 0); if (in == exit) in = EDGE_SUCC (exit->src, 1); /* Note that we do not need to worry about overflows, since the type of niter is always unsigned and all comparisons are just for equality/nonequality -- i.e. everything works in modulo arithmetic. */ type = TREE_TYPE (niter); niter = fold_build2 (PLUS_EXPR, type, niter, build_int_cst (type, 1)); incr_at = gsi_last_bb (in->src); create_iv (niter, build_int_cst (type, -1), NULL_TREE, loop, &incr_at, false, NULL, &var); cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR; gimple_cond_set_code (cond, cmp); gimple_cond_set_lhs (cond, var); gimple_cond_set_rhs (cond, build_int_cst (type, 0)); update_stmt (cond); }
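In source terms the effect is roughly the following sketch (simplified; the actual start value and the placement of the decrement come from the pass's niter computation): a fresh counter is decremented each iteration and the exit condition becomes an equality test against zero, which is safe because the arithmetic is unsigned and modulo.

void before (double *a, unsigned n)
{
  for (unsigned i = 0; i < n; i++)
    a[i] *= 2.0;
}

void after (double *a, unsigned n)
{
  unsigned ivtmp = n;      /* canonical counter; start value simplified here */
  for (unsigned i = 0; ; i++)
    {
      if (ivtmp == 0)      /* canonical exit: compare the counter with 0 */
        break;
      a[i] *= 2.0;
      ivtmp--;
    }
}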
static tree tree_may_unswitch_on (basic_block bb, struct loop *loop) { tree stmt, def, cond, use; basic_block def_bb; ssa_op_iter iter; /* BB must end in a simple conditional jump. */ stmt = last_stmt (bb); if (!stmt || TREE_CODE (stmt) != COND_EXPR) return NULL_TREE; /* Condition must be invariant. */ FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE) { def = SSA_NAME_DEF_STMT (use); def_bb = bb_for_stmt (def); if (def_bb && flow_bb_inside_loop_p (loop, def_bb)) return NULL_TREE; }
static unsigned int tree_ssa_ifcombine (void) { basic_block *bbs; bool cfg_changed = false; int i; bbs = blocks_in_phiopt_order (); for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; ++i) { basic_block bb = bbs[i]; gimple stmt = last_stmt (bb); if (stmt && gimple_code (stmt) == GIMPLE_COND) cfg_changed |= tree_ssa_ifcombine_bb (bb); } free (bbs); return cfg_changed ? TODO_cleanup_cfg : 0; }
static unsigned int tree_ssa_ifcombine (void) { basic_block *bbs; bool cfg_changed = false; int i; bbs = single_pred_before_succ_order (); calculate_dominance_info (CDI_DOMINATORS); for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; ++i) { basic_block bb = bbs[i]; gimple stmt = last_stmt (bb); if (stmt && gimple_code (stmt) == GIMPLE_COND) cfg_changed |= tree_ssa_ifcombine_bb (bb); } free (bbs); return cfg_changed ? TODO_cleanup_cfg : 0; }
unsigned int execute_fixup_cfg (void) { basic_block bb; gimple_stmt_iterator gsi; int todo = gimple_in_ssa_p (cfun) ? TODO_verify_ssa : 0; gcov_type count_scale; edge e; edge_iterator ei; count_scale = GCOV_COMPUTE_SCALE (cgraph_get_node (current_function_decl)->count, ENTRY_BLOCK_PTR->count); ENTRY_BLOCK_PTR->count = cgraph_get_node (current_function_decl)->count; EXIT_BLOCK_PTR->count = apply_scale (EXIT_BLOCK_PTR->count, count_scale); FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) e->count = apply_scale (e->count, count_scale); FOR_EACH_BB (bb) { bb->count = apply_scale (bb->count, count_scale); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); tree decl = is_gimple_call (stmt) ? gimple_call_fndecl (stmt) : NULL; if (decl) { int flags = gimple_call_flags (stmt); if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE)) { if (gimple_purge_dead_abnormal_call_edges (bb)) todo |= TODO_cleanup_cfg; if (gimple_in_ssa_p (cfun)) { todo |= TODO_update_ssa | TODO_cleanup_cfg; update_stmt (stmt); } } if (flags & ECF_NORETURN && fixup_noreturn_call (stmt)) todo |= TODO_cleanup_cfg; } if (maybe_clean_eh_stmt (stmt) && gimple_purge_dead_eh_edges (bb)) todo |= TODO_cleanup_cfg; } FOR_EACH_EDGE (e, ei, bb->succs) e->count = apply_scale (e->count, count_scale); /* If we have a basic block with no successors that does not end with a control statement or a noreturn call end it with a call to __builtin_unreachable. This situation can occur when inlining a noreturn call that does in fact return. */ if (EDGE_COUNT (bb->succs) == 0) { gimple stmt = last_stmt (bb); if (!stmt || (!is_ctrl_stmt (stmt) && (!is_gimple_call (stmt) || (gimple_call_flags (stmt) & ECF_NORETURN) == 0))) { stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0); gimple_stmt_iterator gsi = gsi_last_bb (bb); gsi_insert_after (&gsi, stmt, GSI_NEW_STMT); } } } if (count_scale != REG_BR_PROB_BASE) compute_function_frequency (); /* We just processed all calls. */ if (cfun->gimple_df) vec_free (MODIFIED_NORETURN_CALLS (cfun)); /* Dump a textual representation of the flowgraph. */ if (dump_file) gimple_dump_cfg (dump_file, dump_flags); if (current_loops && (todo & TODO_cleanup_cfg)) loops_state_set (LOOPS_NEED_FIXUP); return todo; }
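The block-with-no-successors case in the comment corresponds to source like the sketch below (names invented, stdlib exit used for illustration): a function declared noreturn that can nevertheless fall off the end. Once such a call is inlined, the path that returns ends a block with no outgoing edges, and the pass caps it with a call to __builtin_unreachable ().

#include <stdlib.h>

/* Declared noreturn, but falls off the end when code == 0.  */
__attribute__ ((noreturn)) void give_up (int code)
{
  if (code)
    exit (code);
  /* Falling off the end here is what creates the odd CFG after inlining.  */
}

int driver (int code)
{
  give_up (code);
  /* The call is noreturn, so the CFG has no edge from it to this return;
     after inlining, the returning path of give_up ends a block with no
     successors, and fixup_cfg appends __builtin_unreachable () there.  */
  return 0;
}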
static bool should_duplicate_loop_header_p (basic_block header, struct loop *loop, int *limit) { gimple_stmt_iterator bsi; gimple *last; gcc_assert (!header->aux); /* Loop header copying usually increases size of the code. This used not to be true, since quite often it is possible to verify that the condition is satisfied in the first iteration and therefore to eliminate it. Jump threading handles these cases now. */ if (optimize_loop_for_size_p (loop)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: optimizing for size.\n", header->index); return false; } gcc_assert (EDGE_COUNT (header->succs) > 0); if (single_succ_p (header)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it is single succ.\n", header->index); return false; } if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest) && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: both successors are in the loop.\n", header->index); return false; } /* If this is not the original loop header, we want it to have just one predecessor in order to match the && pattern. */ if (header != loop->header && !single_pred_p (header)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it has multiple predecessors.\n", header->index); return false; } last = last_stmt (header); if (gimple_code (last) != GIMPLE_COND) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it does not end with a conditional.\n", header->index); return false; } /* Count the number of instructions and punt on calls. */ for (bsi = gsi_start_bb (header); !gsi_end_p (bsi); gsi_next (&bsi)) { last = gsi_stmt (bsi); if (gimple_code (last) == GIMPLE_LABEL) continue; if (is_gimple_debug (last)) continue; if (gimple_code (last) == GIMPLE_CALL && !gimple_inexpensive_call_p (as_a <gcall *> (last))) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it contains a call.\n", header->index); return false; } *limit -= estimate_num_insns (last, &eni_size_weights); if (*limit < 0) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it contains too many insns.\n", header->index); return false; } } if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Will duplicate bb %i\n", header->index); return true; }
static bool ifcombine_iforif (basic_block inner_cond_bb, basic_block outer_cond_bb) { gimple inner_cond, outer_cond; tree name1, name2, bits1, bits2; inner_cond = last_stmt (inner_cond_bb); if (!inner_cond || gimple_code (inner_cond) != GIMPLE_COND) return false; outer_cond = last_stmt (outer_cond_bb); if (!outer_cond || gimple_code (outer_cond) != GIMPLE_COND) return false; /* See if we have two bit tests of the same name in both tests. In that case remove the outer test and change the inner one to test for name & (bits1 | bits2) != 0. */ if (recognize_bits_test (inner_cond, &name1, &bits1) && recognize_bits_test (outer_cond, &name2, &bits2)) { gimple_stmt_iterator gsi; tree t; /* Find the common name which is bit-tested. */ if (name1 == name2) ; else if (bits1 == bits2) { t = name2; name2 = bits2; bits2 = t; t = name1; name1 = bits1; bits1 = t; } else if (name1 == bits2) { t = name2; name2 = bits2; bits2 = t; } else if (bits1 == name2) { t = name1; name1 = bits1; bits1 = t; } else return false; /* As we strip non-widening conversions in finding a common name that is tested make sure to end up with an integral type for building the bit operations. */ if (TYPE_PRECISION (TREE_TYPE (bits1)) >= TYPE_PRECISION (TREE_TYPE (bits2))) { bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1); name1 = fold_convert (TREE_TYPE (bits1), name1); bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2); bits2 = fold_convert (TREE_TYPE (bits1), bits2); } else { bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2); name1 = fold_convert (TREE_TYPE (bits2), name1); bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1); bits1 = fold_convert (TREE_TYPE (bits2), bits1); } /* Do it. */ gsi = gsi_for_stmt (inner_cond); t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (NE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); gimple_cond_set_condition_from_tree (inner_cond, t); update_stmt (inner_cond); /* Leave CFG optimization to cfg_cleanup. */ gimple_cond_set_condition_from_tree (outer_cond, boolean_false_node); update_stmt (outer_cond); if (dump_file) { fprintf (dump_file, "optimizing bits or bits test to "); print_generic_expr (dump_file, name1, 0); fprintf (dump_file, " & T != 0\nwith temporary T = "); print_generic_expr (dump_file, bits1, 0); fprintf (dump_file, " | "); print_generic_expr (dump_file, bits2, 0); fprintf (dump_file, "\n"); } return true; } /* See if we have two comparisons that we can merge into one. This happens for C++ operator overloading where for example GE_EXPR is implemented as GT_EXPR || EQ_EXPR. */ else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison && operand_equal_p (gimple_cond_lhs (inner_cond), gimple_cond_lhs (outer_cond), 0) && operand_equal_p (gimple_cond_rhs (inner_cond), gimple_cond_rhs (outer_cond), 0)) { enum tree_code code1 = gimple_cond_code (inner_cond); enum tree_code code2 = gimple_cond_code (outer_cond); enum tree_code code; tree t; #define CHK(a,b) ((code1 == a ## _EXPR && code2 == b ## _EXPR) \ || (code2 == a ## _EXPR && code1 == b ## _EXPR)) /* Merge the two condition codes if possible. 
*/ if (code1 == code2) code = code1; else if (CHK (EQ, LT)) code = LE_EXPR; else if (CHK (EQ, GT)) code = GE_EXPR; else if (CHK (LT, LE)) code = LE_EXPR; else if (CHK (GT, GE)) code = GE_EXPR; else if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (inner_cond))) || flag_unsafe_math_optimizations) { if (CHK (LT, GT)) code = NE_EXPR; else if (CHK (LT, NE)) code = NE_EXPR; else if (CHK (GT, NE)) code = NE_EXPR; else return false; } /* We could check for combinations leading to trivial true/false. */ else return false; #undef CHK /* Do it. */ t = fold_build2 (code, boolean_type_node, gimple_cond_lhs (outer_cond), gimple_cond_rhs (outer_cond)); t = canonicalize_cond_expr_cond (t); if (!t) return false; gimple_cond_set_condition_from_tree (inner_cond, t); update_stmt (inner_cond); /* Leave CFG optimization to cfg_cleanup. */ gimple_cond_set_condition_from_tree (outer_cond, boolean_false_node); update_stmt (outer_cond); if (dump_file) { fprintf (dump_file, "optimizing two comparisons to "); print_generic_expr (dump_file, t, 0); fprintf (dump_file, "\n"); } return true; } return false; }
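The operator-overloading case in the comment looks like this at the source level (plain C, names invented): two comparisons of the same operands joined by ||, merged by the CHK table above into a single comparison.

int ge_before (int a, int b)
{
  if (a > b || a == b)     /* GT_EXPR || EQ_EXPR on the same operands */
    return 1;
  return 0;
}

int ge_after (int a, int b)
{
  if (a >= b)              /* merged into a single GE_EXPR */
    return 1;
  return 0;
}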
static bool ifcombine_ifandif (basic_block inner_cond_bb, basic_block outer_cond_bb) { gimple_stmt_iterator gsi; gimple inner_cond, outer_cond; tree name1, name2, bit1, bit2; inner_cond = last_stmt (inner_cond_bb); if (!inner_cond || gimple_code (inner_cond) != GIMPLE_COND) return false; outer_cond = last_stmt (outer_cond_bb); if (!outer_cond || gimple_code (outer_cond) != GIMPLE_COND) return false; /* See if we test a single bit of the same name in both tests. In that case remove the outer test, merging both else edges, and change the inner one to test for name & (bit1 | bit2) == (bit1 | bit2). */ if (recognize_single_bit_test (inner_cond, &name1, &bit1) && recognize_single_bit_test (outer_cond, &name2, &bit2) && name1 == name2) { tree t, t2; /* Do it. */ gsi = gsi_for_stmt (inner_cond); t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1), build_int_cst (TREE_TYPE (name1), 1), bit1); t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1), build_int_cst (TREE_TYPE (name1), 1), bit2); t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t); t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (EQ_EXPR, boolean_type_node, t2, t); gimple_cond_set_condition_from_tree (inner_cond, t); update_stmt (inner_cond); /* Leave CFG optimization to cfg_cleanup. */ gimple_cond_set_condition_from_tree (outer_cond, boolean_true_node); update_stmt (outer_cond); if (dump_file) { fprintf (dump_file, "optimizing double bit test to "); print_generic_expr (dump_file, name1, 0); fprintf (dump_file, " & T == T\nwith temporary T = (1 << "); print_generic_expr (dump_file, bit1, 0); fprintf (dump_file, ") | (1 << "); print_generic_expr (dump_file, bit2, 0); fprintf (dump_file, ")\n"); } return true; } return false; }
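A small before/after sketch of the double bit test named in the dump output (mask values chosen arbitrarily): two nested tests of single bits of the same value become one test of the combined mask.

int both_bits_before (unsigned flags)
{
  if (flags & 4)
    if (flags & 8)
      return 1;
  return 0;
}

int both_bits_after (unsigned flags)
{
  /* name & (bit1 | bit2) == (bit1 | bit2), i.e. both bits must be set.  */
  if ((flags & (4u | 8u)) == (4u | 8u))
    return 1;
  return 0;
}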
static unsigned int tree_ssa_phiopt (void) { basic_block bb; basic_block *bb_order; unsigned n, i; bool cfgchanged = false; /* Search every basic block for COND_EXPR we may be able to optimize. We walk the blocks in order that guarantees that a block with a single predecessor is processed before the predecessor. This ensures that we collapse inner ifs before visiting the outer ones, and also that we do not try to visit a removed block. */ bb_order = blocks_in_phiopt_order (); n = n_basic_blocks - NUM_FIXED_BLOCKS; for (i = 0; i < n; i++) { tree cond_expr; tree phi; basic_block bb1, bb2; edge e1, e2; tree arg0, arg1; bb = bb_order[i]; cond_expr = last_stmt (bb); /* Check to see if the last statement is a COND_EXPR. */ if (!cond_expr || TREE_CODE (cond_expr) != COND_EXPR) continue; e1 = EDGE_SUCC (bb, 0); bb1 = e1->dest; e2 = EDGE_SUCC (bb, 1); bb2 = e2->dest; /* We cannot do the optimization on abnormal edges. */ if ((e1->flags & EDGE_ABNORMAL) != 0 || (e2->flags & EDGE_ABNORMAL) != 0) continue; /* If either bb1's succ or bb2 or bb2's succ is non NULL. */ if (EDGE_COUNT (bb1->succs) == 0 || bb2 == NULL || EDGE_COUNT (bb2->succs) == 0) continue; /* Find the bb which is the fall through to the other. */ if (EDGE_SUCC (bb1, 0)->dest == bb2) ; else if (EDGE_SUCC (bb2, 0)->dest == bb1) { basic_block bb_tmp = bb1; edge e_tmp = e1; bb1 = bb2; bb2 = bb_tmp; e1 = e2; e2 = e_tmp; } else continue; e1 = EDGE_SUCC (bb1, 0); /* Make sure that bb1 is just a fall through. */ if (!single_succ_p (bb1) || (e1->flags & EDGE_FALLTHRU) == 0) continue; /* Also make sure that bb1 only have one predecessor and that it is bb. */ if (!single_pred_p (bb1) || single_pred (bb1) != bb) continue; phi = phi_nodes (bb2); /* Check to make sure that there is only one PHI node. TODO: we could do it with more than one iff the other PHI nodes have the same elements for these two edges. */ if (!phi || PHI_CHAIN (phi) != NULL) continue; arg0 = PHI_ARG_DEF_TREE (phi, e1->dest_idx); arg1 = PHI_ARG_DEF_TREE (phi, e2->dest_idx); /* Something is wrong if we cannot find the arguments in the PHI node. */ gcc_assert (arg0 != NULL && arg1 != NULL); /* Do the replacement of conditional if it can be done. */ if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; } free (bb_order); /* If the CFG has changed, we should cleanup the CFG. */ return cfgchanged ? TODO_cleanup_cfg : 0; }
static bool abs_replacement (basic_block cond_bb, basic_block middle_bb, edge e0 ATTRIBUTE_UNUSED, edge e1, gimple phi, tree arg0, tree arg1) { tree result; gimple new_stmt, cond; gimple_stmt_iterator gsi; edge true_edge, false_edge; gimple assign; edge e; tree rhs, lhs; bool negate; enum tree_code cond_code; /* If the type says honor signed zeros we cannot do this optimization. */ if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1)))) return false; /* OTHER_BLOCK must have only one executable statement which must have the form arg0 = -arg1 or arg1 = -arg0. */ assign = last_and_only_stmt (middle_bb); /* If we did not find the proper negation assignment, then we can not optimize. */ if (assign == NULL) return false; /* If we got here, then we have found the only executable statement in OTHER_BLOCK. If it is anything other than arg = -arg1 or arg1 = -arg0, then we can not optimize. */ if (gimple_code (assign) != GIMPLE_ASSIGN) return false; lhs = gimple_assign_lhs (assign); if (gimple_assign_rhs_code (assign) != NEGATE_EXPR) return false; rhs = gimple_assign_rhs1 (assign); /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */ if (!(lhs == arg0 && rhs == arg1) && !(lhs == arg1 && rhs == arg0)) return false; cond = last_stmt (cond_bb); result = PHI_RESULT (phi); /* Only relationals comparing arg[01] against zero are interesting. */ cond_code = gimple_cond_code (cond); if (cond_code != GT_EXPR && cond_code != GE_EXPR && cond_code != LT_EXPR && cond_code != LE_EXPR) return false; /* Make sure the conditional is arg[01] OP y. */ if (gimple_cond_lhs (cond) != rhs) return false; if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond))) ? real_zerop (gimple_cond_rhs (cond)) : integer_zerop (gimple_cond_rhs (cond))) ; else return false; /* We need to know which is the true edge and which is the false edge so that we know if have abs or negative abs. */ extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge); /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we will need to negate the result. Similarly for LT_EXPR/LE_EXPR if the false edge goes to OTHER_BLOCK. */ if (cond_code == GT_EXPR || cond_code == GE_EXPR) e = true_edge; else e = false_edge; if (e->dest == middle_bb) negate = true; else negate = false; result = duplicate_ssa_name (result, NULL); if (negate) { tree tmp = create_tmp_var (TREE_TYPE (result), NULL); add_referenced_var (tmp); lhs = make_ssa_name (tmp, NULL); } else lhs = result; /* Build the modify expression with abs expression. */ new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL); gsi = gsi_last_bb (cond_bb); gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT); if (negate) { /* Get the right GSI. We want to insert after the recently added ABS_EXPR statement (which we know is the first statement in the block. */ new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL); gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT); } replace_phi_edge_with_variable (cond_bb, e1, phi, result); /* Note that we optimized this PHI. */ return true; }
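At the source level the pattern abs_replacement matches looks roughly like the sketch below (names invented; whether a negation of the result is added depends on which edge reaches the middle block).

int iabs_before (int a)
{
  int r = a;
  if (a < 0)
    r = -a;      /* middle block: result = -arg, guarded by a compare with 0 */
  return r;      /* PHI <a, -a> at the join point */
}

int iabs_after (int a)
{
  return __builtin_abs (a);   /* branch and PHI replaced by an ABS_EXPR */
}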
static bool conditional_replacement (basic_block cond_bb, basic_block middle_bb, edge e0, edge e1, gimple phi, tree arg0, tree arg1) { tree result; gimple stmt, new_stmt; tree cond; gimple_stmt_iterator gsi; edge true_edge, false_edge; tree new_var, new_var2; /* FIXME: Gimplification of complex type is too hard for now. */ if (TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (arg1)) == COMPLEX_TYPE) return false; /* The PHI arguments have the constants 0 and 1, then convert it to the conditional. */ if ((integer_zerop (arg0) && integer_onep (arg1)) || (integer_zerop (arg1) && integer_onep (arg0))) ; else return false; if (!empty_block_p (middle_bb)) return false; /* At this point we know we have a GIMPLE_COND with two successors. One successor is BB, the other successor is an empty block which falls through into BB. There is a single PHI node at the join point (BB) and its arguments are constants (0, 1). So, given the condition COND, and the two PHI arguments, we can rewrite this PHI into non-branching code: dest = (COND) or dest = COND' We use the condition as-is if the argument associated with the true edge has the value one or the argument associated with the false edge as the value zero. Note that those conditions are not the same since only one of the outgoing edges from the GIMPLE_COND will directly reach BB and thus be associated with an argument. */ stmt = last_stmt (cond_bb); result = PHI_RESULT (phi); /* To handle special cases like floating point comparison, it is easier and less error-prone to build a tree and gimplify it on the fly though it is less efficient. */ cond = fold_build2 (gimple_cond_code (stmt), boolean_type_node, gimple_cond_lhs (stmt), gimple_cond_rhs (stmt)); /* We need to know which is the true edge and which is the false edge so that we know when to invert the condition below. */ extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge); if ((e0 == true_edge && integer_zerop (arg0)) || (e0 == false_edge && integer_onep (arg0)) || (e1 == true_edge && integer_zerop (arg1)) || (e1 == false_edge && integer_onep (arg1))) cond = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (cond), cond); /* Insert our new statements at the end of conditional block before the COND_STMT. */ gsi = gsi_for_stmt (stmt); new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true, GSI_SAME_STMT); if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var))) { new_var2 = create_tmp_var (TREE_TYPE (result), NULL); add_referenced_var (new_var2); new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2, new_var, NULL); new_var2 = make_ssa_name (new_var2, new_stmt); gimple_assign_set_lhs (new_stmt, new_var2); gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT); new_var = new_var2; } replace_phi_edge_with_variable (cond_bb, e1, phi, new_var); /* Note that we optimized this PHI. */ return true; }
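conditional_replacement applies when the PHI arguments are the constants 0 and 1; in source form (names invented) that is a boolean-valued if/else collapsing into the condition itself, possibly inverted.

int is_neg_before (int a)
{
  int r;
  if (a < 0)
    r = 1;
  else
    r = 0;
  return r;      /* PHI <1, 0> selected by the condition */
}

int is_neg_after (int a)
{
  return a < 0;  /* dest = COND, no branch left */
}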
/* The core routine of conditional store replacement and normal phi optimizations. Both share much of the infrastructure in how to match applicable basic block patterns. DO_STORE_ELIM is true when we want to do conditional store replacement, false otherwise. DO_HOIST_LOADS is true when we want to hoist adjacent loads out of diamond control flow patterns, false otherwise. */ static unsigned int tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads) { basic_block bb; basic_block *bb_order; unsigned n, i; bool cfgchanged = false; hash_set<tree> *nontrap = 0; if (do_store_elim) /* Calculate the set of non-trapping memory accesses. */ nontrap = get_non_trapping (); /* Search every basic block for COND_EXPR we may be able to optimize. We walk the blocks in order that guarantees that a block with a single predecessor is processed before the predecessor. This ensures that we collapse inner ifs before visiting the outer ones, and also that we do not try to visit a removed block. */ bb_order = single_pred_before_succ_order (); n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; for (i = 0; i < n; i++) { gimple cond_stmt; gphi *phi; basic_block bb1, bb2; edge e1, e2; tree arg0, arg1; bb = bb_order[i]; cond_stmt = last_stmt (bb); /* Check to see if the last statement is a GIMPLE_COND. */ if (!cond_stmt || gimple_code (cond_stmt) != GIMPLE_COND) continue; e1 = EDGE_SUCC (bb, 0); bb1 = e1->dest; e2 = EDGE_SUCC (bb, 1); bb2 = e2->dest; /* We cannot do the optimization on abnormal edges. */ if ((e1->flags & EDGE_ABNORMAL) != 0 || (e2->flags & EDGE_ABNORMAL) != 0) continue; /* If either bb1's succ or bb2 or bb2's succ is non NULL. */ if (EDGE_COUNT (bb1->succs) == 0 || bb2 == NULL || EDGE_COUNT (bb2->succs) == 0) continue; /* Find the bb which is the fall through to the other. */ if (EDGE_SUCC (bb1, 0)->dest == bb2) ; else if (EDGE_SUCC (bb2, 0)->dest == bb1) { std::swap (bb1, bb2); std::swap (e1, e2); } else if (do_store_elim && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest) { basic_block bb3 = EDGE_SUCC (bb1, 0)->dest; if (!single_succ_p (bb1) || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0 || !single_succ_p (bb2) || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0 || EDGE_COUNT (bb3->preds) != 2) continue; if (cond_if_else_store_replacement (bb1, bb2, bb3)) cfgchanged = true; continue; } else if (do_hoist_loads && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest) { basic_block bb3 = EDGE_SUCC (bb1, 0)->dest; if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt))) && single_succ_p (bb1) && single_succ_p (bb2) && single_pred_p (bb1) && single_pred_p (bb2) && EDGE_COUNT (bb->succs) == 2 && EDGE_COUNT (bb3->preds) == 2 /* If one edge or the other is dominant, a conditional move is likely to perform worse than the well-predicted branch. */ && !predictable_edge_p (EDGE_SUCC (bb, 0)) && !predictable_edge_p (EDGE_SUCC (bb, 1))) hoist_adjacent_loads (bb, bb1, bb2, bb3); continue; } else continue; e1 = EDGE_SUCC (bb1, 0); /* Make sure that bb1 is just a fall through. */ if (!single_succ_p (bb1) || (e1->flags & EDGE_FALLTHRU) == 0) continue; /* Also make sure that bb1 only have one predecessor and that it is bb. */ if (!single_pred_p (bb1) || single_pred (bb1) != bb) continue; if (do_store_elim) { /* bb1 is the middle block, bb2 the join block, bb the split block, e1 the fallthrough edge from bb1 to bb2. We can't do the optimization if the join block has more than two predecessors. 
*/ if (EDGE_COUNT (bb2->preds) > 2) continue; if (cond_store_replacement (bb1, bb2, e1, e2, nontrap)) cfgchanged = true; } else { gimple_seq phis = phi_nodes (bb2); gimple_stmt_iterator gsi; bool candorest = true; /* Value replacement can work with more than one PHI so try that first. */ for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi)) { phi = as_a <gphi *> (gsi_stmt (gsi)); arg0 = gimple_phi_arg_def (phi, e1->dest_idx); arg1 = gimple_phi_arg_def (phi, e2->dest_idx); if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2) { candorest = false; cfgchanged = true; break; } } if (!candorest) continue; phi = single_non_singleton_phi_for_edges (phis, e1, e2); if (!phi) continue; arg0 = gimple_phi_arg_def (phi, e1->dest_idx); arg1 = gimple_phi_arg_def (phi, e2->dest_idx); /* Something is wrong if we cannot find the arguments in the PHI node. */ gcc_assert (arg0 != NULL && arg1 != NULL); if (factor_out_conditional_conversion (e1, e2, phi, arg0, arg1)) { /* factor_out_conditional_conversion may create a new PHI in BB2 and eliminate an existing PHI in BB2. Recompute values that may be affected by that change. */ phis = phi_nodes (bb2); phi = single_non_singleton_phi_for_edges (phis, e1, e2); gcc_assert (phi); arg0 = gimple_phi_arg_def (phi, e1->dest_idx); arg1 = gimple_phi_arg_def (phi, e2->dest_idx); gcc_assert (arg0 != NULL && arg1 != NULL); } /* Do the replacement of conditional if it can be done. */ if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; } } free (bb_order); if (do_store_elim) delete nontrap; /* If the CFG has changed, we should cleanup the CFG. */ if (cfgchanged && do_store_elim) { /* In cond-store replacement we have added some loads on edges and new VOPS (as we moved the store, and created a load). */ gsi_commit_edge_inserts (); return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals; } else if (cfgchanged) return TODO_cleanup_cfg; return 0; }
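The do_store_elim path is conditional store replacement; in source terms it looks roughly like the sketch below (names invented). It is only valid because the earlier unconditional store proves the location non-trapping, which is what get_non_trapping checks.

void store_before (int *p, int c, int v)
{
  *p = 0;            /* makes *p provably non-trapping below */
  if (c)
    *p = v;          /* conditional store in the middle block */
}

void store_after (int *p, int c, int v)
{
  *p = 0;
  int tmp = *p;          /* load hoisted out of the branch */
  tmp = c ? v : tmp;     /* select instead of a branch */
  *p = tmp;              /* single unconditional store at the join */
}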
static unsigned int copy_loop_headers (void) { struct loops *loops; unsigned i; struct loop *loop; basic_block header; edge exit, entry; basic_block *bbs, *copied_bbs; unsigned n_bbs; unsigned bbs_size; loops = loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES); if (!loops) return 0; #ifdef ENABLE_CHECKING verify_loop_structure (loops); #endif bbs = XNEWVEC (basic_block, n_basic_blocks); copied_bbs = XNEWVEC (basic_block, n_basic_blocks); bbs_size = n_basic_blocks; for (i = 1; i < loops->num; i++) { /* Copy at most 20 insns. */ int limit = 20; loop = loops->parray[i]; if (!loop) continue; header = loop->header; /* If the loop is already a do-while style one (either because it was written as such, or because jump threading transformed it into one), we might be in fact peeling the first iteration of the loop. This in general is not a good idea. */ if (do_while_loop_p (loop)) continue; /* Iterate the header copying up to limit; this takes care of the cases like while (a && b) {...}, where we want to have both of the conditions copied. TODO -- handle while (a || b) - like cases, by not requiring the header to have just a single successor and copying up to postdominator. */ exit = NULL; n_bbs = 0; while (should_duplicate_loop_header_p (header, loop, &limit)) { /* Find a successor of header that is inside a loop; i.e. the new header after the condition is copied. */ if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest)) exit = EDGE_SUCC (header, 0); else exit = EDGE_SUCC (header, 1); bbs[n_bbs++] = header; gcc_assert (bbs_size > n_bbs); header = exit->dest; } if (!exit) continue; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Duplicating header of the loop %d up to edge %d->%d.\n", loop->num, exit->src->index, exit->dest->index); /* Ensure that the header will have just the latch as a predecessor inside the loop. */ if (!single_pred_p (exit->dest)) exit = single_pred_edge (loop_split_edge_with (exit, NULL)); entry = loop_preheader_edge (loop); if (!tree_duplicate_sese_region (entry, exit, bbs, n_bbs, copied_bbs)) { fprintf (dump_file, "Duplication failed.\n"); continue; } /* If the loop has the form "for (i = j; i < j + 10; i++)" then this copying can introduce a case where we rely on undefined signed overflow to eliminate the preheader condition, because we assume that "j < j + 10" is true. We don't want to warn about that case for -Wstrict-overflow, because in general we don't warn about overflow involving loops. Prevent the warning by setting TREE_NO_WARNING. */ if (warn_strict_overflow > 0) { unsigned int i; for (i = 0; i < n_bbs; ++i) { tree last; last = last_stmt (copied_bbs[i]); if (TREE_CODE (last) == COND_EXPR) TREE_NO_WARNING (last) = 1; } } /* Ensure that the latch and the preheader is simple (we know that they are not now, since there was the loop exit condition. */ loop_split_edge_with (loop_preheader_edge (loop), NULL); loop_split_edge_with (loop_latch_edge (loop), NULL); } free (bbs); free (copied_bbs); loop_optimizer_finalize (loops); return 0; }
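The while (a && b) case mentioned in the comment looks like this at the source level (names invented): copying the header tests in front of the loop turns the remaining loop into a do-while, which is what the do_while_loop_p check above guards against repeating.

void drain_before (int *a, int n, int limit)
{
  int i = 0;
  while (i < n && a[i] < limit)   /* two header blocks to copy */
    i++;
}

/* After header copying, roughly:  */
void drain_after (int *a, int n, int limit)
{
  int i = 0;
  if (i < n && a[i] < limit)      /* peeled copies of the header tests */
    do
      i++;
    while (i < n && a[i] < limit);
}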
static bool value_replacement (basic_block cond_bb, basic_block middle_bb, edge e0, edge e1, gimple phi, tree arg0, tree arg1) { gimple cond; edge true_edge, false_edge; enum tree_code code; /* If the type says honor signed zeros we cannot do this optimization. */ if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1)))) return false; if (!empty_block_p (middle_bb)) return false; cond = last_stmt (cond_bb); code = gimple_cond_code (cond); /* This transformation is only valid for equality comparisons. */ if (code != NE_EXPR && code != EQ_EXPR) return false; /* We need to know which is the true edge and which is the false edge so that we know if have abs or negative abs. */ extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge); /* At this point we know we have a COND_EXPR with two successors. One successor is BB, the other successor is an empty block which falls through into BB. The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR. There is a single PHI node at the join point (BB) with two arguments. We now need to verify that the two arguments in the PHI node match the two arguments to the equality comparison. */ if ((operand_equal_for_phi_arg_p (arg0, gimple_cond_lhs (cond)) && operand_equal_for_phi_arg_p (arg1, gimple_cond_rhs (cond))) || (operand_equal_for_phi_arg_p (arg1, gimple_cond_lhs (cond)) && operand_equal_for_phi_arg_p (arg0, gimple_cond_rhs (cond)))) { edge e; tree arg; /* For NE_EXPR, we want to build an assignment result = arg where arg is the PHI argument associated with the true edge. For EQ_EXPR we want the PHI argument associated with the false edge. */ e = (code == NE_EXPR ? true_edge : false_edge); /* Unfortunately, E may not reach BB (it may instead have gone to OTHER_BLOCK). If that is the case, then we want the single outgoing edge from OTHER_BLOCK which reaches BB and represents the desired path from COND_BLOCK. */ if (e->dest == middle_bb) e = single_succ_edge (e->dest); /* Now we know the incoming edge to BB that has the argument for the RHS of our new assignment statement. */ if (e0 == e) arg = arg0; else arg = arg1; replace_phi_edge_with_variable (cond_bb, e1, phi, arg); /* Note that we optimized this PHI. */ return true; } return false; }
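In source form, value_replacement recognizes an equality test whose arms just pick one of the compared values; on the edge where the comparison holds the two values are interchangeable, so the PHI collapses to a single argument (sketch, names invented).

int pick_before (int a, int b)
{
  int r;
  if (a == b)
    r = a;
  else
    r = b;
  return r;      /* PHI <a, b> over an EQ_EXPR of a and b */
}

int pick_after (int a, int b)
{
  return b;      /* when a == b, returning b covers both arms */
}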
static bool tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size, int upper_bound) { basic_block *body = get_loop_body (loop); gimple_stmt_iterator gsi; unsigned int i; bool after_exit; vec<basic_block> path = get_loop_hot_path (loop); size->overall = 0; size->eliminated_by_peeling = 0; size->last_iteration = 0; size->last_iteration_eliminated_by_peeling = 0; size->num_pure_calls_on_hot_path = 0; size->num_non_pure_calls_on_hot_path = 0; size->non_call_stmts_on_hot_path = 0; size->num_branches_on_hot_path = 0; size->constant_iv = 0; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num); for (i = 0; i < loop->num_nodes; i++) { if (edge_to_cancel && body[i] != edge_to_cancel->src && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src)) after_exit = true; else after_exit = false; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit); for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); int num = estimate_num_insns (stmt, &eni_size_weights); bool likely_eliminated = false; bool likely_eliminated_last = false; bool likely_eliminated_peeled = false; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " size: %3i ", num); print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0); } /* Look for reasons why we might optimize this stmt away. */ if (gimple_has_side_effects (stmt)) ; /* Exit conditional. */ else if (exit && body[i] == exit->src && stmt == last_stmt (exit->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in peeled copies.\n"); likely_eliminated_peeled = true; } else if (edge_to_cancel && body[i] == edge_to_cancel->src && stmt == last_stmt (edge_to_cancel->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in last copy.\n"); likely_eliminated_last = true; } /* Sets of IV variables */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Induction variable computation will" " be folded away.\n"); likely_eliminated = true; } /* Assignments of IV variables. */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop) && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS || constant_after_peeling (gimple_assign_rhs2 (stmt), stmt, loop))) { size->constant_iv = true; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant expression will be folded away.\n"); likely_eliminated = true; } /* Conditionals. */ else if ((gimple_code (stmt) == GIMPLE_COND && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop) /* We don't simplify all constant compares so make sure they are not both constant already. See PR70288. */ && (! is_gimple_min_invariant (gimple_cond_lhs (stmt)) || ! is_gimple_min_invariant (gimple_cond_rhs (stmt)))) || (gimple_code (stmt) == GIMPLE_SWITCH && constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop) && ! 
is_gimple_min_invariant (gimple_switch_index ( as_a <gswitch *> (stmt))))) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant conditional.\n"); likely_eliminated = true; } size->overall += num; if (likely_eliminated || likely_eliminated_peeled) size->eliminated_by_peeling += num; if (!after_exit) { size->last_iteration += num; if (likely_eliminated || likely_eliminated_last) size->last_iteration_eliminated_by_peeling += num; } if ((size->overall * 3 / 2 - size->eliminated_by_peeling - size->last_iteration_eliminated_by_peeling) > upper_bound) { free (body); path.release (); return true; } } } while (path.length ()) { basic_block bb = path.pop (); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_CALL) { int flags = gimple_call_flags (stmt); tree decl = gimple_call_fndecl (stmt); if (decl && DECL_IS_BUILTIN (decl) && is_inexpensive_builtin (decl)) ; else if (flags & (ECF_PURE | ECF_CONST)) size->num_pure_calls_on_hot_path++; else size->num_non_pure_calls_on_hot_path++; size->num_branches_on_hot_path ++; } else if (gimple_code (stmt) != GIMPLE_CALL && gimple_code (stmt) != GIMPLE_DEBUG) size->non_call_stmts_on_hot_path++; if (((gimple_code (stmt) == GIMPLE_COND && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) || constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))) || (gimple_code (stmt) == GIMPLE_SWITCH && !constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop))) && (!exit || bb != exit->src)) size->num_branches_on_hot_path++; } } path.release (); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall, size->eliminated_by_peeling, size->last_iteration, size->last_iteration_eliminated_by_peeling); free (body); return false; }
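The constant_after_peeling cases the estimate credits correspond to loops like the sketch below (array name invented): once the loop is fully unrolled, the IV update, the compare and the branch become constants in every copy and are counted as eliminated_by_peeling, leaving only the stores.

void clear4 (int *a)
{
  for (int i = 0; i < 4; i++)   /* i, i < 4 and i++ fold away per copy */
    a[i] = 0;
}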
/* The core routine of conditional store replacement and normal phi optimizations. Both share much of the infrastructure in how to match applicable basic block patterns. DO_STORE_ELIM is true when we want to do conditional store replacement, false otherwise. */ static unsigned int tree_ssa_phiopt_worker (bool do_store_elim) { basic_block bb; basic_block *bb_order; unsigned n, i; bool cfgchanged = false; struct pointer_set_t *nontrap = 0; if (do_store_elim) { condstoretemp = NULL_TREE; /* Calculate the set of non-trapping memory accesses. */ nontrap = get_non_trapping (); } /* Search every basic block for COND_EXPR we may be able to optimize. We walk the blocks in order that guarantees that a block with a single predecessor is processed before the predecessor. This ensures that we collapse inner ifs before visiting the outer ones, and also that we do not try to visit a removed block. */ bb_order = blocks_in_phiopt_order (); n = n_basic_blocks - NUM_FIXED_BLOCKS; for (i = 0; i < n; i++) { gimple cond_stmt, phi; basic_block bb1, bb2; edge e1, e2; tree arg0, arg1; bb = bb_order[i]; cond_stmt = last_stmt (bb); /* Check to see if the last statement is a GIMPLE_COND. */ if (!cond_stmt || gimple_code (cond_stmt) != GIMPLE_COND) continue; e1 = EDGE_SUCC (bb, 0); bb1 = e1->dest; e2 = EDGE_SUCC (bb, 1); bb2 = e2->dest; /* We cannot do the optimization on abnormal edges. */ if ((e1->flags & EDGE_ABNORMAL) != 0 || (e2->flags & EDGE_ABNORMAL) != 0) continue; /* If either bb1's succ or bb2 or bb2's succ is non NULL. */ if (EDGE_COUNT (bb1->succs) == 0 || bb2 == NULL || EDGE_COUNT (bb2->succs) == 0) continue; /* Find the bb which is the fall through to the other. */ if (EDGE_SUCC (bb1, 0)->dest == bb2) ; else if (EDGE_SUCC (bb2, 0)->dest == bb1) { basic_block bb_tmp = bb1; edge e_tmp = e1; bb1 = bb2; bb2 = bb_tmp; e1 = e2; e2 = e_tmp; } else continue; e1 = EDGE_SUCC (bb1, 0); /* Make sure that bb1 is just a fall through. */ if (!single_succ_p (bb1) || (e1->flags & EDGE_FALLTHRU) == 0) continue; /* Also make sure that bb1 only have one predecessor and that it is bb. */ if (!single_pred_p (bb1) || single_pred (bb1) != bb) continue; if (do_store_elim) { /* bb1 is the middle block, bb2 the join block, bb the split block, e1 the fallthrough edge from bb1 to bb2. We can't do the optimization if the join block has more than two predecessors. */ if (EDGE_COUNT (bb2->preds) > 2) continue; if (cond_store_replacement (bb1, bb2, e1, e2, nontrap)) cfgchanged = true; } else { gimple_seq phis = phi_nodes (bb2); /* Check to make sure that there is only one PHI node. TODO: we could do it with more than one iff the other PHI nodes have the same elements for these two edges. */ if (! gimple_seq_singleton_p (phis)) continue; phi = gsi_stmt (gsi_start (phis)); arg0 = gimple_phi_arg_def (phi, e1->dest_idx); arg1 = gimple_phi_arg_def (phi, e2->dest_idx); /* Something is wrong if we cannot find the arguments in the PHI node. */ gcc_assert (arg0 != NULL && arg1 != NULL); /* Do the replacement of conditional if it can be done. */ if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) cfgchanged = true; } } free (bb_order); if (do_store_elim) pointer_set_destroy (nontrap); /* If the CFG has changed, we should cleanup the CFG. 
*/ if (cfgchanged && do_store_elim) { /* In cond-store replacement we have added some loads on edges and new VOPS (as we moved the store, and created a load). */ gsi_commit_edge_inserts (); return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals; } else if (cfgchanged) return TODO_cleanup_cfg; return 0; }
static bool ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv, basic_block outer_cond_bb, bool outer_inv, bool result_inv) { gimple_stmt_iterator gsi; gimple inner_stmt, outer_stmt; gcond *inner_cond, *outer_cond; tree name1, name2, bit1, bit2, bits1, bits2; inner_stmt = last_stmt (inner_cond_bb); if (!inner_stmt || gimple_code (inner_stmt) != GIMPLE_COND) return false; inner_cond = as_a <gcond *> (inner_stmt); outer_stmt = last_stmt (outer_cond_bb); if (!outer_stmt || gimple_code (outer_stmt) != GIMPLE_COND) return false; outer_cond = as_a <gcond *> (outer_stmt); /* See if we test a single bit of the same name in both tests. In that case remove the outer test, merging both else edges, and change the inner one to test for name & (bit1 | bit2) == (bit1 | bit2). */ if (recognize_single_bit_test (inner_cond, &name1, &bit1, inner_inv) && recognize_single_bit_test (outer_cond, &name2, &bit2, outer_inv) && name1 == name2) { tree t, t2; /* Do it. */ gsi = gsi_for_stmt (inner_cond); t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1), build_int_cst (TREE_TYPE (name1), 1), bit1); t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1), build_int_cst (TREE_TYPE (name1), 1), bit2); t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t); t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t2, t); t = canonicalize_cond_expr_cond (t); if (!t) return false; gimple_cond_set_condition_from_tree (inner_cond, t); update_stmt (inner_cond); /* Leave CFG optimization to cfg_cleanup. */ gimple_cond_set_condition_from_tree (outer_cond, outer_inv ? boolean_false_node : boolean_true_node); update_stmt (outer_cond); if (dump_file) { fprintf (dump_file, "optimizing double bit test to "); print_generic_expr (dump_file, name1, 0); fprintf (dump_file, " & T == T\nwith temporary T = (1 << "); print_generic_expr (dump_file, bit1, 0); fprintf (dump_file, ") | (1 << "); print_generic_expr (dump_file, bit2, 0); fprintf (dump_file, ")\n"); } return true; } /* See if we have two bit tests of the same name in both tests. In that case remove the outer test and change the inner one to test for name & (bits1 | bits2) != 0. */ else if (recognize_bits_test (inner_cond, &name1, &bits1, !inner_inv) && recognize_bits_test (outer_cond, &name2, &bits2, !outer_inv)) { gimple_stmt_iterator gsi; tree t; /* Find the common name which is bit-tested. */ if (name1 == name2) ; else if (bits1 == bits2) { t = name2; name2 = bits2; bits2 = t; t = name1; name1 = bits1; bits1 = t; } else if (name1 == bits2) { t = name2; name2 = bits2; bits2 = t; } else if (bits1 == name2) { t = name1; name1 = bits1; bits1 = t; } else return false; /* As we strip non-widening conversions in finding a common name that is tested make sure to end up with an integral type for building the bit operations. 
*/ if (TYPE_PRECISION (TREE_TYPE (bits1)) >= TYPE_PRECISION (TREE_TYPE (bits2))) { bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1); name1 = fold_convert (TREE_TYPE (bits1), name1); bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2); bits2 = fold_convert (TREE_TYPE (bits1), bits2); } else { bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2); name1 = fold_convert (TREE_TYPE (bits2), name1); bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1); bits1 = fold_convert (TREE_TYPE (bits2), bits1); } /* Do it. */ gsi = gsi_for_stmt (inner_cond); t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); t = canonicalize_cond_expr_cond (t); if (!t) return false; gimple_cond_set_condition_from_tree (inner_cond, t); update_stmt (inner_cond); /* Leave CFG optimization to cfg_cleanup. */ gimple_cond_set_condition_from_tree (outer_cond, outer_inv ? boolean_false_node : boolean_true_node); update_stmt (outer_cond); if (dump_file) { fprintf (dump_file, "optimizing bits or bits test to "); print_generic_expr (dump_file, name1, 0); fprintf (dump_file, " & T != 0\nwith temporary T = "); print_generic_expr (dump_file, bits1, 0); fprintf (dump_file, " | "); print_generic_expr (dump_file, bits2, 0); fprintf (dump_file, "\n"); } return true; } /* See if we have two comparisons that we can merge into one. */ else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison) { tree t; enum tree_code inner_cond_code = gimple_cond_code (inner_cond); enum tree_code outer_cond_code = gimple_cond_code (outer_cond); /* Invert comparisons if necessary (and possible). */ if (inner_inv) inner_cond_code = invert_tree_comparison (inner_cond_code, HONOR_NANS (gimple_cond_lhs (inner_cond))); if (inner_cond_code == ERROR_MARK) return false; if (outer_inv) outer_cond_code = invert_tree_comparison (outer_cond_code, HONOR_NANS (gimple_cond_lhs (outer_cond))); if (outer_cond_code == ERROR_MARK) return false; /* Don't return false so fast, try maybe_fold_or_comparisons? */ if (!(t = maybe_fold_and_comparisons (inner_cond_code, gimple_cond_lhs (inner_cond), gimple_cond_rhs (inner_cond), outer_cond_code, gimple_cond_lhs (outer_cond), gimple_cond_rhs (outer_cond)))) { tree t1, t2; gimple_stmt_iterator gsi; if (!LOGICAL_OP_NON_SHORT_CIRCUIT) return false; /* Only do this optimization if the inner bb contains only the conditional. 
*/ if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb))) return false; t1 = fold_build2_loc (gimple_location (inner_cond), inner_cond_code, boolean_type_node, gimple_cond_lhs (inner_cond), gimple_cond_rhs (inner_cond)); t2 = fold_build2_loc (gimple_location (outer_cond), outer_cond_code, boolean_type_node, gimple_cond_lhs (outer_cond), gimple_cond_rhs (outer_cond)); t = fold_build2_loc (gimple_location (inner_cond), TRUTH_AND_EXPR, boolean_type_node, t1, t2); if (result_inv) { t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t); result_inv = false; } gsi = gsi_for_stmt (inner_cond); t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr, NULL, true, GSI_SAME_STMT); } if (result_inv) t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t); t = canonicalize_cond_expr_cond (t); if (!t) return false; gimple_cond_set_condition_from_tree (inner_cond, t); update_stmt (inner_cond); /* Leave CFG optimization to cfg_cleanup. */ gimple_cond_set_condition_from_tree (outer_cond, outer_inv ? boolean_false_node : boolean_true_node); update_stmt (outer_cond); if (dump_file) { fprintf (dump_file, "optimizing two comparisons to "); print_generic_expr (dump_file, t, 0); fprintf (dump_file, "\n"); } return true; } return false; }
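/* Illustrative sketch, not part of the GCC sources: a source-level
   before/after view of two of the rewrites ifcombine_ifandif performs.
   The function names, the variable `x' and the bit numbers are invented
   for the example.  */

/* Double bit test: both conditions test a single bit of the same name,
   so the inner condition becomes name & T == T with
   T = (1 << bit1) | (1 << bit2) and the outer condition is reduced to a
   constant, leaving the CFG cleanup to cfg_cleanup.  */

int
example_bits_before (unsigned int x)
{
  if (x & (1u << 3))
    if (x & (1u << 5))
      return 1;
  return 0;
}

int
example_bits_after (unsigned int x)
{
  unsigned int t = (1u << 3) | (1u << 5);
  if ((x & t) == t)
    return 1;
  return 0;
}

/* Comparison merge: two nested comparisons with a common else block are
   combined into a single condition, either by maybe_fold_and_comparisons
   or, failing that, by a non-short-circuit AND of the two boolean
   results when LOGICAL_OP_NON_SHORT_CIRCUIT holds.  */

int
example_range_before (int a)
{
  if (a > 0)
    if (a < 10)
      return 1;
  return 0;
}

int
example_range_after (int a)
{
  if ((a > 0) & (a < 10))	/* or an equivalent folded range test */
    return 1;
  return 0;
}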
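/* Try to replace PHI, whose arguments ARG0 and ARG1 arrive over edges E0
   and E1, by a single MIN_EXPR/MAX_EXPR computed from the condition that
   ends COND_BB, taking into account a MIN/MAX assignment in MIDDLE_BB
   when that block is not empty.  Return true if the replacement was
   made.  */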
static bool minmax_replacement (basic_block cond_bb, basic_block middle_bb, edge e0, edge e1, gimple phi, tree arg0, tree arg1) { tree result, type; gimple cond, new_stmt; edge true_edge, false_edge; enum tree_code cmp, minmax, ass_code; tree smaller, larger, arg_true, arg_false; gimple_stmt_iterator gsi, gsi_from; type = TREE_TYPE (PHI_RESULT (phi)); /* The optimization may be unsafe due to NaNs. */ if (HONOR_NANS (TYPE_MODE (type))) return false; cond = last_stmt (cond_bb); cmp = gimple_cond_code (cond); result = PHI_RESULT (phi); /* This transformation is only valid for order comparisons. Record which operand is smaller/larger if the result of the comparison is true. */ if (cmp == LT_EXPR || cmp == LE_EXPR) { smaller = gimple_cond_lhs (cond); larger = gimple_cond_rhs (cond); } else if (cmp == GT_EXPR || cmp == GE_EXPR) { smaller = gimple_cond_rhs (cond); larger = gimple_cond_lhs (cond); } else return false; /* We need to know which is the true edge and which is the false edge so that we know if have abs or negative abs. */ extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge); /* Forward the edges over the middle basic block. */ if (true_edge->dest == middle_bb) true_edge = EDGE_SUCC (true_edge->dest, 0); if (false_edge->dest == middle_bb) false_edge = EDGE_SUCC (false_edge->dest, 0); if (true_edge == e0) { gcc_assert (false_edge == e1); arg_true = arg0; arg_false = arg1; } else { gcc_assert (false_edge == e0); gcc_assert (true_edge == e1); arg_true = arg1; arg_false = arg0; } if (empty_block_p (middle_bb)) { if (operand_equal_for_phi_arg_p (arg_true, smaller) && operand_equal_for_phi_arg_p (arg_false, larger)) { /* Case if (smaller < larger) rslt = smaller; else rslt = larger; */ minmax = MIN_EXPR; } else if (operand_equal_for_phi_arg_p (arg_false, smaller) && operand_equal_for_phi_arg_p (arg_true, larger)) minmax = MAX_EXPR; else return false; } else { /* Recognize the following case, assuming d <= u: if (a <= u) b = MAX (a, d); x = PHI <b, u> This is equivalent to b = MAX (a, d); x = MIN (b, u); */ gimple assign = last_and_only_stmt (middle_bb); tree lhs, op0, op1, bound; if (!assign || gimple_code (assign) != GIMPLE_ASSIGN) return false; lhs = gimple_assign_lhs (assign); ass_code = gimple_assign_rhs_code (assign); if (ass_code != MAX_EXPR && ass_code != MIN_EXPR) return false; op0 = gimple_assign_rhs1 (assign); op1 = gimple_assign_rhs2 (assign); if (true_edge->src == middle_bb) { /* We got here if the condition is true, i.e., SMALLER < LARGER. */ if (!operand_equal_for_phi_arg_p (lhs, arg_true)) return false; if (operand_equal_for_phi_arg_p (arg_false, larger)) { /* Case if (smaller < larger) { r' = MAX_EXPR (smaller, bound) } r = PHI <r', larger> --> to be turned to MIN_EXPR. */ if (ass_code != MAX_EXPR) return false; minmax = MIN_EXPR; if (operand_equal_for_phi_arg_p (op0, smaller)) bound = op1; else if (operand_equal_for_phi_arg_p (op1, smaller)) bound = op0; else return false; /* We need BOUND <= LARGER. */ if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node, bound, larger))) return false; } else if (operand_equal_for_phi_arg_p (arg_false, smaller)) { /* Case if (smaller < larger) { r' = MIN_EXPR (larger, bound) } r = PHI <r', smaller> --> to be turned to MAX_EXPR. */ if (ass_code != MIN_EXPR) return false; minmax = MAX_EXPR; if (operand_equal_for_phi_arg_p (op0, larger)) bound = op1; else if (operand_equal_for_phi_arg_p (op1, larger)) bound = op0; else return false; /* We need BOUND >= SMALLER. 
*/ if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node, bound, smaller))) return false; } else return false; } else { /* We got here if the condition is false, i.e., SMALLER > LARGER. */ if (!operand_equal_for_phi_arg_p (lhs, arg_false)) return false; if (operand_equal_for_phi_arg_p (arg_true, larger)) { /* Case if (smaller > larger) { r' = MIN_EXPR (smaller, bound) } r = PHI <r', larger> --> to be turned to MAX_EXPR. */ if (ass_code != MIN_EXPR) return false; minmax = MAX_EXPR; if (operand_equal_for_phi_arg_p (op0, smaller)) bound = op1; else if (operand_equal_for_phi_arg_p (op1, smaller)) bound = op0; else return false; /* We need BOUND >= LARGER. */ if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node, bound, larger))) return false; } else if (operand_equal_for_phi_arg_p (arg_true, smaller)) { /* Case if (smaller > larger) { r' = MAX_EXPR (larger, bound) } r = PHI <r', smaller> --> to be turned to MIN_EXPR. */ if (ass_code != MAX_EXPR) return false; minmax = MIN_EXPR; if (operand_equal_for_phi_arg_p (op0, larger)) bound = op1; else if (operand_equal_for_phi_arg_p (op1, larger)) bound = op0; else return false; /* We need BOUND <= SMALLER. */ if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node, bound, smaller))) return false; } else return false; } /* Move the statement from the middle block. */ gsi = gsi_last_bb (cond_bb); gsi_from = gsi_last_bb (middle_bb); gsi_move_before (&gsi_from, &gsi); } /* Emit the statement to compute min/max. */ result = duplicate_ssa_name (PHI_RESULT (phi), NULL); new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1); gsi = gsi_last_bb (cond_bb); gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT); replace_phi_edge_with_variable (cond_bb, e1, phi, result); return true; }
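/* Illustrative sketch, not part of the GCC sources: a source-level view
   of the two shapes minmax_replacement handles.  The function and
   variable names are invented; the d <= u constraint mirrors the
   "Recognize the following case" comment above.  */

/* Empty middle block: a conditional select of the smaller operand
   becomes a single MIN_EXPR.  */

int
example_min_before (int a, int b)
{
  int x;
  if (a < b)
    x = a;
  else
    x = b;
  return x;			/* x = PHI <a, b>  */
}

int
example_min_after (int a, int b)
{
  return a < b ? a : b;		/* x = MIN_EXPR (a, b)  */
}

/* Non-empty middle block, assuming d <= u: the conditional MAX feeding
   the PHI is rewritten into an unconditional MAX followed by a MIN.  */

int
example_clamp_before (int a, int d, int u)
{
  int x;
  if (a <= u)
    x = a > d ? a : d;		/* b = MAX_EXPR (a, d);  x = PHI <b, u>  */
  else
    x = u;
  return x;
}

int
example_clamp_after (int a, int d, int u)
{
  int b = a > d ? a : d;	/* b = MAX_EXPR (a, d)  */
  return b < u ? b : u;		/* x = MIN_EXPR (b, u)  */
}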