/* Find all checks in current function and store info about them in check_infos. */ static void chkp_gather_checks_info (void) { basic_block bb; gimple_stmt_iterator i; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Gathering information about checks...\n"); chkp_init_check_info (); FOR_EACH_BB_FN (bb, cfun) { struct bb_checks *bbc = &check_infos[bb->index]; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Searching checks in BB%d...\n", bb->index); for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i)) { gimple *stmt = gsi_stmt (i); if (gimple_code (stmt) != GIMPLE_CALL) continue; if (gimple_call_fndecl (stmt) == chkp_checkl_fndecl || gimple_call_fndecl (stmt) == chkp_checku_fndecl) { struct check_info ci; chkp_fill_check_info (stmt, &ci); bbc->checks.safe_push (ci); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Adding check information:\n"); fprintf (dump_file, " bounds: "); print_generic_expr (dump_file, ci.bounds, 0); fprintf (dump_file, "\n address: "); chkp_print_addr (ci.addr); fprintf (dump_file, "\n check: "); print_gimple_stmt (dump_file, stmt, 0, 0); } } } } }
/* Transform 1) Memory references. */ static void mf_xform_statements (void) { basic_block bb, next; gimple_stmt_iterator i; int saved_last_basic_block = last_basic_block; enum gimple_rhs_class grhs_class; bb = ENTRY_BLOCK_PTR ->next_bb; do { next = bb->next_bb; for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i)) { gimple s = gsi_stmt (i); /* Only a few GIMPLE statements can reference memory. */ switch (gimple_code (s)) { case GIMPLE_ASSIGN: mf_xform_derefs_1 (&i, gimple_assign_lhs_ptr (s), gimple_location (s), integer_one_node); mf_xform_derefs_1 (&i, gimple_assign_rhs1_ptr (s), gimple_location (s), integer_zero_node); grhs_class = get_gimple_rhs_class (gimple_assign_rhs_code (s)); if (grhs_class == GIMPLE_BINARY_RHS) mf_xform_derefs_1 (&i, gimple_assign_rhs2_ptr (s), gimple_location (s), integer_zero_node); break; case GIMPLE_RETURN: if (gimple_return_retval (s) != NULL_TREE) { mf_xform_derefs_1 (&i, gimple_return_retval_ptr (s), gimple_location (s), integer_zero_node); } break; default: ; } } bb = next; } while (bb && bb->index <= saved_last_basic_block); }
static bool gimple_try_catch_may_fallthru (gimple stmt) { gimple_stmt_iterator i; /* We don't handle GIMPLE_TRY_FINALLY. */ gcc_assert (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH); /* If the TRY block can fall through, the whole TRY_CATCH can fall through. */ if (gimple_seq_may_fallthru (gimple_try_eval (stmt))) return true; i = gsi_start (*gimple_try_cleanup_ptr (stmt)); switch (gimple_code (gsi_stmt (i))) { case GIMPLE_CATCH: /* We expect to see a sequence of GIMPLE_CATCH stmts, each with a catch expression and a body. The whole try/catch may fall through iff any of the catch bodies falls through. */ for (; !gsi_end_p (i); gsi_next (&i)) { if (gimple_seq_may_fallthru (gimple_catch_handler (gsi_stmt (i)))) return true; } return false; case GIMPLE_EH_FILTER: /* The exception filter expression only matters if there is an exception. If the exception does not match EH_FILTER_TYPES, we will execute EH_FILTER_FAILURE, and we will fall through if that falls through. If the exception does match EH_FILTER_TYPES, the stack unwinder will continue up the stack, so we will not fall through. We don't know whether we will throw an exception which matches EH_FILTER_TYPES or not, so we just ignore EH_FILTER_TYPES and assume that we might throw an exception which doesn't match. */ return gimple_seq_may_fallthru (gimple_eh_filter_failure (gsi_stmt (i))); default: /* This case represents statements to be executed when an exception occurs. Those statements are implicitly followed by a GIMPLE_RESX to resume execution after the exception. So in this case the try/catch never falls through. */ return false; } }
static gimple get_prop_source_stmt (tree name, bool single_use_only, bool *single_use_p) { bool single_use = true; do { gimple def_stmt = SSA_NAME_DEF_STMT (name); if (!has_single_use (name)) { single_use = false; if (single_use_only) return NULL; } /* If name is defined by a PHI node or is the default def, bail out. */ if (gimple_code (def_stmt) != GIMPLE_ASSIGN) return NULL; /* If name is not a simple copy destination, we found it. */ if (!gimple_assign_copy_p (def_stmt) || TREE_CODE (gimple_assign_rhs1 (def_stmt)) != SSA_NAME) { tree rhs; if (!single_use_only && single_use_p) *single_use_p = single_use; /* We can look through pointer conversions in the search for a useful stmt for the comparison folding. */ rhs = gimple_assign_rhs1 (def_stmt); if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)) && TREE_CODE (rhs) == SSA_NAME && POINTER_TYPE_P (TREE_TYPE (gimple_assign_lhs (def_stmt))) && POINTER_TYPE_P (TREE_TYPE (rhs))) name = rhs; else return def_stmt; } else { /* Continue searching the def of the copy source name. */ name = gimple_assign_rhs1 (def_stmt); } } while (1); }
gimple_stmt_iterator gsi_for_stmt (gimple stmt) { gimple_stmt_iterator i; basic_block bb = gimple_bb (stmt); if (gimple_code (stmt) == GIMPLE_PHI) i = gsi_start_phis (bb); else i = gsi_start_bb (bb); for (; !gsi_end_p (i); gsi_next (&i)) if (gsi_stmt (i) == stmt) return i; gcc_unreachable (); }
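A minimal usage sketch (not taken from the GCC sources; the helper name is invented): gsi_for_stmt looks up the iterator for a statement that is already linked into a basic block, which can then be handed to gsi_remove (defined further below) and followed by release_defs to drop the SSA names the statement defined.

/* Hedged illustration only: look up STMT's iterator and delete the
   statement in place.  Assumes STMT is a non-PHI statement that is
   already part of a basic block of the current function.  */
static void
remove_stmt_in_place (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  /* Second argument true: remove permanently, purging EH info and
     histograms as gsi_remove documents.  */
  gsi_remove (&gsi, true);
  release_defs (stmt);
}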
CondExpr::CondExpr(gimple t) : Expression(t) { if (gimple_code(t) != GIMPLE_COND) throw BadGimpleException(t, "cond_expr"); _op = gimple_cond_code(t); _lhs = ValueFactory::INSTANCE.build(gimple_cond_lhs(t)); _rhs = ValueFactory::INSTANCE.build(gimple_cond_rhs(t)); /* tree a = gimple_cond_true_label(t); if (a != NULL && a != NULL_TREE) _then = ValueFactory::INSTANCE.build(gimple_cond_true_label(t)); a = gimple_cond_false_label(t); if (a != NULL && a != NULL_TREE) _else = ValueFactory::INSTANCE.build(gimple_cond_false_label(t)); */ }
bool gsi_remove (gimple_stmt_iterator *i, bool remove_permanently) { gimple_seq_node cur, next, prev; gimple *stmt = gsi_stmt (*i); bool require_eh_edge_purge = false; if (gimple_code (stmt) != GIMPLE_PHI) insert_debug_temps_for_defs (i); /* Free all the data flow information for STMT. */ gimple_set_bb (stmt, NULL); delink_stmt_imm_use (stmt); gimple_set_modified (stmt, true); if (remove_permanently) { require_eh_edge_purge = remove_stmt_from_eh_lp (stmt); gimple_remove_stmt_histograms (cfun, stmt); } /* Update the iterator and re-wire the links in I->SEQ. */ cur = i->ptr; next = cur->next; prev = cur->prev; /* See gsi_set_stmt for why we don't reset prev/next of STMT. */ if (next) /* Cur is not last. */ next->prev = prev; else if (prev->next) /* Cur is last but not first. */ gimple_seq_set_last (i->seq, prev); if (prev->next) /* Cur is not first. */ prev->next = next; else /* Cur is first. */ *i->seq = next; i->ptr = next; return require_eh_edge_purge; }
static bool generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p) { unsigned i, x; gimple_stmt_iterator bsi; basic_block *bbs; if (copy_p) { loop = copy_loop_before (loop); create_preheader (loop, CP_SIMPLE_PREHEADERS); create_bb_after_loop (loop); } if (loop == NULL) return false; /* Remove stmts not in the PARTITION bitmap. The order in which we visit the phi nodes and the statements is exactly as in stmts_from_loop. */ bbs = get_loop_body_in_dom_order (loop); for (x = 0, i = 0; i < loop->num_nodes; i++) { basic_block bb = bbs[i]; for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);) if (!bitmap_bit_p (partition, x++)) remove_phi_node (&bsi, true); else gsi_next (&bsi); for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);) if (gimple_code (gsi_stmt (bsi)) != GIMPLE_LABEL && !bitmap_bit_p (partition, x++)) gsi_remove (&bsi, false); else gsi_next (&bsi); mark_virtual_ops_in_bb (bb); } free (bbs); return true; }
/* Return true if we should ignore the basic block for purposes of tracing. */ static bool ignore_bb_p (const_basic_block bb) { gimple g; if (bb->index < NUM_FIXED_BLOCKS) return true; if (optimize_bb_for_size_p (bb)) return true; /* A transaction is a single entry multiple exit region. It must be duplicated in its entirety or not at all. */ g = last_stmt (CONST_CAST_BB (bb)); if (g && gimple_code (g) == GIMPLE_TRANSACTION) return true; return false; }
static bool has_inlined_assembly (function *fn) { basic_block bb; gimple_stmt_iterator gsi; /* If we do not have a cfg for this function, be conservative and assume it may have inline assembly. */ if (fn->cfg == NULL) return true; FOR_EACH_BB_FN (bb, fn) for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) if (gimple_code (gsi_stmt (gsi)) == GIMPLE_ASM) return true; return false; }
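The FOR_EACH_BB_FN / gsi_start_bb walk above is the generic way to visit every statement of a function; as a hedged sketch (helper name invented, not from the sources), the same loop can just as well count call statements instead of looking for GIMPLE_ASM:

/* Hedged sketch: count GIMPLE_CALL statements of FN using the same
   basic-block walk as has_inlined_assembly above.  */
static unsigned int
count_call_stmts (function *fn)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  unsigned int n = 0;

  /* Without a CFG there is nothing to walk.  */
  if (fn->cfg == NULL)
    return 0;

  FOR_EACH_BB_FN (bb, fn)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      if (is_gimple_call (gsi_stmt (gsi)))
	n++;

  return n;
}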
static void change_orig_node(struct visited *visited, gimple stmt, const_tree orig_node, tree new_node, unsigned int num) { tree cast_lhs = cast_to_orig_type(visited, stmt, orig_node, new_node); switch (gimple_code(stmt)) { case GIMPLE_RETURN: gimple_return_set_retval(as_a_greturn(stmt), cast_lhs); break; case GIMPLE_CALL: gimple_call_set_arg(stmt, num - 1, cast_lhs); break; case GIMPLE_ASM: change_size_overflow_asm_input(as_a_gasm(stmt), cast_lhs); break; default: debug_gimple_stmt(stmt); gcc_unreachable(); } update_stmt(stmt); }
static void adjust_return_value (basic_block bb, tree m, tree a) { tree retval; gimple ret_stmt = gimple_seq_last_stmt (bb_seq (bb)); gimple_stmt_iterator gsi = gsi_last_bb (bb); gcc_assert (gimple_code (ret_stmt) == GIMPLE_RETURN); retval = gimple_return_retval (ret_stmt); if (!retval || retval == error_mark_node) return; if (m) retval = adjust_return_value_with_ops (MULT_EXPR, "mul_tmp", m_acc, retval, gsi); if (a) retval = adjust_return_value_with_ops (PLUS_EXPR, "acc_tmp", a_acc, retval, gsi); gimple_return_set_retval (ret_stmt, retval); update_stmt (ret_stmt); }
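The modify-then-update_stmt pattern used by adjust_return_value applies whenever a statement's operands are rewritten in place; a hedged sketch of just that final step (invented helper, assumes BB ends in a GIMPLE_RETURN):

/* Hedged sketch, not from the sources: make the GIMPLE_RETURN that
   ends BB return VAL and refresh the statement's operand caches.  */
static void
set_return_value (basic_block bb, tree val)
{
  gimple ret_stmt = gimple_seq_last_stmt (bb_seq (bb));

  gcc_assert (gimple_code (ret_stmt) == GIMPLE_RETURN);
  gimple_return_set_retval (ret_stmt, val);
  update_stmt (ret_stmt);
}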
// Collect interesting stmts for duplication static void search_interesting_stmts(struct visited *visited) { basic_block bb; bool search_ret; struct interesting_stmts *head = NULL; search_ret = is_interesting_function(current_function_decl, 0); FOR_ALL_BB_FN(bb, cfun) { gimple_stmt_iterator gsi; for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { tree first_node; gimple stmt = gsi_stmt(gsi); switch (gimple_code(stmt)) { case GIMPLE_ASM: if (!is_size_overflow_insert_check_asm(as_a_gasm(stmt))) continue; first_node = get_size_overflow_asm_input(as_a_gasm(stmt)); head = search_interesting_stmt(head, stmt, first_node, 0); break; case GIMPLE_RETURN: if (!search_ret) continue; first_node = gimple_return_retval(as_a_greturn(stmt)); if (first_node == NULL_TREE) break; head = search_interesting_stmt(head, stmt, first_node, 0); break; case GIMPLE_CALL: head = search_interesting_calls(head, as_a_gcall(stmt)); break; default: break; } } }
static unsigned int tree_ssa_ifcombine (void) { basic_block *bbs; bool cfg_changed = false; int i; bbs = blocks_in_phiopt_order (); for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; ++i) { basic_block bb = bbs[i]; gimple stmt = last_stmt (bb); if (stmt && gimple_code (stmt) == GIMPLE_COND) cfg_changed |= tree_ssa_ifcombine_bb (bb); } free (bbs); return cfg_changed ? TODO_cleanup_cfg : 0; }
static unsigned int tree_ssa_ifcombine (void) { basic_block *bbs; bool cfg_changed = false; int i; bbs = single_pred_before_succ_order (); calculate_dominance_info (CDI_DOMINATORS); for (i = 0; i < n_basic_blocks - NUM_FIXED_BLOCKS; ++i) { basic_block bb = bbs[i]; gimple stmt = last_stmt (bb); if (stmt && gimple_code (stmt) == GIMPLE_COND) cfg_changed |= tree_ssa_ifcombine_bb (bb); } free (bbs); return cfg_changed ? TODO_cleanup_cfg : 0; }
static bool gimple_find_edge_insert_loc (edge e, gimple_stmt_iterator *gsi, basic_block *new_bb) { basic_block dest, src; gimple *tmp; dest = e->dest; /* If the destination has one predecessor which has no PHI nodes, insert there. Except for the exit block. The requirement for no PHI nodes could be relaxed. Basically we would have to examine the PHIs to prove that none of them used the value set by the statement we want to insert on E. That hardly seems worth the effort. */ restart: if (single_pred_p (dest) && gimple_seq_empty_p (phi_nodes (dest)) && dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) { *gsi = gsi_start_bb (dest); if (gsi_end_p (*gsi)) return true; /* Make sure we insert after any leading labels. */ tmp = gsi_stmt (*gsi); while (gimple_code (tmp) == GIMPLE_LABEL) { gsi_next (gsi); if (gsi_end_p (*gsi)) break; tmp = gsi_stmt (*gsi); } if (gsi_end_p (*gsi)) { *gsi = gsi_last_bb (dest); return true; } else return false; } /* If the source has one successor, the edge is not abnormal and the last statement does not end a basic block, insert there. Except for the entry block. */ src = e->src; if ((e->flags & EDGE_ABNORMAL) == 0 && single_succ_p (src) && src != ENTRY_BLOCK_PTR_FOR_FN (cfun)) { *gsi = gsi_last_bb (src); if (gsi_end_p (*gsi)) return true; tmp = gsi_stmt (*gsi); if (!stmt_ends_bb_p (tmp)) return true; switch (gimple_code (tmp)) { case GIMPLE_RETURN: case GIMPLE_RESX: return false; default: break; } } /* Otherwise, create a new basic block, and split this edge. */ dest = split_edge (e); if (new_bb) *new_bb = dest; e = single_pred_edge (dest); goto restart; }
static unsigned int build_cgraph_edges (void) { basic_block bb; struct cgraph_node *node = cgraph_get_node (current_function_decl); struct pointer_set_t *visited_nodes = pointer_set_create (); gimple_stmt_iterator gsi; tree decl; unsigned ix; /* Create the callgraph edges and record the nodes referenced by the function body. */ FOR_EACH_BB_FN (bb, cfun) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); tree decl; if (is_gimple_debug (stmt)) continue; if (is_gimple_call (stmt)) { int freq = compute_call_stmt_bb_frequency (current_function_decl, bb); decl = gimple_call_fndecl (stmt); if (decl) cgraph_create_edge (node, cgraph_get_create_node (decl), stmt, bb->count, freq); else if (gimple_call_internal_p (stmt)) ; else cgraph_create_indirect_edge (node, stmt, gimple_call_flags (stmt), bb->count, freq); } ipa_record_stmt_references (node, stmt); if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL && gimple_omp_parallel_child_fn (stmt)) { tree fn = gimple_omp_parallel_child_fn (stmt); ipa_record_reference (node, cgraph_get_create_node (fn), IPA_REF_ADDR, stmt); } if (gimple_code (stmt) == GIMPLE_OMP_TASK) { tree fn = gimple_omp_task_child_fn (stmt); if (fn) ipa_record_reference (node, cgraph_get_create_node (fn), IPA_REF_ADDR, stmt); fn = gimple_omp_task_copy_fn (stmt); if (fn) ipa_record_reference (node, cgraph_get_create_node (fn), IPA_REF_ADDR, stmt); } } for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) ipa_record_stmt_references (node, gsi_stmt (gsi)); } /* Look for initializers of constant variables and private statics. */ FOR_EACH_LOCAL_DECL (cfun, ix, decl) if (TREE_CODE (decl) == VAR_DECL && (TREE_STATIC (decl) && !DECL_EXTERNAL (decl)) && !DECL_HAS_VALUE_EXPR_P (decl)) varpool_finalize_decl (decl); record_eh_tables (node, cfun); pointer_set_destroy (visited_nodes); return 0; }
static bool check_pow (gimple pow_call) { tree base, expn; enum tree_code bc, ec; if (gimple_call_num_args (pow_call) != 2) return false; base = gimple_call_arg (pow_call, 0); expn = gimple_call_arg (pow_call, 1); if (!check_target_format (expn)) return false; bc = TREE_CODE (base); ec = TREE_CODE (expn); /* Folding candidates are not interesting. Can actually assert that it is already folded. */ if (ec == REAL_CST && bc == REAL_CST) return false; if (bc == REAL_CST) { /* Only handle a fixed range of constant. */ REAL_VALUE_TYPE mv; REAL_VALUE_TYPE bcv = TREE_REAL_CST (base); if (REAL_VALUES_EQUAL (bcv, dconst1)) return false; if (REAL_VALUES_LESS (bcv, dconst1)) return false; real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1); if (REAL_VALUES_LESS (mv, bcv)) return false; return true; } else if (bc == SSA_NAME) { tree base_val0, base_var, type; gimple base_def; int bit_sz; /* Only handles cases where base value is converted from integer values. */ base_def = SSA_NAME_DEF_STMT (base); if (gimple_code (base_def) != GIMPLE_ASSIGN) return false; if (gimple_assign_rhs_code (base_def) != FLOAT_EXPR) return false; base_val0 = gimple_assign_rhs1 (base_def); base_var = SSA_NAME_VAR (base_val0); if (!DECL_P (base_var)) return false; type = TREE_TYPE (base_var); if (TREE_CODE (type) != INTEGER_TYPE) return false; bit_sz = TYPE_PRECISION (type); /* If the type of the base is too wide, the resulting shrink wrapping condition will be too conservative. */ if (bit_sz > MAX_BASE_INT_BIT_SIZE) return false; return true; } else return false; }
static bool tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size, int upper_bound) { basic_block *body = get_loop_body (loop); gimple_stmt_iterator gsi; unsigned int i; bool after_exit; vec<basic_block> path = get_loop_hot_path (loop); size->overall = 0; size->eliminated_by_peeling = 0; size->last_iteration = 0; size->last_iteration_eliminated_by_peeling = 0; size->num_pure_calls_on_hot_path = 0; size->num_non_pure_calls_on_hot_path = 0; size->non_call_stmts_on_hot_path = 0; size->num_branches_on_hot_path = 0; size->constant_iv = 0; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num); for (i = 0; i < loop->num_nodes; i++) { if (edge_to_cancel && body[i] != edge_to_cancel->src && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src)) after_exit = true; else after_exit = false; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit); for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); int num = estimate_num_insns (stmt, &eni_size_weights); bool likely_eliminated = false; bool likely_eliminated_last = false; bool likely_eliminated_peeled = false; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " size: %3i ", num); print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0); } /* Look for reasons why we might optimize this stmt away. */ if (gimple_has_side_effects (stmt)) ; /* Exit conditional. */ else if (exit && body[i] == exit->src && stmt == last_stmt (exit->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in peeled copies.\n"); likely_eliminated_peeled = true; } else if (edge_to_cancel && body[i] == edge_to_cancel->src && stmt == last_stmt (edge_to_cancel->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in last copy.\n"); likely_eliminated_last = true; } /* Sets of IV variables */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Induction variable computation will" " be folded away.\n"); likely_eliminated = true; } /* Assignments of IV variables. */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop) && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS || constant_after_peeling (gimple_assign_rhs2 (stmt), stmt, loop))) { size->constant_iv = true; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant expression will be folded away.\n"); likely_eliminated = true; } /* Conditionals. */ else if ((gimple_code (stmt) == GIMPLE_COND && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop) /* We don't simplify all constant compares so make sure they are not both constant already. See PR70288. */ && (! is_gimple_min_invariant (gimple_cond_lhs (stmt)) || ! is_gimple_min_invariant (gimple_cond_rhs (stmt)))) || (gimple_code (stmt) == GIMPLE_SWITCH && constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop) && ! 
is_gimple_min_invariant (gimple_switch_index ( as_a <gswitch *> (stmt))))) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant conditional.\n"); likely_eliminated = true; } size->overall += num; if (likely_eliminated || likely_eliminated_peeled) size->eliminated_by_peeling += num; if (!after_exit) { size->last_iteration += num; if (likely_eliminated || likely_eliminated_last) size->last_iteration_eliminated_by_peeling += num; } if ((size->overall * 3 / 2 - size->eliminated_by_peeling - size->last_iteration_eliminated_by_peeling) > upper_bound) { free (body); path.release (); return true; } } } while (path.length ()) { basic_block bb = path.pop (); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_CALL) { int flags = gimple_call_flags (stmt); tree decl = gimple_call_fndecl (stmt); if (decl && DECL_IS_BUILTIN (decl) && is_inexpensive_builtin (decl)) ; else if (flags & (ECF_PURE | ECF_CONST)) size->num_pure_calls_on_hot_path++; else size->num_non_pure_calls_on_hot_path++; size->num_branches_on_hot_path ++; } else if (gimple_code (stmt) != GIMPLE_CALL && gimple_code (stmt) != GIMPLE_DEBUG) size->non_call_stmts_on_hot_path++; if (((gimple_code (stmt) == GIMPLE_COND && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) || constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))) || (gimple_code (stmt) == GIMPLE_SWITCH && !constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop))) && (!exit || bb != exit->src)) size->num_branches_on_hot_path++; } } path.release (); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall, size->eliminated_by_peeling, size->last_iteration, size->last_iteration_eliminated_by_peeling); free (body); return false; }
static void my_dump_gimple(gimple gnode, gimple_stmt_iterator *ptrgsi) { int gcode; tree tnode; tree funcdecl; tree desc_node; tree ptr_desc_node; tree t; tree tmp_var; tree const_char_restrict_ptr_type_node; gimple tmp_gstmt; gimple new_gnode; const char *hellocstr = "Hello, GCC!\n"; int i; struct c_binding *b; expanded_location xloc; /* * Extract the Gimple Code from a gimple node */ gcode = gimple_code(gnode); /* * Get the line number of the corresponding * source code from a gimple node */ if(gimple_has_location(gnode)) { xloc = expand_location(gimple_location(gnode)); printf("line %d:", xloc.line); } printf("\t\t\t\t%s\n", gimple_code_name[gcode]); switch(gcode) { case GIMPLE_ASSIGN: /* * Add a printf("Hello, GCC!\n"); statement * after the first assignment we encounter. * If yes equals 1, then we have already * added the statement and do not need to * add it again */ if(!yes) { /* * Since printf is a builtin function, we need * to get the function declaration using * built_in_decls[]. The index number can be * found in gcc source gcc/builtins.def */ funcdecl = built_in_decls[BUILT_IN_PRINTF]; if(funcdecl == NULL_TREE) { printf("cannot find printf\n"); } else { /* * In gimple, every statement is simplified into * three-operand form, and our printf() statement * is changed into the following two gimple statements: * * <D.XXX> = (const char * restrict) &"Hello, GCC!\n"[0] * printf(<D.XXX>); * * Note that <D.XXX> is a temporary variable; we can * actually use any name we like as long as there is no * conflict. */ /* * Generate a STRING_CST whose value is "Hello, GCC!\n" */ desc_node = build_string(strlen(hellocstr), hellocstr); /* * Two points to note here: * 1. A STRING_CST built by build_string() does * not have TREE_TYPE set, so we need to * set it manually. * 2. build_string() will add a trailing '\0' * when building the STRING_CST, so we do * not need to care about it. */ TREE_TYPE(desc_node) = build_array_type( char_type_node, build_index_type( build_int_cst(NULL_TREE, strlen(hellocstr)))); /* * Define a const char * restrict type node * here for the conversion. * I'm not sure why we need to add a restrict * attribute, but GCC really does it when it * converts a STRING_CST from AST to Gimple. */ const_char_restrict_ptr_type_node = build_qualified_type( build_pointer_type( build_qualified_type( char_type_node, TYPE_QUAL_CONST)), TYPE_QUAL_RESTRICT); /* * In the AST, a use of a STRING_CST has the form * <ADDR_EXPR<STRING_CST>>, * but when we turn to gimple, it becomes * <ADDR_EXPR<ARRAY_REF<STRING_CST>>>. * So we need to do a conversion here. */ /* * First wrap the STRING_CST in an ARRAY_REF */ t = build4(ARRAY_REF, char_type_node, desc_node, build_int_cst(NULL_TREE, 0), NULL, NULL); /* * Second wrap the ARRAY_REF in an ADDR_EXPR */ ptr_desc_node = build1(ADDR_EXPR, const_char_restrict_ptr_type_node, t); /* * I'm not sure why we need to use fold_convert() * here, but if we do not, the compilation fails. */ ptr_desc_node = fold_convert( const_char_restrict_ptr_type_node, ptr_desc_node); /* * If is_gimple_min_invariant(ptr_desc_node) * is true, we have built a correct argument; otherwise * the argument is not suitable for a gimple call */ if(!is_gimple_min_invariant(ptr_desc_node)) { printf("Something wrong with is_gimple_min_invariant\n"); return ; } /* * This creates a temporary variable */ tmp_var = make_rename_temp( const_char_restrict_ptr_type_node, "plugin_var"); /* * Build a gimple statement. Still remember that? 
* <D.XXX> = (const char * restrict) "Hello, GCC!\n" */ tmp_gstmt = gimple_build_assign(tmp_var, ptr_desc_node); /* * Check if the gimple statement is correct */ if(!is_gimple_assign(tmp_gstmt)) { printf("tmp_gstmt is invalid\n"); } printf("Insert gimple statement:"); print_gimple_stmt(stdout, tmp_gstmt, 0, TDF_DETAILS | TDF_VERBOSE | TDF_TREE); /* * Insert the gimple statement into the basic block */ gsi_insert_after(ptrgsi, tmp_gstmt, GSI_NEW_STMT); if(is_gimple_operand(tmp_var)) { printf("begin to insert printf\n"); yes = 1; printf("Insert gimple statement:"); /* * Insert the gimple statement calling printf * into the basic block */ new_gnode = gimple_build_call( funcdecl, 1, tmp_var); print_gimple_stmt(stdout, new_gnode, 0, 0); gsi_insert_after(ptrgsi, new_gnode, GSI_NEW_STMT); } else { print_generic_stmt(stdout, ptr_desc_node, TDF_DETAILS | TDF_VERBOSE | TDF_TREE); printf("Not Gimple Operands\n"); } /* * Since we have more than one consecutive statement * to insert, we could instead build a gimple * sequence, insert all the statements into the sequence, * and then insert the sequence into the basic block. * That seems to be a better method. */ } } else { } break; default: break; } }
/* Look into pointer pointed to by GSIP and figure out what interesting side effects it has. */ static void check_stmt (gimple_stmt_iterator *gsip, funct_state local, bool ipa) { gimple stmt = gsi_stmt (*gsip); if (is_gimple_debug (stmt)) return; if (dump_file) { fprintf (dump_file, " scanning: "); print_gimple_stmt (dump_file, stmt, 0, 0); } if (gimple_has_volatile_ops (stmt) && !gimple_clobber_p (stmt)) { local->pure_const_state = IPA_NEITHER; if (dump_file) fprintf (dump_file, " Volatile stmt is not const/pure\n"); } /* Look for loads and stores. */ walk_stmt_load_store_ops (stmt, local, ipa ? check_ipa_load : check_load, ipa ? check_ipa_store : check_store); if (gimple_code (stmt) != GIMPLE_CALL && stmt_could_throw_p (stmt)) { if (cfun->can_throw_non_call_exceptions) { if (dump_file) fprintf (dump_file, " can throw; looping\n"); local->looping = true; } if (stmt_can_throw_external (stmt)) { if (dump_file) fprintf (dump_file, " can throw externally\n"); local->can_throw = true; } else if (dump_file) fprintf (dump_file, " can throw\n"); } switch (gimple_code (stmt)) { case GIMPLE_CALL: check_call (local, stmt, ipa); break; case GIMPLE_LABEL: if (DECL_NONLOCAL (gimple_label_label (stmt))) /* Target of long jump. */ { if (dump_file) fprintf (dump_file, " nonlocal label is not const/pure\n"); local->pure_const_state = IPA_NEITHER; } break; case GIMPLE_ASM: if (gimple_asm_clobbers_memory_p (stmt)) { if (dump_file) fprintf (dump_file, " memory asm clobber is not const/pure\n"); /* Abandon all hope, ye who enter here. */ local->pure_const_state = IPA_NEITHER; } if (gimple_asm_volatile_p (stmt)) { if (dump_file) fprintf (dump_file, " volatile is not const/pure\n"); /* Abandon all hope, ye who enter here. */ local->pure_const_state = IPA_NEITHER; local->looping = true; } return; default: break; } }
static bool stmt_simple_for_scop_p (basic_block scop_entry, loop_p outermost_loop, gimple stmt, basic_block bb) { loop_p loop = bb->loop_father; gcc_assert (scop_entry); /* GIMPLE_ASM and GIMPLE_CALL may embed arbitrary side effects. Calls have side-effects, except those to const or pure functions. */ if (gimple_has_volatile_ops (stmt) || (gimple_code (stmt) == GIMPLE_CALL && !(gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))) || (gimple_code (stmt) == GIMPLE_ASM)) return false; if (is_gimple_debug (stmt)) return true; if (!stmt_has_simple_data_refs_p (outermost_loop, stmt)) return false; switch (gimple_code (stmt)) { case GIMPLE_RETURN: case GIMPLE_LABEL: return true; case GIMPLE_COND: { /* We can handle all binary comparisons. Inequalities are also supported as they can be represented with union of polyhedra. */ enum tree_code code = gimple_cond_code (stmt); if (!(code == LT_EXPR || code == GT_EXPR || code == LE_EXPR || code == GE_EXPR || code == EQ_EXPR || code == NE_EXPR)) return false; for (unsigned i = 0; i < 2; ++i) { tree op = gimple_op (stmt, i); if (!graphite_can_represent_expr (scop_entry, loop, op) /* We can not handle REAL_TYPE. Failed for pr39260. */ || TREE_CODE (TREE_TYPE (op)) == REAL_TYPE) return false; } return true; } case GIMPLE_ASSIGN: case GIMPLE_CALL: return true; default: /* These nodes cut a new scope. */ return false; } return false; }
static bool dse_possible_dead_store_p (gimple stmt, gimple *use_stmt) { gimple temp; unsigned cnt = 0; *use_stmt = NULL; /* Find the first dominated statement that clobbers (part of) the memory stmt stores to with no intermediate statement that may use part of the memory stmt stores. That is, find a store that may prove stmt to be a dead store. */ temp = stmt; do { gimple use_stmt; imm_use_iterator ui; bool fail = false; tree defvar; /* Limit stmt walking to be linear in the number of possibly dead stores. */ if (++cnt > 256) return false; if (gimple_code (temp) == GIMPLE_PHI) defvar = PHI_RESULT (temp); else defvar = gimple_vdef (temp); temp = NULL; FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar) { cnt++; /* If we ever reach our DSE candidate stmt again fail. We cannot handle dead stores in loops. */ if (use_stmt == stmt) { fail = true; BREAK_FROM_IMM_USE_STMT (ui); } /* In simple cases we can look through PHI nodes, but we have to be careful with loops and with memory references containing operands that are also operands of PHI nodes. See gcc.c-torture/execute/20051110-*.c. */ else if (gimple_code (use_stmt) == GIMPLE_PHI) { if (temp /* Make sure we are not in a loop latch block. */ || gimple_bb (stmt) == gimple_bb (use_stmt) || dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), gimple_bb (use_stmt)) /* We can look through PHIs to regions post-dominating the DSE candidate stmt. */ || !dominated_by_p (CDI_POST_DOMINATORS, gimple_bb (stmt), gimple_bb (use_stmt))) { fail = true; BREAK_FROM_IMM_USE_STMT (ui); } temp = use_stmt; } /* If the statement is a use the store is not dead. */ else if (ref_maybe_used_by_stmt_p (use_stmt, gimple_assign_lhs (stmt))) { fail = true; BREAK_FROM_IMM_USE_STMT (ui); } /* If this is a store, remember it or bail out if we have multiple ones (the will be in different CFG parts then). */ else if (gimple_vdef (use_stmt)) { if (temp) { fail = true; BREAK_FROM_IMM_USE_STMT (ui); } temp = use_stmt; } } if (fail) return false; /* If we didn't find any definition this means the store is dead if it isn't a store to global reachable memory. In this case just pretend the stmt makes itself dead. Otherwise fail. */ if (!temp) { if (is_hidden_global_store (stmt)) return false; temp = stmt; break; } }
static void output_gimple_stmt (struct output_block *ob, gimple stmt) { unsigned i; enum gimple_code code; enum LTO_tags tag; struct bitpack_d bp; histogram_value hist; /* Emit identifying tag. */ code = gimple_code (stmt); tag = lto_gimple_code_to_tag (code); streamer_write_record_start (ob, tag); /* Emit the tuple header. */ bp = bitpack_create (ob->main_stream); bp_pack_var_len_unsigned (&bp, gimple_num_ops (stmt)); bp_pack_value (&bp, gimple_no_warning_p (stmt), 1); if (is_gimple_assign (stmt)) bp_pack_value (&bp, gimple_assign_nontemporal_move_p (stmt), 1); bp_pack_value (&bp, gimple_has_volatile_ops (stmt), 1); hist = gimple_histogram_value (cfun, stmt); bp_pack_value (&bp, hist != NULL, 1); bp_pack_var_len_unsigned (&bp, stmt->gsbase.subcode); /* Emit location information for the statement. */ stream_output_location (ob, &bp, LOCATION_LOCUS (gimple_location (stmt))); streamer_write_bitpack (&bp); /* Emit the lexical block holding STMT. */ stream_write_tree (ob, gimple_block (stmt), true); /* Emit the operands. */ switch (gimple_code (stmt)) { case GIMPLE_RESX: streamer_write_hwi (ob, gimple_resx_region (stmt)); break; case GIMPLE_EH_MUST_NOT_THROW: stream_write_tree (ob, gimple_eh_must_not_throw_fndecl (stmt), true); break; case GIMPLE_EH_DISPATCH: streamer_write_hwi (ob, gimple_eh_dispatch_region (stmt)); break; case GIMPLE_ASM: streamer_write_uhwi (ob, gimple_asm_ninputs (stmt)); streamer_write_uhwi (ob, gimple_asm_noutputs (stmt)); streamer_write_uhwi (ob, gimple_asm_nclobbers (stmt)); streamer_write_uhwi (ob, gimple_asm_nlabels (stmt)); streamer_write_string (ob, ob->main_stream, gimple_asm_string (stmt), true); /* Fallthru */ case GIMPLE_ASSIGN: case GIMPLE_CALL: case GIMPLE_RETURN: case GIMPLE_SWITCH: case GIMPLE_LABEL: case GIMPLE_COND: case GIMPLE_GOTO: case GIMPLE_DEBUG: for (i = 0; i < gimple_num_ops (stmt); i++) { tree op = gimple_op (stmt, i); tree *basep = NULL; /* Wrap all uses of non-automatic variables inside MEM_REFs so that we do not have to deal with type mismatches on merged symbols during IL read in. The first operand of GIMPLE_DEBUG must be a decl, not MEM_REF, though. */ if (op && (i || !is_gimple_debug (stmt))) { basep = &op; while (handled_component_p (*basep)) basep = &TREE_OPERAND (*basep, 0); if (TREE_CODE (*basep) == VAR_DECL && !auto_var_in_fn_p (*basep, current_function_decl) && !DECL_REGISTER (*basep)) { bool volatilep = TREE_THIS_VOLATILE (*basep); *basep = build2 (MEM_REF, TREE_TYPE (*basep), build_fold_addr_expr (*basep), build_int_cst (build_pointer_type (TREE_TYPE (*basep)), 0)); TREE_THIS_VOLATILE (*basep) = volatilep; } else basep = NULL; } stream_write_tree (ob, op, true); /* Restore the original base if we wrapped it inside a MEM_REF. */ if (basep) *basep = TREE_OPERAND (TREE_OPERAND (*basep, 0), 0); } if (is_gimple_call (stmt)) { if (gimple_call_internal_p (stmt)) streamer_write_enum (ob->main_stream, internal_fn, IFN_LAST, gimple_call_internal_fn (stmt)); else stream_write_tree (ob, gimple_call_fntype (stmt), true); } break; case GIMPLE_NOP: case GIMPLE_PREDICT: break; case GIMPLE_TRANSACTION: gcc_assert (gimple_transaction_body (stmt) == NULL); stream_write_tree (ob, gimple_transaction_label (stmt), true); break; default: gcc_unreachable (); } if (hist) stream_out_histogram_value (ob, hist); }
bool gimple_simplify (gimple stmt, code_helper *rcode, tree *ops, gimple_seq *seq, tree (*valueize)(tree)) { switch (gimple_code (stmt)) { case GIMPLE_ASSIGN: { enum tree_code code = gimple_assign_rhs_code (stmt); tree type = TREE_TYPE (gimple_assign_lhs (stmt)); switch (gimple_assign_rhs_class (stmt)) { case GIMPLE_SINGLE_RHS: if (code == REALPART_EXPR || code == IMAGPART_EXPR || code == VIEW_CONVERT_EXPR) { tree op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0); if (valueize && TREE_CODE (op0) == SSA_NAME) { tree tem = valueize (op0); if (tem) op0 = tem; } *rcode = code; ops[0] = op0; return gimple_resimplify1 (seq, rcode, type, ops, valueize); } else if (code == BIT_FIELD_REF) { tree rhs1 = gimple_assign_rhs1 (stmt); tree op0 = TREE_OPERAND (rhs1, 0); if (valueize && TREE_CODE (op0) == SSA_NAME) { tree tem = valueize (op0); if (tem) op0 = tem; } *rcode = code; ops[0] = op0; ops[1] = TREE_OPERAND (rhs1, 1); ops[2] = TREE_OPERAND (rhs1, 2); return gimple_resimplify3 (seq, rcode, type, ops, valueize); } else if (code == SSA_NAME && valueize) { tree op0 = gimple_assign_rhs1 (stmt); tree valueized = valueize (op0); if (!valueized || op0 == valueized) return false; ops[0] = valueized; *rcode = TREE_CODE (op0); return true; } break; case GIMPLE_UNARY_RHS: { tree rhs1 = gimple_assign_rhs1 (stmt); if (valueize && TREE_CODE (rhs1) == SSA_NAME) { tree tem = valueize (rhs1); if (tem) rhs1 = tem; } *rcode = code; ops[0] = rhs1; return gimple_resimplify1 (seq, rcode, type, ops, valueize); } case GIMPLE_BINARY_RHS: { tree rhs1 = gimple_assign_rhs1 (stmt); if (valueize && TREE_CODE (rhs1) == SSA_NAME) { tree tem = valueize (rhs1); if (tem) rhs1 = tem; } tree rhs2 = gimple_assign_rhs2 (stmt); if (valueize && TREE_CODE (rhs2) == SSA_NAME) { tree tem = valueize (rhs2); if (tem) rhs2 = tem; } *rcode = code; ops[0] = rhs1; ops[1] = rhs2; return gimple_resimplify2 (seq, rcode, type, ops, valueize); } case GIMPLE_TERNARY_RHS: { tree rhs1 = gimple_assign_rhs1 (stmt); if (valueize && TREE_CODE (rhs1) == SSA_NAME) { tree tem = valueize (rhs1); if (tem) rhs1 = tem; } tree rhs2 = gimple_assign_rhs2 (stmt); if (valueize && TREE_CODE (rhs2) == SSA_NAME) { tree tem = valueize (rhs2); if (tem) rhs2 = tem; } tree rhs3 = gimple_assign_rhs3 (stmt); if (valueize && TREE_CODE (rhs3) == SSA_NAME) { tree tem = valueize (rhs3); if (tem) rhs3 = tem; } *rcode = code; ops[0] = rhs1; ops[1] = rhs2; ops[2] = rhs3; return gimple_resimplify3 (seq, rcode, type, ops, valueize); } default: gcc_unreachable (); } break; } case GIMPLE_CALL: /* ??? This way we can't simplify calls with side-effects. */ if (gimple_call_lhs (stmt) != NULL_TREE) { tree fn = gimple_call_fn (stmt); /* ??? Internal function support missing. 
*/ if (!fn) return false; if (valueize && TREE_CODE (fn) == SSA_NAME) { tree tem = valueize (fn); if (tem) fn = tem; } if (!fn || TREE_CODE (fn) != ADDR_EXPR || TREE_CODE (TREE_OPERAND (fn, 0)) != FUNCTION_DECL || DECL_BUILT_IN_CLASS (TREE_OPERAND (fn, 0)) != BUILT_IN_NORMAL || !builtin_decl_implicit (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))) || !gimple_builtin_call_types_compatible_p (stmt, TREE_OPERAND (fn, 0))) return false; tree decl = TREE_OPERAND (fn, 0); tree type = TREE_TYPE (gimple_call_lhs (stmt)); switch (gimple_call_num_args (stmt)) { case 1: { tree arg1 = gimple_call_arg (stmt, 0); if (valueize && TREE_CODE (arg1) == SSA_NAME) { tree tem = valueize (arg1); if (tem) arg1 = tem; } *rcode = DECL_FUNCTION_CODE (decl); ops[0] = arg1; return gimple_resimplify1 (seq, rcode, type, ops, valueize); } case 2: { tree arg1 = gimple_call_arg (stmt, 0); if (valueize && TREE_CODE (arg1) == SSA_NAME) { tree tem = valueize (arg1); if (tem) arg1 = tem; } tree arg2 = gimple_call_arg (stmt, 1); if (valueize && TREE_CODE (arg2) == SSA_NAME) { tree tem = valueize (arg2); if (tem) arg2 = tem; } *rcode = DECL_FUNCTION_CODE (decl); ops[0] = arg1; ops[1] = arg2; return gimple_resimplify2 (seq, rcode, type, ops, valueize); } case 3: { tree arg1 = gimple_call_arg (stmt, 0); if (valueize && TREE_CODE (arg1) == SSA_NAME) { tree tem = valueize (arg1); if (tem) arg1 = tem; } tree arg2 = gimple_call_arg (stmt, 1); if (valueize && TREE_CODE (arg2) == SSA_NAME) { tree tem = valueize (arg2); if (tem) arg2 = tem; } tree arg3 = gimple_call_arg (stmt, 2); if (valueize && TREE_CODE (arg3) == SSA_NAME) { tree tem = valueize (arg3); if (tem) arg3 = tem; } *rcode = DECL_FUNCTION_CODE (decl); ops[0] = arg1; ops[1] = arg2; ops[2] = arg3; return gimple_resimplify3 (seq, rcode, type, ops, valueize); } default: return false; } } break; case GIMPLE_COND: { tree lhs = gimple_cond_lhs (stmt); if (valueize && TREE_CODE (lhs) == SSA_NAME) { tree tem = valueize (lhs); if (tem) lhs = tem; } tree rhs = gimple_cond_rhs (stmt); if (valueize && TREE_CODE (rhs) == SSA_NAME) { tree tem = valueize (rhs); if (tem) rhs = tem; } *rcode = gimple_cond_code (stmt); ops[0] = lhs; ops[1] = rhs; return gimple_resimplify2 (seq, rcode, boolean_type_node, ops, valueize); } default: break; } return false; }
static tree independent_of_stmt_p (tree expr, gimple at, gimple_stmt_iterator gsi) { basic_block bb, call_bb, at_bb; edge e; edge_iterator ei; if (is_gimple_min_invariant (expr)) return expr; if (TREE_CODE (expr) != SSA_NAME) return NULL_TREE; /* Mark the blocks in the chain leading to the end. */ at_bb = gimple_bb (at); call_bb = gimple_bb (gsi_stmt (gsi)); for (bb = call_bb; bb != at_bb; bb = single_succ (bb)) bb->aux = &bb->aux; bb->aux = &bb->aux; while (1) { at = SSA_NAME_DEF_STMT (expr); bb = gimple_bb (at); /* The default definition or defined before the chain. */ if (!bb || !bb->aux) break; if (bb == call_bb) { for (; !gsi_end_p (gsi); gsi_next (&gsi)) if (gsi_stmt (gsi) == at) break; if (!gsi_end_p (gsi)) expr = NULL_TREE; break; } if (gimple_code (at) != GIMPLE_PHI) { expr = NULL_TREE; break; } FOR_EACH_EDGE (e, ei, bb->preds) if (e->src->aux) break; gcc_assert (e); expr = PHI_ARG_DEF_FROM_EDGE (at, e); if (TREE_CODE (expr) != SSA_NAME) { /* The value is a constant. */ break; } } /* Unmark the blocks. */ for (bb = call_bb; bb != at_bb; bb = single_succ (bb)) bb->aux = NULL; bb->aux = NULL; return expr; }
static bool ifcombine_iforif (basic_block inner_cond_bb, basic_block outer_cond_bb) { gimple inner_cond, outer_cond; tree name1, name2, bits1, bits2; inner_cond = last_stmt (inner_cond_bb); if (!inner_cond || gimple_code (inner_cond) != GIMPLE_COND) return false; outer_cond = last_stmt (outer_cond_bb); if (!outer_cond || gimple_code (outer_cond) != GIMPLE_COND) return false; /* See if we have two bit tests of the same name in both tests. In that case remove the outer test and change the inner one to test for name & (bits1 | bits2) != 0. */ if (recognize_bits_test (inner_cond, &name1, &bits1) && recognize_bits_test (outer_cond, &name2, &bits2)) { gimple_stmt_iterator gsi; tree t; /* Find the common name which is bit-tested. */ if (name1 == name2) ; else if (bits1 == bits2) { t = name2; name2 = bits2; bits2 = t; t = name1; name1 = bits1; bits1 = t; } else if (name1 == bits2) { t = name2; name2 = bits2; bits2 = t; } else if (bits1 == name2) { t = name1; name1 = bits1; bits1 = t; } else return false; /* As we strip non-widening conversions in finding a common name that is tested make sure to end up with an integral type for building the bit operations. */ if (TYPE_PRECISION (TREE_TYPE (bits1)) >= TYPE_PRECISION (TREE_TYPE (bits2))) { bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1); name1 = fold_convert (TREE_TYPE (bits1), name1); bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2); bits2 = fold_convert (TREE_TYPE (bits1), bits2); } else { bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2); name1 = fold_convert (TREE_TYPE (bits2), name1); bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1); bits1 = fold_convert (TREE_TYPE (bits2), bits1); } /* Do it. */ gsi = gsi_for_stmt (inner_cond); t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (NE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); gimple_cond_set_condition_from_tree (inner_cond, t); update_stmt (inner_cond); /* Leave CFG optimization to cfg_cleanup. */ gimple_cond_set_condition_from_tree (outer_cond, boolean_false_node); update_stmt (outer_cond); if (dump_file) { fprintf (dump_file, "optimizing bits or bits test to "); print_generic_expr (dump_file, name1, 0); fprintf (dump_file, " & T != 0\nwith temporary T = "); print_generic_expr (dump_file, bits1, 0); fprintf (dump_file, " | "); print_generic_expr (dump_file, bits2, 0); fprintf (dump_file, "\n"); } return true; } /* See if we have two comparisons that we can merge into one. This happens for C++ operator overloading where for example GE_EXPR is implemented as GT_EXPR || EQ_EXPR. */ else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison && operand_equal_p (gimple_cond_lhs (inner_cond), gimple_cond_lhs (outer_cond), 0) && operand_equal_p (gimple_cond_rhs (inner_cond), gimple_cond_rhs (outer_cond), 0)) { enum tree_code code1 = gimple_cond_code (inner_cond); enum tree_code code2 = gimple_cond_code (outer_cond); enum tree_code code; tree t; #define CHK(a,b) ((code1 == a ## _EXPR && code2 == b ## _EXPR) \ || (code2 == a ## _EXPR && code1 == b ## _EXPR)) /* Merge the two condition codes if possible. 
*/ if (code1 == code2) code = code1; else if (CHK (EQ, LT)) code = LE_EXPR; else if (CHK (EQ, GT)) code = GE_EXPR; else if (CHK (LT, LE)) code = LE_EXPR; else if (CHK (GT, GE)) code = GE_EXPR; else if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (inner_cond))) || flag_unsafe_math_optimizations) { if (CHK (LT, GT)) code = NE_EXPR; else if (CHK (LT, NE)) code = NE_EXPR; else if (CHK (GT, NE)) code = NE_EXPR; else return false; } /* We could check for combinations leading to trivial true/false. */ else return false; #undef CHK /* Do it. */ t = fold_build2 (code, boolean_type_node, gimple_cond_lhs (outer_cond), gimple_cond_rhs (outer_cond)); t = canonicalize_cond_expr_cond (t); if (!t) return false; gimple_cond_set_condition_from_tree (inner_cond, t); update_stmt (inner_cond); /* Leave CFG optimization to cfg_cleanup. */ gimple_cond_set_condition_from_tree (outer_cond, boolean_false_node); update_stmt (outer_cond); if (dump_file) { fprintf (dump_file, "optimizing two comparisons to "); print_generic_expr (dump_file, t, 0); fprintf (dump_file, "\n"); } return true; } return false; }
static bool ifcombine_ifandif (basic_block inner_cond_bb, basic_block outer_cond_bb) { gimple_stmt_iterator gsi; gimple inner_cond, outer_cond; tree name1, name2, bit1, bit2; inner_cond = last_stmt (inner_cond_bb); if (!inner_cond || gimple_code (inner_cond) != GIMPLE_COND) return false; outer_cond = last_stmt (outer_cond_bb); if (!outer_cond || gimple_code (outer_cond) != GIMPLE_COND) return false; /* See if we test a single bit of the same name in both tests. In that case remove the outer test, merging both else edges, and change the inner one to test for name & (bit1 | bit2) == (bit1 | bit2). */ if (recognize_single_bit_test (inner_cond, &name1, &bit1) && recognize_single_bit_test (outer_cond, &name2, &bit2) && name1 == name2) { tree t, t2; /* Do it. */ gsi = gsi_for_stmt (inner_cond); t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1), build_int_cst (TREE_TYPE (name1), 1), bit1); t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1), build_int_cst (TREE_TYPE (name1), 1), bit2); t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t); t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (EQ_EXPR, boolean_type_node, t2, t); gimple_cond_set_condition_from_tree (inner_cond, t); update_stmt (inner_cond); /* Leave CFG optimization to cfg_cleanup. */ gimple_cond_set_condition_from_tree (outer_cond, boolean_true_node); update_stmt (outer_cond); if (dump_file) { fprintf (dump_file, "optimizing double bit test to "); print_generic_expr (dump_file, name1, 0); fprintf (dump_file, " & T == T\nwith temporary T = (1 << "); print_generic_expr (dump_file, bit1, 0); fprintf (dump_file, ") | (1 << "); print_generic_expr (dump_file, bit2, 0); fprintf (dump_file, ")\n"); } return true; } return false; }
static void eliminate_tail_call (struct tailcall *t) { tree param, rslt; gimple stmt, call; tree arg; size_t idx; basic_block bb, first; edge e; gimple phi; gimple_stmt_iterator gsi; gimple orig_stmt; stmt = orig_stmt = gsi_stmt (t->call_gsi); bb = gsi_bb (t->call_gsi); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Eliminated tail recursion in bb %d : ", bb->index); print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM); fprintf (dump_file, "\n"); } gcc_assert (is_gimple_call (stmt)); first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); /* Remove the code after call_gsi that will become unreachable. The possibly unreachable code in other blocks is removed later in cfg cleanup. */ gsi = t->call_gsi; gsi_next (&gsi); while (!gsi_end_p (gsi)) { gimple t = gsi_stmt (gsi); /* Do not remove the return statement, so that redirect_edge_and_branch sees how the block ends. */ if (gimple_code (t) == GIMPLE_RETURN) break; gsi_remove (&gsi, true); release_defs (t); } /* Number of executions of function has reduced by the tailcall. */ e = single_succ_edge (gsi_bb (t->call_gsi)); decrease_profile (EXIT_BLOCK_PTR_FOR_FN (cfun), e->count, EDGE_FREQUENCY (e)); decrease_profile (ENTRY_BLOCK_PTR_FOR_FN (cfun), e->count, EDGE_FREQUENCY (e)); if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) decrease_profile (e->dest, e->count, EDGE_FREQUENCY (e)); /* Replace the call by a jump to the start of function. */ e = redirect_edge_and_branch (single_succ_edge (gsi_bb (t->call_gsi)), first); gcc_assert (e); PENDING_STMT (e) = NULL; /* Add phi node entries for arguments. The ordering of the phi nodes should be the same as the ordering of the arguments. */ for (param = DECL_ARGUMENTS (current_function_decl), idx = 0, gsi = gsi_start_phis (first); param; param = DECL_CHAIN (param), idx++) { if (!arg_needs_copy_p (param)) continue; arg = gimple_call_arg (stmt, idx); phi = gsi_stmt (gsi); gcc_assert (param == SSA_NAME_VAR (PHI_RESULT (phi))); add_phi_arg (phi, arg, e, gimple_location (stmt)); gsi_next (&gsi); } /* Update the values of accumulators. */ adjust_accumulator_values (t->call_gsi, t->mult, t->add, e); call = gsi_stmt (t->call_gsi); rslt = gimple_call_lhs (call); if (rslt != NULL_TREE) { /* Result of the call will no longer be defined. So adjust the SSA_NAME_DEF_STMT accordingly. */ SSA_NAME_DEF_STMT (rslt) = gimple_build_nop (); } gsi_remove (&t->call_gsi, true); release_defs (call); }
static void find_tail_calls (basic_block bb, struct tailcall **ret) { tree ass_var = NULL_TREE, ret_var, func, param; gimple stmt, call = NULL; gimple_stmt_iterator gsi, agsi; bool tail_recursion; struct tailcall *nw; edge e; tree m, a; basic_block abb; size_t idx; tree var; if (!single_succ_p (bb)) return; for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) { stmt = gsi_stmt (gsi); /* Ignore labels, returns, clobbers and debug stmts. */ if (gimple_code (stmt) == GIMPLE_LABEL || gimple_code (stmt) == GIMPLE_RETURN || gimple_clobber_p (stmt) || is_gimple_debug (stmt)) continue; /* Check for a call. */ if (is_gimple_call (stmt)) { call = stmt; ass_var = gimple_call_lhs (stmt); break; } /* If the statement references memory or volatile operands, fail. */ if (gimple_references_memory_p (stmt) || gimple_has_volatile_ops (stmt)) return; } if (gsi_end_p (gsi)) { edge_iterator ei; /* Recurse to the predecessors. */ FOR_EACH_EDGE (e, ei, bb->preds) find_tail_calls (e->src, ret); return; } /* If the LHS of our call is not just a simple register, we can't transform this into a tail or sibling call. This situation happens, in (e.g.) "*p = foo()" where foo returns a struct. In this case we won't have a temporary here, but we need to carry out the side effect anyway, so tailcall is impossible. ??? In some situations (when the struct is returned in memory via invisible argument) we could deal with this, e.g. by passing 'p' itself as that argument to foo, but it's too early to do this here, and expand_call() will not handle it anyway. If it ever can, then we need to revisit this here, to allow that situation. */ if (ass_var && !is_gimple_reg (ass_var)) return; /* We found the call, check whether it is suitable. */ tail_recursion = false; func = gimple_call_fndecl (call); if (func && !DECL_BUILT_IN (func) && recursive_call_p (current_function_decl, func)) { tree arg; for (param = DECL_ARGUMENTS (func), idx = 0; param && idx < gimple_call_num_args (call); param = DECL_CHAIN (param), idx ++) { arg = gimple_call_arg (call, idx); if (param != arg) { /* Make sure there are no problems with copying. The parameter have a copyable type and the two arguments must have reasonably equivalent types. The latter requirement could be relaxed if we emitted a suitable type conversion statement. */ if (!is_gimple_reg_type (TREE_TYPE (param)) || !useless_type_conversion_p (TREE_TYPE (param), TREE_TYPE (arg))) break; /* The parameter should be a real operand, so that phi node created for it at the start of the function has the meaning of copying the value. This test implies is_gimple_reg_type from the previous condition, however this one could be relaxed by being more careful with copying the new value of the parameter (emitting appropriate GIMPLE_ASSIGN and updating the virtual operands). */ if (!is_gimple_reg (param)) break; } } if (idx == gimple_call_num_args (call) && !param) tail_recursion = true; } /* Make sure the tail invocation of this function does not refer to local variables. */ FOR_EACH_LOCAL_DECL (cfun, idx, var) { if (TREE_CODE (var) != PARM_DECL && auto_var_in_fn_p (var, cfun->decl) && (ref_maybe_used_by_stmt_p (call, var) || call_may_clobber_ref_p (call, var))) return; } /* Now check the statements after the call. None of them has virtual operands, so they may only depend on the call through its return value. The return value should also be dependent on each of them, since we are running after dce. 
*/ m = NULL_TREE; a = NULL_TREE; abb = bb; agsi = gsi; while (1) { tree tmp_a = NULL_TREE; tree tmp_m = NULL_TREE; gsi_next (&agsi); while (gsi_end_p (agsi)) { ass_var = propagate_through_phis (ass_var, single_succ_edge (abb)); abb = single_succ (abb); agsi = gsi_start_bb (abb); } stmt = gsi_stmt (agsi); if (gimple_code (stmt) == GIMPLE_LABEL) continue; if (gimple_code (stmt) == GIMPLE_RETURN) break; if (gimple_clobber_p (stmt)) continue; if (is_gimple_debug (stmt)) continue; if (gimple_code (stmt) != GIMPLE_ASSIGN) return; /* This is a gimple assign. */ if (! process_assignment (stmt, gsi, &tmp_m, &tmp_a, &ass_var)) return; if (tmp_a) { tree type = TREE_TYPE (tmp_a); if (a) a = fold_build2 (PLUS_EXPR, type, fold_convert (type, a), tmp_a); else a = tmp_a; } if (tmp_m) { tree type = TREE_TYPE (tmp_m); if (m) m = fold_build2 (MULT_EXPR, type, fold_convert (type, m), tmp_m); else m = tmp_m; if (a) a = fold_build2 (MULT_EXPR, type, fold_convert (type, a), tmp_m); } } /* See if this is a tail call we can handle. */ ret_var = gimple_return_retval (stmt); /* We may proceed if there either is no return value, or the return value is identical to the call's return. */ if (ret_var && (ret_var != ass_var)) return; /* If this is not a tail recursive call, we cannot handle addends or multiplicands. */ if (!tail_recursion && (m || a)) return; /* For pointers only allow additions. */ if (m && POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl)))) return; nw = XNEW (struct tailcall); nw->call_gsi = gsi; nw->tail_recursion = tail_recursion; nw->mult = m; nw->add = a; nw->next = *ret; *ret = nw; }