/* Create a clone of edge E in node N, attached to call statement CALL_STMT.
   STMT_UID is the LTO statement uid for the clone; COUNT_SCALE and
   FREQ_SCALE scale the profile count/frequency of the original edge.
   When UPDATE_ORIGINAL, the cloned count is subtracted from E.
   Returns the new edge.  */
struct cgraph_edge *
cgraph_clone_edge (struct cgraph_edge *e, struct cgraph_node *n,
		   gimple call_stmt, unsigned stmt_uid, gcov_type count_scale,
		   int freq_scale, bool update_original)
{
  struct cgraph_edge *new_edge;
  /* Scale the original edge's execution count for the clone.  */
  gcov_type count = apply_probability (e->count, count_scale);
  gcov_type freq;

  /* We do not want to ignore loop nest after frequency drops to 0.  */
  if (!freq_scale)
    freq_scale = 1;
  freq = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;
  if (freq > CGRAPH_FREQ_MAX)
    freq = CGRAPH_FREQ_MAX;

  if (e->indirect_unknown_callee)
    {
      tree decl;

      /* If the indirect call has meanwhile been folded to a known
	 target in the clone's statement, create a direct edge.  */
      if (call_stmt && (decl = gimple_call_fndecl (call_stmt)))
	{
	  struct cgraph_node *callee = cgraph_get_node (decl);
	  gcc_checking_assert (callee);
	  new_edge = cgraph_create_edge (n, callee, call_stmt, count, freq);
	}
      else
	{
	  new_edge = cgraph_create_indirect_edge (n, call_stmt,
						  e->indirect_info->ecf_flags,
						  count, freq);
	  /* Copy the whole indirect-call descriptor to the clone.  */
	  *new_edge->indirect_info = *e->indirect_info;
	}
    }
  else
    {
      new_edge = cgraph_create_edge (n, e->callee, call_stmt, count, freq);
      if (e->indirect_info)
	{
	  new_edge->indirect_info
	    = ggc_alloc_cleared_cgraph_indirect_call_info ();
	  *new_edge->indirect_info = *e->indirect_info;
	}
    }

  new_edge->inline_failed = e->inline_failed;
  new_edge->indirect_inlining_edge = e->indirect_inlining_edge;
  new_edge->lto_stmt_uid = stmt_uid;
  /* Clone flags that depend on call_stmt availability manually.  */
  new_edge->can_throw_external = e->can_throw_external;
  new_edge->call_stmt_cannot_inline_p = e->call_stmt_cannot_inline_p;
  if (update_original)
    {
      /* Move the cloned count out of the original edge, clamping at
	 zero so counts never go negative.  */
      e->count -= new_edge->count;
      if (e->count < 0)
	e->count = 0;
    }
  cgraph_call_edge_duplication_hooks (e, new_edge);
  return new_edge;
}
/* Build the shrink-wrapping guard conditions for builtin call BI_CALL,
   appending generated condition statements to CONDS and recording how
   many were produced in *NCONDS.  */

static void
gen_shrink_wrap_conditions (gimple bi_call, vec<gimple> conds,
			    unsigned int *nconds)
{
  tree fn;
  enum built_in_function fnc;

  gcc_assert (nconds && conds.exists ());
  gcc_assert (conds.length () == 0);
  gcc_assert (is_gimple_call (bi_call));

  fn = gimple_call_fndecl (bi_call);
  gcc_assert (fn && DECL_BUILT_IN (fn));
  fnc = DECL_FUNCTION_CODE (fn);
  *nconds = 0;

  if (fnc == BUILT_IN_POW)
    gen_conditions_for_pow (bi_call, conds, nconds);
  else
    {
      /* Every other handled builtin is unary: guard its single
	 argument against the no-error input domain.  */
      inp_domain domain = get_no_error_domain (fnc);
      *nconds = 0;
      gen_conditions_for_domain (gimple_call_arg (bi_call, 0), domain,
				 conds, nconds);
    }
}
/* If CALL is a call to one of the builtins that return their first
   argument (memcpy and friends, __builtin_assume_aligned), return that
   first argument; otherwise return NULL_TREE.  */

static tree
pass_through_call (const_gimple call)
{
  tree fndecl = gimple_call_fndecl (call);

  if (!fndecl || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
    return NULL_TREE;

  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case BUILT_IN_MEMCPY:
    case BUILT_IN_MEMMOVE:
    case BUILT_IN_MEMSET:
    case BUILT_IN_STRCPY:
    case BUILT_IN_STRNCPY:
    case BUILT_IN_STRCAT:
    case BUILT_IN_STRNCAT:
    case BUILT_IN_MEMCPY_CHK:
    case BUILT_IN_MEMMOVE_CHK:
    case BUILT_IN_MEMSET_CHK:
    case BUILT_IN_STRCPY_CHK:
    case BUILT_IN_STRNCPY_CHK:
    case BUILT_IN_STPNCPY_CHK:
    case BUILT_IN_STRCAT_CHK:
    case BUILT_IN_STRNCAT_CHK:
    case BUILT_IN_ASSUME_ALIGNED:
      /* All of these pass their destination through.  */
      if (gimple_call_num_args (call) >= 1)
	return gimple_call_arg (call, 0);
      break;
    default:
      break;
    }

  return NULL_TREE;
}
/* Walk every call statement in FN and mark each known callee's struct
   function so that its callers may not allocate a frame header word on
   its behalf.  No-op when FN has no CFG.  */

static void
set_callers_may_not_allocate_frame (function *fn)
{
  basic_block bb;
  gimple_stmt_iterator gsi;

  if (fn->cfg == NULL)
    return;

  FOR_EACH_BB_FN (bb, fn)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);

	if (!is_gimple_call (stmt))
	  continue;

	tree fndecl = gimple_call_fndecl (stmt);
	if (fndecl == NULL)
	  continue;

	function *callee = DECL_STRUCT_FUNCTION (fndecl);
	if (callee != NULL)
	  callee->machine->callers_may_not_allocate_frame = true;
      }
}
/* Pass body: for each aggregate-returning call whose result location is
   not clobbered by the call itself, set the return-slot-optimization
   flag so the callee constructs its result directly in the destination.
   Returns 0 (no TODO flags).

   Fix: the original text was truncated — the non-void function was
   missing its final `return 0;` and closing brace.  */
static unsigned int
execute_return_slot_opt (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  bool slot_opt_p;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt)
	      && !gimple_call_return_slot_opt_p (stmt)
	      && aggregate_value_p (TREE_TYPE (gimple_call_lhs (stmt)),
				    gimple_call_fndecl (stmt)))
	    {
	      /* Check if the location being assigned to is clobbered by
		 the call.  */
	      slot_opt_p = dest_safe_for_nrv_p (stmt);
	      gimple_call_set_return_slot_opt (stmt, slot_opt_p);
	    }
	}
    }
  return 0;
}
/* Depth-first walk of the callgraph starting at NODE: print each
   caller/callee pair once (skipping builtins) and recurse into the
   callee.  VISITED records already-handled function decls so cycles
   and repeats terminate.  */
static void walk_functions(tree_set *visited, const struct cgraph_node *node)
{
	struct cgraph_edge *e;
	const_tree caller;

	if (!node)
		return;

	caller = NODE_DECL(node);
	/* Already walked this function; stops recursion on call cycles.  */
	if (pointer_set_insert(visited, caller))
		return;

	for (e = node->callees; e; e = e->next_callee) {
		const struct cgraph_node *next_node;
		/* NOTE(review): assumes every edge on the callees list has a
		   resolvable fndecl on its call statement; DECL_BUILT_IN
		   would dereference NULL otherwise — confirm that indirect
		   calls never appear here.  */
		tree callee = gimple_call_fndecl(e->call_stmt);

		if (DECL_BUILT_IN(callee))
			continue;

		print_function(caller, callee);
		next_node = cgraph_get_node(callee);
		walk_functions(visited, next_node);
	}
}
/* If STMT is a direct call to a function named FUNCNAME, verify that it
   was given NUM_ARGS arguments and return the call statement.  Return
   NULL otherwise; a wrong argument count is additionally diagnosed
   with error_at.  */

static gcall *
check_for_named_call (gimple *stmt, const char *funcname,
		      unsigned int num_args)
{
  gcc_assert (funcname);

  gcall *call = dyn_cast <gcall *> (stmt);
  if (call == NULL)
    return NULL;

  tree fndecl = gimple_call_fndecl (call);
  if (fndecl == NULL)
    return NULL;

  if (strcmp (IDENTIFIER_POINTER (DECL_NAME (fndecl)), funcname) != 0)
    return NULL;

  if (gimple_call_num_args (call) != num_args)
    {
      error_at (stmt->location, "expected number of args: %i (got %i)",
		num_args, gimple_call_num_args (call));
      return NULL;
    }

  return call;
}
/* Find all checks in current function and store info about them in
   check_infos (one bb_checks entry per basic block, indexed by block
   number).  Only calls to the lower/upper bound checker builtins are
   recorded.  */
static void
chkp_gather_checks_info (void)
{
  basic_block bb;
  gimple_stmt_iterator i;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Gathering information about checks...\n");

  chkp_init_check_info ();

  FOR_EACH_BB_FN (bb, cfun)
    {
      /* Accumulator for the checks found in this block.  */
      struct bb_checks *bbc = &check_infos[bb->index];

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Searching checks in BB%d...\n", bb->index);

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
	{
	  gimple *stmt = gsi_stmt (i);

	  if (gimple_code (stmt) != GIMPLE_CALL)
	    continue;

	  /* Only the bounds-check builtins are of interest.  */
	  if (gimple_call_fndecl (stmt) == chkp_checkl_fndecl
	      || gimple_call_fndecl (stmt) == chkp_checku_fndecl)
	    {
	      struct check_info ci;

	      chkp_fill_check_info (stmt, &ci);
	      bbc->checks.safe_push (ci);

	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Adding check information:\n");
		  fprintf (dump_file, " bounds: ");
		  print_generic_expr (dump_file, ci.bounds, 0);
		  fprintf (dump_file, "\n address: ");
		  chkp_print_addr (ci.addr);
		  fprintf (dump_file, "\n check: ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		}
	    }
	}
    }
}
/* Compute the size of the object allocated by CALL, in bytes.
   OBJECT_SIZE_TYPE selects minimum/maximum object-size mode and indexes
   the unknown[] fallback.  Size arguments come either from the callee's
   alloc_size attribute or from knowledge of the standard allocation
   builtins; they must be INTEGER_CSTs or unknown[] is returned.

   Fix: the BUILT_IN_MALLOC/ALLOCA case used to fall through silently
   into `default:` after `arg1 = 0;` — now terminated with an explicit
   `break;` (same behavior, no implicit-fallthrough hazard).  */
static unsigned HOST_WIDE_INT
alloc_object_size (const_gimple call, int object_size_type)
{
  tree callee, bytes = NULL_TREE;
  tree alloc_size;
  int arg1 = -1, arg2 = -1;

  gcc_assert (is_gimple_call (call));

  callee = gimple_call_fndecl (call);
  if (!callee)
    return unknown[object_size_type];

  /* The alloc_size attribute gives 1-based positions of the size
     argument(s); convert them to 0-based call argument indices.  */
  alloc_size = lookup_attribute ("alloc_size",
				 TYPE_ATTRIBUTES (TREE_TYPE (callee)));
  if (alloc_size && TREE_VALUE (alloc_size))
    {
      tree p = TREE_VALUE (alloc_size);

      arg1 = TREE_INT_CST_LOW (TREE_VALUE (p)) - 1;
      if (TREE_CHAIN (p))
	arg2 = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (p))) - 1;
    }

  if (DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (callee))
      {
      case BUILT_IN_CALLOC:
	arg2 = 1;
	/* fall through */
      case BUILT_IN_MALLOC:
      case BUILT_IN_ALLOCA:
      case BUILT_IN_ALLOCA_WITH_ALIGN:
	arg1 = 0;
	break;
      default:
	break;
      }

  /* Both indices must be in range and refer to constant arguments.  */
  if (arg1 < 0 || arg1 >= (int)gimple_call_num_args (call)
      || TREE_CODE (gimple_call_arg (call, arg1)) != INTEGER_CST
      || (arg2 >= 0
	  && (arg2 >= (int)gimple_call_num_args (call)
	      || TREE_CODE (gimple_call_arg (call, arg2)) != INTEGER_CST)))
    return unknown[object_size_type];

  if (arg2 >= 0)
    /* calloc-style: the allocation is the product of both arguments.  */
    bytes = size_binop (MULT_EXPR,
			fold_convert (sizetype, gimple_call_arg (call, arg1)),
			fold_convert (sizetype, gimple_call_arg (call, arg2)));
  else if (arg1 >= 0)
    bytes = fold_convert (sizetype, gimple_call_arg (call, arg1));

  if (bytes && host_integerp (bytes, 1))
    return tree_low_cst (bytes, 1);

  return unknown[object_size_type];
}
/* For bounds used in CI check if bounds are produced by intersection and we may use outer bounds instead. If transformation is possible then fix check statement and recompute its info. */ static void chkp_use_outer_bounds_if_possible (struct check_info *ci) { gimple *bnd_def; tree bnd1, bnd2, bnd_res = NULL; int check_res1, check_res2; if (TREE_CODE (ci->bounds) != SSA_NAME) return; bnd_def = SSA_NAME_DEF_STMT (ci->bounds); if (gimple_code (bnd_def) != GIMPLE_CALL || gimple_call_fndecl (bnd_def) != chkp_intersect_fndecl) return; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Check if bounds intersection is redundant: \n"); fprintf (dump_file, " check: "); print_gimple_stmt (dump_file, ci->stmt, 0, 0); fprintf (dump_file, " intersection: "); print_gimple_stmt (dump_file, bnd_def, 0, 0); fprintf (dump_file, "\n"); } bnd1 = gimple_call_arg (bnd_def, 0); bnd2 = gimple_call_arg (bnd_def, 1); check_res1 = chkp_get_check_result (ci, bnd1); check_res2 = chkp_get_check_result (ci, bnd2); if (check_res1 == 1) bnd_res = bnd2; else if (check_res1 == -1) bnd_res = bnd1; else if (check_res2 == 1) bnd_res = bnd1; else if (check_res2 == -1) bnd_res = bnd2; if (bnd_res) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " action: use "); print_generic_expr (dump_file, bnd2, 0); fprintf (dump_file, " instead of "); print_generic_expr (dump_file, ci->bounds, 0); fprintf (dump_file, "\n"); } ci->bounds = bnd_res; gimple_call_set_arg (ci->stmt, 1, bnd_res); update_stmt (ci->stmt); chkp_fill_check_info (ci->stmt, ci); } }
/* Handle an unknown-target indirect call at *GSI: attach an indirect-call
   value-profile histogram built from the AutoFDO target MAP, and when
   TRANSFORM is set, promote the hottest target to a speculative direct
   call and inline it.  */
static void
afdo_indirect_call (gimple_stmt_iterator *gsi, const icall_target_map &map,
		    bool transform)
{
  gimple gs = gsi_stmt (*gsi);
  tree callee;

  if (map.size () == 0)
    return;
  gcall *stmt = dyn_cast <gcall *> (gs);
  /* Only genuine indirect calls (no known fndecl) are of interest.  */
  if ((!stmt) || gimple_call_fndecl (stmt) != NULL_TREE)
    return;

  callee = gimple_call_fn (stmt);

  histogram_value hist = gimple_alloc_histogram_value (
      cfun, HIST_TYPE_INDIR_CALL, stmt, callee);
  hist->n_counters = 3;
  hist->hvalue.counters = XNEWVEC (gcov_type, hist->n_counters);
  gimple_add_histogram_value (cfun, stmt, hist);

  /* Find the hottest target and the total count over all targets.  */
  gcov_type total = 0;
  icall_target_map::const_iterator max_iter = map.end ();

  for (icall_target_map::const_iterator iter = map.begin ();
       iter != map.end (); ++iter)
    {
      total += iter->second;
      if (max_iter == map.end () || max_iter->second < iter->second)
	max_iter = iter;
    }

  /* counters[0] smuggles the hottest target's name as a pointer value;
     it is cast back to a string below.  counters[1]/[2] hold the top
     target count and the total count.  */
  hist->hvalue.counters[0]
      = (unsigned long long)afdo_string_table->get_name (max_iter->first);
  hist->hvalue.counters[1] = max_iter->second;
  hist->hvalue.counters[2] = total;

  if (!transform)
    return;

  struct cgraph_edge *indirect_edge
      = cgraph_node::get (current_function_decl)->get_edge (stmt);
  struct cgraph_node *direct_call = cgraph_node::get_for_asmname (
      get_identifier ((const char *) hist->hvalue.counters[0]));

  if (direct_call == NULL || !check_ic_target (stmt, direct_call))
    return;
  /* A target without a body cannot be inlined.  */
  if (DECL_STRUCT_FUNCTION (direct_call->decl) == NULL)
    return;

  struct cgraph_edge *new_edge
      = indirect_edge->make_speculative (direct_call, 0, 0);
  new_edge->redirect_call_stmt_to_callee ();
  gimple_remove_histogram_value (cfun, stmt, hist);
  inline_call (new_edge, true, NULL, NULL, false);
}
/* Fill check_info structure *CI with information about check STMT:
   the checked address polynomial, the bounds operand, the check kind
   (lower vs upper, decided by which checker builtin is called) and the
   statement itself.  */
static void
chkp_fill_check_info (gimple *stmt, struct check_info *ci)
{
  ci->addr.pol.create (0);
  ci->bounds = gimple_call_arg (stmt, 1);
  chkp_collect_value (gimple_call_arg (stmt, 0), ci->addr);
  if (gimple_call_fndecl (stmt) == chkp_checkl_fndecl)
    ci->type = CHECK_LOWER_BOUND;
  else
    ci->type = CHECK_UPPER_BOUND;
  ci->stmt = stmt;
}
/* Try to fold the AArch64 vector builtin call at *GSI into generic
   GIMPLE (the reduction builtins become REDUC_*_EXPR assignments).
   Returns true iff the statement was replaced.  */
bool
aarch64_gimple_fold_builtin (gimple_stmt_iterator *gsi)
{
  bool changed = false;
  gimple stmt = gsi_stmt (*gsi);
  tree call = gimple_call_fn (stmt);
  tree fndecl;
  gimple new_stmt = NULL;

  if (call)
    {
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl)
	{
	  int fcode = DECL_FUNCTION_CODE (fndecl);
	  int nargs = gimple_call_num_args (stmt);
	  tree *args = (nargs > 0
			? gimple_call_arg_ptr (stmt, 0)
			: &error_mark_node);

	  /* We use gimple's REDUC_(PLUS|MIN|MAX)_EXPRs for float, signed int
	     and unsigned int; it will distinguish according to the types of
	     the arguments to the __builtin.  */
	  /* The BUILTIN_* macros below expand to runs of case labels
	     covering every per-mode variant of the named builtin.  */
	  switch (fcode)
	    {
	      BUILTIN_VALL (UNOP, reduc_plus_scal_, 10)
		new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
						REDUC_PLUS_EXPR, args[0]);
		break;
	      BUILTIN_VDQIF (UNOP, reduc_smax_scal_, 10)
	      BUILTIN_VDQ_BHSI (UNOPU, reduc_umax_scal_, 10)
		new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
						REDUC_MAX_EXPR, args[0]);
		break;
	      BUILTIN_VDQIF (UNOP, reduc_smin_scal_, 10)
	      BUILTIN_VDQ_BHSI (UNOPU, reduc_umin_scal_, 10)
		new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
						REDUC_MIN_EXPR, args[0]);
		break;
	    default:
	      break;
	    }
	}
    }

  if (new_stmt)
    {
      gsi_replace (gsi, new_stmt, true);
      changed = true;
    }

  return changed;
}
/* Return true if builtin call CALL is a candidate for conditional dead
   call elimination: its result is unused and the builtin is one of the
   math functions whose only remaining effect is possibly setting errno,
   which a domain check can guard.  */
static bool
is_call_dce_candidate (gimple call)
{
  tree fn;
  enum built_in_function fnc;

  /* Only potentially dead calls are considered.  */
  if (gimple_call_lhs (call))
    return false;

  fn = gimple_call_fndecl (call);
  /* Must be a normal builtin with a known decl.  */
  if (!fn
      || !DECL_BUILT_IN (fn)
      || (DECL_BUILT_IN_CLASS (fn) != BUILT_IN_NORMAL))
    return false;

  fnc = DECL_FUNCTION_CODE (fn);
  switch (fnc)
    {
      /* Trig functions.  */
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ASIN):
      /* Hyperbolic functions.  */
    CASE_FLT_FN (BUILT_IN_ACOSH):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_COSH):
    CASE_FLT_FN (BUILT_IN_SINH):
      /* Log functions.  */
    CASE_FLT_FN (BUILT_IN_LOG):
    CASE_FLT_FN (BUILT_IN_LOG2):
    CASE_FLT_FN (BUILT_IN_LOG10):
    CASE_FLT_FN (BUILT_IN_LOG1P):
      /* Exp functions.  */
    CASE_FLT_FN (BUILT_IN_EXP):
    CASE_FLT_FN (BUILT_IN_EXP2):
    CASE_FLT_FN (BUILT_IN_EXP10):
    CASE_FLT_FN (BUILT_IN_EXPM1):
    CASE_FLT_FN (BUILT_IN_POW10):
      /* Sqrt.  */
    CASE_FLT_FN (BUILT_IN_SQRT):
      /* Unary functions: a single-argument domain test suffices.  */
      return check_builtin_call (call);
      /* Special one: two argument pow.  */
    case BUILT_IN_POW:
      return check_pow (call);

    default:
      break;
    }

  return false;
}
/* Rebuild the callgraph edges and references of the current function's
   cgraph node from scratch by rescanning its body.  Returns 0 (no
   TODO flags).  */
unsigned int
cgraph_edge::rebuild_edges (void)
{
  basic_block bb;
  cgraph_node *node = cgraph_node::get (current_function_decl);
  gimple_stmt_iterator gsi;

  /* Drop stale edges and references before rescanning.  */
  node->remove_callees ();
  node->remove_all_references ();

  node->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree decl;

	  if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
	    {
	      int freq = compute_call_stmt_bb_frequency (current_function_decl,
							 bb);
	      decl = gimple_call_fndecl (call_stmt);
	      if (decl)
		node->create_edge (cgraph_node::get_create (decl), call_stmt,
				   bb->count, freq);
	      /* Internal function calls get no callgraph edge.  */
	      else if (gimple_call_internal_p (call_stmt))
		;
	      else
		node->create_indirect_edge (call_stmt,
					    gimple_call_flags (call_stmt),
					    bb->count, freq);
	    }
	  node->record_stmt_references (stmt);
	}
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	node->record_stmt_references (gsi_stmt (gsi));
    }
  record_eh_tables (node, cfun);
  gcc_assert (!node->global.inlined_to);
  /* Keep the reference from the original to its instrumented (bounds
     checking) clone, if any.  */
  if (node->instrumented_version
      && !node->instrumentation_clone)
    node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
  return 0;
}
/* Return the decl of the function called by CALL_STMT.  For a direct
   call this is simply the fndecl; for an indirect call, chase the SSA
   definition of the called function pointer via handle_fnptr_assign.
   Returns NULL_TREE when the callee cannot be determined.  */
static tree get_fn_or_fnptr_decl(const gcall *call_stmt)
{
	const_tree fnptr;
	const_gimple def_stmt;
	tree decl = gimple_call_fndecl(call_stmt);

	/* Direct call: done.  */
	if (decl != NULL_TREE)
		return decl;

	fnptr = gimple_call_fn(call_stmt);
	// TODO: should assert this is 0, because nothing else can occur here
	if (is_gimple_constant(fnptr))
		return NULL_TREE;
	def_stmt = get_fnptr_def_stmt(fnptr);
	return handle_fnptr_assign(def_stmt);
}
/* Rebuild the callgraph edges and IPA references of the current
   function's cgraph node from scratch by rescanning its body.
   Returns 0 (no TODO flags).  */
unsigned int
rebuild_cgraph_edges (void)
{
  basic_block bb;
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  gimple_stmt_iterator gsi;

  /* Drop stale edges and references before rescanning.  */
  cgraph_node_remove_callees (node);
  ipa_remove_all_references (&node->ref_list);

  node->count = ENTRY_BLOCK_PTR->count;

  FOR_EACH_BB (bb)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree decl;

	  if (is_gimple_call (stmt))
	    {
	      int freq = compute_call_stmt_bb_frequency (current_function_decl,
							 bb);
	      decl = gimple_call_fndecl (stmt);
	      if (decl)
		cgraph_create_edge (node, cgraph_get_create_node (decl), stmt,
				    bb->count, freq);
	      else
		cgraph_create_indirect_edge (node, stmt,
					     gimple_call_flags (stmt),
					     bb->count, freq);
	    }
	  /* Record loads/stores/address-takings as IPA references.  */
	  walk_stmt_load_store_addr_ops (stmt, node, mark_load, mark_store,
					 mark_address);
	}
      for (gsi = gsi_start (phi_nodes (bb)); !gsi_end_p (gsi); gsi_next (&gsi))
	walk_stmt_load_store_addr_ops (gsi_stmt (gsi), node, mark_load,
				       mark_store, mark_address);
    }
  record_eh_tables (node, cfun);
  gcc_assert (!node->global.inlined_to);
  return 0;
}
/* Warn if STMT assigns a value to itself, either through a plain GIMPLE
   assignment or (when check_operator_eq is enabled) through a call to
   an overloaded operator=.  */

static void
warn_self_assign (gimple stmt)
{
  tree rhs, lhs;

  if (gimple_assign_single_p (stmt))
    {
      /* Plain assignment statement.  */
      rhs = get_real_ref_rhs (gimple_assign_rhs1 (stmt));
      if (rhs == NULL_TREE)
	return;

      lhs = gimple_assign_lhs (stmt);
      if (TREE_CODE (lhs) == SSA_NAME)
	{
	  lhs = SSA_NAME_VAR (lhs);
	  /* Anonymous or compiler-generated SSA names are of no
	     interest to the user.  */
	  if (lhs == NULL_TREE || DECL_ARTIFICIAL (lhs))
	    return;
	}

      compare_and_warn (stmt, lhs, rhs);
      return;
    }

  /* Overloaded operator '=' (if enabled).  */
  if (!check_operator_eq || !is_gimple_call (stmt))
    return;

  tree fdecl = gimple_call_fndecl (stmt);
  if (fdecl == NULL_TREE
      || DECL_NAME (fdecl) != maybe_get_identifier ("operator="))
    return;

  /* If 'operator=' takes reference operands, the arguments will be
     ADDR_EXPR trees.  In this case, just remove the address-taken
     operator before we compare the lhs and rhs.  */
  lhs = gimple_call_arg (stmt, 0);
  if (TREE_CODE (lhs) == ADDR_EXPR)
    lhs = TREE_OPERAND (lhs, 0);
  rhs = gimple_call_arg (stmt, 1);
  if (TREE_CODE (rhs) == ADDR_EXPR)
    rhs = TREE_OPERAND (rhs, 0);
  compare_and_warn (stmt, lhs, rhs);
}
/* Return true if CALL is to a math builtin that can set errno only to
   EDOM (a domain error on its argument), never to ERANGE.  Assumes the
   call has a known fndecl.  */
static bool
edom_only_function (gcall *call)
{
  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call)))
    {
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ASIN):
    CASE_FLT_FN (BUILT_IN_ATAN):
    CASE_FLT_FN (BUILT_IN_COS):
    CASE_FLT_FN (BUILT_IN_SIGNIFICAND):
    CASE_FLT_FN (BUILT_IN_SIN):
    CASE_FLT_FN (BUILT_IN_SQRT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
    CASE_FLT_FN (BUILT_IN_FMOD):
    CASE_FLT_FN (BUILT_IN_REMAINDER):
      return true;

    default:
      return false;
    }
}
/* Return true if the error conditions of the math builtin called by
   CALL can be expressed as a range test on its argument(s), so the call
   can be guarded by shrink-wrapped checks.  Delegates the per-builtin
   validation to check_builtin_call / check_pow.  */
static bool
can_test_argument_range (gcall *call)
{
  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call)))
    {
      /* Trig functions.  */
    CASE_FLT_FN (BUILT_IN_ACOS):
    CASE_FLT_FN (BUILT_IN_ASIN):
      /* Hyperbolic functions.  */
    CASE_FLT_FN (BUILT_IN_ACOSH):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_COSH):
    CASE_FLT_FN (BUILT_IN_SINH):
      /* Log functions.  */
    CASE_FLT_FN (BUILT_IN_LOG):
    CASE_FLT_FN (BUILT_IN_LOG2):
    CASE_FLT_FN (BUILT_IN_LOG10):
    CASE_FLT_FN (BUILT_IN_LOG1P):
      /* Exp functions.  */
    CASE_FLT_FN (BUILT_IN_EXP):
    CASE_FLT_FN (BUILT_IN_EXP2):
    CASE_FLT_FN (BUILT_IN_EXP10):
    CASE_FLT_FN (BUILT_IN_EXPM1):
    CASE_FLT_FN (BUILT_IN_POW10):
      /* Sqrt.  */
    CASE_FLT_FN (BUILT_IN_SQRT):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT):
      /* Unary functions: a single-argument domain test suffices.  */
      return check_builtin_call (call);
      /* Special one: two argument pow.  */
    case BUILT_IN_POW:
      return check_pow (call);

    default:
      break;
    }

  return false;
}
/* Return true if FN might need the incoming frame header: when FN has
   no CFG, when it makes an indirect call, or when any direct callee is
   unknown, weak, contains inline assembly, is not a leaf, or itself
   uses the frame header.  Return false only when every callee is fully
   analyzable and safe.  */

static bool
callees_functions_use_frame_header (function *fn)
{
  basic_block bb;
  gimple_stmt_iterator gsi;

  if (fn->cfg == NULL)
    return true;

  FOR_EACH_BB_FN (bb, fn)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);

	if (!is_gimple_call (stmt))
	  continue;

	tree fndecl = gimple_call_fndecl (stmt);
	/* Indirect call: unknown target, assume the worst.  */
	if (fndecl == NULL)
	  return true;

	function *callee = DECL_STRUCT_FUNCTION (fndecl);
	if (callee == NULL
	    || DECL_WEAK (fndecl)
	    || has_inlined_assembly (callee)
	    || !is_leaf_function (callee)
	    || !callee->machine->does_not_use_frame_header)
	  return true;
      }

  return false;
}
static unsigned int build_cgraph_edges (void) { basic_block bb; struct cgraph_node *node = cgraph_get_node (current_function_decl); struct pointer_set_t *visited_nodes = pointer_set_create (); gimple_stmt_iterator gsi; tree decl; unsigned ix; /* Create the callgraph edges and record the nodes referenced by the function. body. */ FOR_EACH_BB_FN (bb, cfun) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); tree decl; if (is_gimple_debug (stmt)) continue; if (is_gimple_call (stmt)) { int freq = compute_call_stmt_bb_frequency (current_function_decl, bb); decl = gimple_call_fndecl (stmt); if (decl) cgraph_create_edge (node, cgraph_get_create_node (decl), stmt, bb->count, freq); else if (gimple_call_internal_p (stmt)) ; else cgraph_create_indirect_edge (node, stmt, gimple_call_flags (stmt), bb->count, freq); } ipa_record_stmt_references (node, stmt); if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL && gimple_omp_parallel_child_fn (stmt)) { tree fn = gimple_omp_parallel_child_fn (stmt); ipa_record_reference (node, cgraph_get_create_node (fn), IPA_REF_ADDR, stmt); } if (gimple_code (stmt) == GIMPLE_OMP_TASK) { tree fn = gimple_omp_task_child_fn (stmt); if (fn) ipa_record_reference (node, cgraph_get_create_node (fn), IPA_REF_ADDR, stmt); fn = gimple_omp_task_copy_fn (stmt); if (fn) ipa_record_reference (node, cgraph_get_create_node (fn), IPA_REF_ADDR, stmt); } } for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) ipa_record_stmt_references (node, gsi_stmt (gsi)); } /* Look for initializers of constant variables and private statics. */ FOR_EACH_LOCAL_DECL (cfun, ix, decl) if (TREE_CODE (decl) == VAR_DECL && (TREE_STATIC (decl) && !DECL_EXTERNAL (decl)) && !DECL_HAS_VALUE_EXPR_P (decl)) varpool_finalize_decl (decl); record_eh_tables (node, cfun); pointer_set_destroy (visited_nodes); return 0; }
static bool tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size, int upper_bound) { basic_block *body = get_loop_body (loop); gimple_stmt_iterator gsi; unsigned int i; bool after_exit; vec<basic_block> path = get_loop_hot_path (loop); size->overall = 0; size->eliminated_by_peeling = 0; size->last_iteration = 0; size->last_iteration_eliminated_by_peeling = 0; size->num_pure_calls_on_hot_path = 0; size->num_non_pure_calls_on_hot_path = 0; size->non_call_stmts_on_hot_path = 0; size->num_branches_on_hot_path = 0; size->constant_iv = 0; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num); for (i = 0; i < loop->num_nodes; i++) { if (edge_to_cancel && body[i] != edge_to_cancel->src && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src)) after_exit = true; else after_exit = false; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit); for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); int num = estimate_num_insns (stmt, &eni_size_weights); bool likely_eliminated = false; bool likely_eliminated_last = false; bool likely_eliminated_peeled = false; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " size: %3i ", num); print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0); } /* Look for reasons why we might optimize this stmt away. */ if (gimple_has_side_effects (stmt)) ; /* Exit conditional. 
*/ else if (exit && body[i] == exit->src && stmt == last_stmt (exit->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in peeled copies.\n"); likely_eliminated_peeled = true; } else if (edge_to_cancel && body[i] == edge_to_cancel->src && stmt == last_stmt (edge_to_cancel->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in last copy.\n"); likely_eliminated_last = true; } /* Sets of IV variables */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Induction variable computation will" " be folded away.\n"); likely_eliminated = true; } /* Assignments of IV variables. */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop) && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS || constant_after_peeling (gimple_assign_rhs2 (stmt), stmt, loop))) { size->constant_iv = true; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant expression will be folded away.\n"); likely_eliminated = true; } /* Conditionals. */ else if ((gimple_code (stmt) == GIMPLE_COND && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop) /* We don't simplify all constant compares so make sure they are not both constant already. See PR70288. */ && (! is_gimple_min_invariant (gimple_cond_lhs (stmt)) || ! is_gimple_min_invariant (gimple_cond_rhs (stmt)))) || (gimple_code (stmt) == GIMPLE_SWITCH && constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop) && ! 
is_gimple_min_invariant (gimple_switch_index ( as_a <gswitch *> (stmt))))) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant conditional.\n"); likely_eliminated = true; } size->overall += num; if (likely_eliminated || likely_eliminated_peeled) size->eliminated_by_peeling += num; if (!after_exit) { size->last_iteration += num; if (likely_eliminated || likely_eliminated_last) size->last_iteration_eliminated_by_peeling += num; } if ((size->overall * 3 / 2 - size->eliminated_by_peeling - size->last_iteration_eliminated_by_peeling) > upper_bound) { free (body); path.release (); return true; } } } while (path.length ()) { basic_block bb = path.pop (); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_CALL) { int flags = gimple_call_flags (stmt); tree decl = gimple_call_fndecl (stmt); if (decl && DECL_IS_BUILTIN (decl) && is_inexpensive_builtin (decl)) ; else if (flags & (ECF_PURE | ECF_CONST)) size->num_pure_calls_on_hot_path++; else size->num_non_pure_calls_on_hot_path++; size->num_branches_on_hot_path ++; } else if (gimple_code (stmt) != GIMPLE_CALL && gimple_code (stmt) != GIMPLE_DEBUG) size->non_call_stmts_on_hot_path++; if (((gimple_code (stmt) == GIMPLE_COND && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) || constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))) || (gimple_code (stmt) == GIMPLE_SWITCH && !constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop))) && (!exit || bb != exit->src)) size->num_branches_on_hot_path++; } } path.release (); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall, size->eliminated_by_peeling, size->last_iteration, size->last_iteration_eliminated_by_peeling); free (body); return false; }
/* Fix up the CFG after IPA transforms: rescale profile counts to match
   the callgraph node's count, purge edges made dead by calls becoming
   const/pure or noreturn, clean dead EH edges, and terminate dead-end
   blocks with __builtin_unreachable.  Returns accumulated TODO flags.  */
unsigned int
execute_fixup_cfg (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  int todo = gimple_in_ssa_p (cfun) ? TODO_verify_ssa : 0;
  gcov_type count_scale;
  edge e;
  edge_iterator ei;

  /* Scale factor bringing the body's counts in line with the count
     recorded on the callgraph node.  */
  count_scale
      = GCOV_COMPUTE_SCALE (cgraph_get_node (current_function_decl)->count,
			    ENTRY_BLOCK_PTR->count);

  ENTRY_BLOCK_PTR->count = cgraph_get_node (current_function_decl)->count;
  EXIT_BLOCK_PTR->count = apply_scale (EXIT_BLOCK_PTR->count, count_scale);

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
    e->count = apply_scale (e->count, count_scale);

  FOR_EACH_BB (bb)
    {
      bb->count = apply_scale (bb->count, count_scale);
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree decl = is_gimple_call (stmt)
		      ? gimple_call_fndecl (stmt)
		      : NULL;
	  if (decl)
	    {
	      int flags = gimple_call_flags (stmt);
	      if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
		{
		  /* A call that became const/pure can no longer make
		     abnormal transfers.  */
		  if (gimple_purge_dead_abnormal_call_edges (bb))
		    todo |= TODO_cleanup_cfg;

		  if (gimple_in_ssa_p (cfun))
		    {
		      todo |= TODO_update_ssa | TODO_cleanup_cfg;
		      update_stmt (stmt);
		    }
		}

	      if (flags & ECF_NORETURN
		  && fixup_noreturn_call (stmt))
		todo |= TODO_cleanup_cfg;
	    }

	  if (maybe_clean_eh_stmt (stmt)
	      && gimple_purge_dead_eh_edges (bb))
	    todo |= TODO_cleanup_cfg;
	}

      FOR_EACH_EDGE (e, ei, bb->succs)
	e->count = apply_scale (e->count, count_scale);

      /* If we have a basic block with no successors that does not end
	 with a control statement or a noreturn call end it with a call
	 to __builtin_unreachable.  This situation can occur when
	 inlining a noreturn call that does in fact return.  */
      if (EDGE_COUNT (bb->succs) == 0)
	{
	  gimple stmt = last_stmt (bb);
	  if (!stmt
	      || (!is_ctrl_stmt (stmt)
		  && (!is_gimple_call (stmt)
		      || (gimple_call_flags (stmt) & ECF_NORETURN) == 0)))
	    {
	      stmt = gimple_build_call
		  (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
	      gimple_stmt_iterator gsi = gsi_last_bb (bb);
	      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
	    }
	}
    }
  /* Rescaling may have changed the profile quality class.  */
  if (count_scale != REG_BR_PROB_BASE)
    compute_function_frequency ();

  /* We just processed all calls.  */
  if (cfun->gimple_df)
    vec_free (MODIFIED_NORETURN_CALLS (cfun));

  /* Dump a textual representation of the flowgraph.  */
  if (dump_file)
    gimple_dump_cfg (dump_file, dump_flags);

  if (current_loops
      && (todo & TODO_cleanup_cfg))
    loops_state_set (LOOPS_NEED_FIXUP);

  return todo;
}
/* Lower the statement at *GSI: attach the current lexical block, lower
   any nested GIMPLE sequences, and maintain DATA->cannot_fallthru (true
   when control cannot reach past this statement).  Advances *GSI unless
   a sub-lowering routine does so itself.  */
static void
lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
{
  gimple stmt = gsi_stmt (*gsi);

  gimple_set_block (stmt, data->block);

  switch (gimple_code (stmt))
    {
    case GIMPLE_BIND:
      lower_gimple_bind (gsi, data);
      /* Propagate fallthruness.  */
      return;

    case GIMPLE_COND:
    case GIMPLE_GOTO:
    case GIMPLE_SWITCH:
      /* Unconditional control transfers never fall through.  */
      data->cannot_fallthru = true;
      gsi_next (gsi);
      return;

    case GIMPLE_RETURN:
      if (data->cannot_fallthru)
	{
	  /* Unreachable return statement: delete it.  */
	  gsi_remove (gsi, false);
	  /* Propagate fallthruness.  */
	}
      else
	{
	  lower_gimple_return (gsi, data);
	  data->cannot_fallthru = true;
	}
      return;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
	lower_try_catch (gsi, data);
      else
	{
	  /* It must be a GIMPLE_TRY_FINALLY.  */
	  bool cannot_fallthru;
	  lower_sequence (gimple_try_eval_ptr (stmt), data);
	  cannot_fallthru = data->cannot_fallthru;

	  /* The finally clause is always executed after the try clause,
	     so if it does not fall through, then the try-finally will not
	     fall through.  Otherwise, if the try clause does not fall
	     through, then when the finally clause falls through it will
	     resume execution wherever the try clause was going.  So the
	     whole try-finally will only fall through if both the try
	     clause and the finally clause fall through.  */
	  data->cannot_fallthru = false;
	  lower_sequence (gimple_try_cleanup_ptr (stmt), data);
	  data->cannot_fallthru |= cannot_fallthru;
	  gsi_next (gsi);
	}
      return;

    case GIMPLE_EH_ELSE:
      /* Lower both the normal and the exceptional body.  */
      lower_sequence (gimple_eh_else_n_body_ptr (stmt), data);
      lower_sequence (gimple_eh_else_e_body_ptr (stmt), data);
      break;

    case GIMPLE_NOP:
    case GIMPLE_ASM:
    case GIMPLE_ASSIGN:
    case GIMPLE_PREDICT:
    case GIMPLE_LABEL:
    case GIMPLE_EH_MUST_NOT_THROW:
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SECTIONS_SWITCH:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_RETURN:
    case GIMPLE_OMP_ATOMIC_LOAD:
    case GIMPLE_OMP_ATOMIC_STORE:
    case GIMPLE_OMP_CONTINUE:
      break;

    case GIMPLE_CALL:
      {
	tree decl = gimple_call_fndecl (stmt);
	unsigned i;

	/* Expression arguments also belong to the lexical block.  */
	for (i = 0; i < gimple_call_num_args (stmt); i++)
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (EXPR_P (arg))
	      TREE_SET_BLOCK (arg, data->block);
	  }

	if (decl
	    && DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
	    && DECL_FUNCTION_CODE (decl) == BUILT_IN_SETJMP)
	  {
	    lower_builtin_setjmp (gsi);
	    data->cannot_fallthru = false;
	    data->calls_builtin_setjmp = true;
	    return;
	  }

	if (decl && (flags_from_decl_or_type (decl) & ECF_NORETURN))
	  {
	    /* A noreturn call ends the fallthru path.  */
	    data->cannot_fallthru = true;
	    gsi_next (gsi);
	    return;
	  }
      }
      break;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      data->cannot_fallthru = false;
      lower_omp_directive (gsi, data);
      data->cannot_fallthru = false;
      return;

    case GIMPLE_TRANSACTION:
      lower_sequence (gimple_transaction_body_ptr (stmt), data);
      break;

    default:
      gcc_unreachable ();
    }

  data->cannot_fallthru = false;
  gsi_next (gsi);
}
/* Analyze the effects of the call statement CALL on the pure/const state
   LOCAL of the function being analyzed.  IPA is true when running in IPA
   mode (callee effects are then left to interprocedural propagation).
   Updates LOCAL->pure_const_state, LOCAL->looping and LOCAL->can_throw
   in place; never improves the state, only worsens it.  */

static void
check_call (funct_state local, gimple call, bool ipa)
{
  int flags = gimple_call_flags (call);
  tree callee_t = gimple_call_fndecl (call);
  bool possibly_throws = stmt_could_throw_p (call);
  bool possibly_throws_externally = (possibly_throws
				     && stmt_can_throw_external (call));

  if (possibly_throws)
    {
      unsigned int i;
      /* A throwing operand (not just the call itself) can also break
	 const/pure-ness.  */
      for (i = 0; i < gimple_num_ops (call); i++)
	if (gimple_op (call, i)
	    && tree_could_throw_p (gimple_op (call, i)))
	  {
	    if (possibly_throws && cfun->can_throw_non_call_exceptions)
	      {
		if (dump_file)
		  fprintf (dump_file, " operand can throw; looping\n");
		local->looping = true;
	      }
	    if (possibly_throws_externally)
	      {
		if (dump_file)
		  fprintf (dump_file, " operand can throw externally\n");
		local->can_throw = true;
	      }
	  }
    }

  /* The const and pure flags are set by a variety of places in the
     compiler (including here).  If someone has already set the flags for
     the callee, (such as for some of the builtins) we will use them,
     otherwise we will compute our own information.

     Const and pure functions have less clobber effects than other
     functions so we process these first.  Otherwise if it is a call
     outside the compilation unit or an indirect call we punt.  This
     leaves local calls which will be processed by following the call
     graph.  */
  if (callee_t)
    {
      enum pure_const_state_e call_state;
      bool call_looping;

      /* Builtins with known special semantics short-circuit the rest.  */
      if (special_builtin_state (&call_state, &call_looping, callee_t))
	{
	  worse_state (&local->pure_const_state, &local->looping,
		       call_state, call_looping);
	  return;
	}

      /* When bad things happen to bad functions, they cannot be const
	 or pure.  */
      if (setjmp_call_p (callee_t))
	{
	  if (dump_file)
	    fprintf (dump_file, " setjmp is not const/pure\n");
	  local->looping = true;
	  local->pure_const_state = IPA_NEITHER;
	}

      if (DECL_BUILT_IN_CLASS (callee_t) == BUILT_IN_NORMAL)
	switch (DECL_FUNCTION_CODE (callee_t))
	  {
	  case BUILT_IN_LONGJMP:
	  case BUILT_IN_NONLOCAL_GOTO:
	    if (dump_file)
	      fprintf (dump_file, " longjmp and nonlocal goto is not const/pure\n");
	    local->pure_const_state = IPA_NEITHER;
	    local->looping = true;
	    break;
	  default:
	    break;
	  }
    }

  /* When not in IPA mode, we can still handle self recursion.  */
  if (!ipa && callee_t == current_function_decl)
    {
      if (dump_file)
	fprintf (dump_file, " Recursive call can loop.\n");
      local->looping = true;
    }
  /* Either callee is unknown or we are doing local analysis.  Look to
     see if there are any bits available for the callee (such as by
     declaration or because it is builtin) and process solely on the
     basis of those bits.  */
  else if (!ipa)
    {
      enum pure_const_state_e call_state;
      bool call_looping;

      if (possibly_throws && cfun->can_throw_non_call_exceptions)
	{
	  if (dump_file)
	    fprintf (dump_file, " can throw; looping\n");
	  local->looping = true;
	}
      if (possibly_throws_externally)
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, " can throw externally to lp %i\n",
		       lookup_stmt_eh_lp (call));
	      if (callee_t)
		fprintf (dump_file, " callee:%s\n",
			 IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (callee_t)));
	    }
	  local->can_throw = true;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " checking flags for call:");
      /* A noreturn+nothrow call (or noreturn with exceptions disabled)
	 cannot be observed to loop, hence the special looping flag.  */
      state_from_flags (&call_state, &call_looping, flags,
			((flags & (ECF_NORETURN | ECF_NOTHROW))
			 == (ECF_NORETURN | ECF_NOTHROW))
			|| (!flag_exceptions && (flags & ECF_NORETURN)));
      worse_state (&local->pure_const_state, &local->looping,
		   call_state, call_looping);
    }
  /* Direct functions calls are handled by IPA propagation.  */
}
/* Verify cgraph nodes of given cgraph node. */ void verify_cgraph_node (struct cgraph_node *node) { struct cgraph_edge *e; struct cgraph_node *main_clone; struct function *this_cfun = DECL_STRUCT_FUNCTION (node->decl); struct function *saved_cfun = cfun; basic_block this_block; gimple_stmt_iterator gsi; bool error_found = false; if (errorcount || sorrycount) return; timevar_push (TV_CGRAPH_VERIFY); /* debug_generic_stmt needs correct cfun */ set_cfun (this_cfun); for (e = node->callees; e; e = e->next_callee) if (e->aux) { error ("aux field set for edge %s->%s", cgraph_node_name (e->caller), cgraph_node_name (e->callee)); error_found = true; } if (node->count < 0) { error ("Execution count is negative"); error_found = true; } for (e = node->callers; e; e = e->next_caller) { if (e->count < 0) { error ("caller edge count is negative"); error_found = true; } if (e->frequency < 0) { error ("caller edge frequency is negative"); error_found = true; } if (e->frequency > CGRAPH_FREQ_MAX) { error ("caller edge frequency is too large"); error_found = true; } if (!e->inline_failed) { if (node->global.inlined_to != (e->caller->global.inlined_to ? 
e->caller->global.inlined_to : e->caller)) { error ("inlined_to pointer is wrong"); error_found = true; } if (node->callers->next_caller) { error ("multiple inline callers"); error_found = true; } } else if (node->global.inlined_to) { error ("inlined_to pointer set for noninline callers"); error_found = true; } } if (!node->callers && node->global.inlined_to) { error ("inlined_to pointer is set but no predecessors found"); error_found = true; } if (node->global.inlined_to == node) { error ("inlined_to pointer refers to itself"); error_found = true; } for (main_clone = cgraph_node (node->decl); main_clone; main_clone = main_clone->next_clone) if (main_clone == node) break; if (!cgraph_node (node->decl)) { error ("node not found in cgraph_hash"); error_found = true; } if (node->analyzed && !TREE_ASM_WRITTEN (node->decl) && (!DECL_EXTERNAL (node->decl) || node->global.inlined_to)) { if (this_cfun->cfg) { /* The nodes we're interested in are never shared, so walk the tree ignoring duplicates. */ struct pointer_set_t *visited_nodes = pointer_set_create (); /* Reach the trees by walking over the CFG, and note the enclosing basic-blocks in the call edges. */ FOR_EACH_BB_FN (this_block, this_cfun) for (gsi = gsi_start_bb (this_block); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); tree decl; if (is_gimple_call (stmt) && (decl = gimple_call_fndecl (stmt))) { struct cgraph_edge *e = cgraph_edge (node, stmt); if (e) { if (e->aux) { error ("shared call_stmt:"); debug_gimple_stmt (stmt); error_found = true; } if (e->callee->decl != cgraph_node (decl)->decl && e->inline_failed) { error ("edge points to wrong declaration:"); debug_tree (e->callee->decl); fprintf (stderr," Instead of:"); debug_tree (decl); } e->aux = (void *)1; } else { error ("missing callgraph edge for call stmt:"); debug_gimple_stmt (stmt); error_found = true; } } } pointer_set_destroy (visited_nodes); } else /* No CFG available?! 
*/ gcc_unreachable (); for (e = node->callees; e; e = e->next_callee) { if (!e->aux && !e->indirect_call) { error ("edge %s->%s has no corresponding call_stmt", cgraph_node_name (e->caller), cgraph_node_name (e->callee)); debug_gimple_stmt (e->call_stmt); error_found = true; } e->aux = 0; } }
/* Scan basic block BB (and, if BB contains no call, its predecessors)
   for a call in tail position.  For each candidate found, prepend a new
   struct tailcall describing it (call iterator, whether it is tail
   recursion, and any accumulated multiplicand M / addend A applied to
   the return value) onto the list *RET.  */

static void
find_tail_calls (basic_block bb, struct tailcall **ret)
{
  tree ass_var = NULL_TREE, ret_var, func, param;
  gimple stmt, call = NULL;
  gimple_stmt_iterator gsi, agsi;
  bool tail_recursion;
  struct tailcall *nw;
  edge e;
  tree m, a;
  basic_block abb;
  size_t idx;
  tree var;

  /* Tail position requires a unique path to the exit.  */
  if (!single_succ_p (bb))
    return;

  /* Walk backwards from the end of BB looking for the last call.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);

      /* Ignore labels, returns, clobbers and debug stmts.  */
      if (gimple_code (stmt) == GIMPLE_LABEL
	  || gimple_code (stmt) == GIMPLE_RETURN
	  || gimple_clobber_p (stmt)
	  || is_gimple_debug (stmt))
	continue;

      /* Check for a call.  */
      if (is_gimple_call (stmt))
	{
	  call = stmt;
	  ass_var = gimple_call_lhs (stmt);
	  break;
	}

      /* If the statement references memory or volatile operands, fail.  */
      if (gimple_references_memory_p (stmt)
	  || gimple_has_volatile_ops (stmt))
	return;
    }

  if (gsi_end_p (gsi))
    {
      edge_iterator ei;
      /* Recurse to the predecessors.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	find_tail_calls (e->src, ret);
      return;
    }

  /* If the LHS of our call is not just a simple register, we can't
     transform this into a tail or sibling call.  This situation happens,
     in (e.g.) "*p = foo()" where foo returns a struct.  In this case we
     won't have a temporary here, but we need to carry out the side
     effect anyway, so tailcall is impossible.

     ??? In some situations (when the struct is returned in memory via
     invisible argument) we could deal with this, e.g. by passing 'p'
     itself as that argument to foo, but it's too early to do this here,
     and expand_call() will not handle it anyway.  If it ever can, then
     we need to revisit this here, to allow that situation.  */
  if (ass_var && !is_gimple_reg (ass_var))
    return;

  /* We found the call, check whether it is suitable.  */
  tail_recursion = false;
  func = gimple_call_fndecl (call);
  if (func
      && !DECL_BUILT_IN (func)
      && recursive_call_p (current_function_decl, func))
    {
      tree arg;

      /* A self-call is tail recursion only when each argument can be
	 copied back into the corresponding parameter.  */
      for (param = DECL_ARGUMENTS (func), idx = 0;
	   param && idx < gimple_call_num_args (call);
	   param = DECL_CHAIN (param), idx ++)
	{
	  arg = gimple_call_arg (call, idx);
	  if (param != arg)
	    {
	      /* Make sure there are no problems with copying.  The
		 parameter have a copyable type and the two arguments
		 must have reasonably equivalent types.  The latter
		 requirement could be relaxed if we emitted a suitable
		 type conversion statement.  */
	      if (!is_gimple_reg_type (TREE_TYPE (param))
		  || !useless_type_conversion_p (TREE_TYPE (param),
						 TREE_TYPE (arg)))
		break;

	      /* The parameter should be a real operand, so that phi node
		 created for it at the start of the function has the
		 meaning of copying the value.  This test implies
		 is_gimple_reg_type from the previous condition, however
		 this one could be relaxed by being more careful with
		 copying the new value of the parameter (emitting
		 appropriate GIMPLE_ASSIGN and updating the virtual
		 operands).  */
	      if (!is_gimple_reg (param))
		break;
	    }
	}
      /* Accepted only if every argument matched its parameter.  */
      if (idx == gimple_call_num_args (call) && !param)
	tail_recursion = true;
    }

  /* Make sure the tail invocation of this function does not refer
     to local variables.  */
  FOR_EACH_LOCAL_DECL (cfun, idx, var)
    {
      if (TREE_CODE (var) != PARM_DECL
	  && auto_var_in_fn_p (var, cfun->decl)
	  && (ref_maybe_used_by_stmt_p (call, var)
	      || call_may_clobber_ref_p (call, var)))
	return;
    }

  /* Now check the statements after the call.  None of them has virtual
     operands, so they may only depend on the call through its return
     value.  The return value should also be dependent on each of them,
     since we are running after dce.  */
  m = NULL_TREE;
  a = NULL_TREE;
  abb = bb;
  agsi = gsi;
  while (1)
    {
      tree tmp_a = NULL_TREE;
      tree tmp_m = NULL_TREE;
      gsi_next (&agsi);

      /* At a block boundary, follow the single successor edge and carry
	 the call result through any PHI nodes.  */
      while (gsi_end_p (agsi))
	{
	  ass_var = propagate_through_phis (ass_var, single_succ_edge (abb));
	  abb = single_succ (abb);
	  agsi = gsi_start_bb (abb);
	}

      stmt = gsi_stmt (agsi);

      if (gimple_code (stmt) == GIMPLE_LABEL)
	continue;

      if (gimple_code (stmt) == GIMPLE_RETURN)
	break;

      if (gimple_clobber_p (stmt))
	continue;

      if (is_gimple_debug (stmt))
	continue;

      if (gimple_code (stmt) != GIMPLE_ASSIGN)
	return;

      /* This is a gimple assign. */
      if (! process_assignment (stmt, gsi, &tmp_m, &tmp_a, &ass_var))
	return;

      /* Accumulate the addend A = A + tmp_a (converted to tmp_a's
	 type).  */
      if (tmp_a)
	{
	  tree type = TREE_TYPE (tmp_a);
	  if (a)
	    a = fold_build2 (PLUS_EXPR, type, fold_convert (type, a), tmp_a);
	  else
	    a = tmp_a;
	}
      /* Accumulate the multiplicand M = M * tmp_m, scaling the addend
	 too since (x + a) * m == x * m + a * m.  */
      if (tmp_m)
	{
	  tree type = TREE_TYPE (tmp_m);
	  if (m)
	    m = fold_build2 (MULT_EXPR, type, fold_convert (type, m), tmp_m);
	  else
	    m = tmp_m;

	  if (a)
	    a = fold_build2 (MULT_EXPR, type, fold_convert (type, a), tmp_m);
	}
    }

  /* See if this is a tail call we can handle.  */
  ret_var = gimple_return_retval (stmt);

  /* We may proceed if there either is no return value, or the return
     value is identical to the call's return.  */
  if (ret_var
      && (ret_var != ass_var))
    return;

  /* If this is not a tail recursive call, we cannot handle addends or
     multiplicands.  */
  if (!tail_recursion
      && (m || a))
    return;

  /* For pointers only allow additions.  */
  if (m && POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl))))
    return;

  /* Record the candidate at the head of *RET.  */
  nw = XNEW (struct tailcall);

  nw->call_gsi = gsi;

  nw->tail_recursion = tail_recursion;

  nw->mult = m;
  nw->add = a;

  nw->next = *ret;
  *ret = nw;
}
/* Re-gimplify the operands of statement STMT in place after a
   transformation may have left them in non-GIMPLE form.  GSI_P points at
   STMT; any statements needed to compute operand values are inserted
   before it, and if the LHS has to be replaced by a temporary, the
   copy-back assignment is inserted after it.  */

void
gimple_regimplify_operands (gimple stmt, gimple_stmt_iterator *gsi_p)
{
  size_t i, num_ops;
  tree lhs;
  gimple_seq pre = NULL;
  gimple post_stmt = NULL;

  push_gimplify_context (gimple_in_ssa_p (cfun));

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      /* Condition operands must be simple values.  */
      gimplify_expr (gimple_cond_lhs_ptr (stmt), &pre, NULL,
		     is_gimple_val, fb_rvalue);
      gimplify_expr (gimple_cond_rhs_ptr (stmt), &pre, NULL,
		     is_gimple_val, fb_rvalue);
      break;
    case GIMPLE_SWITCH:
      gimplify_expr (gimple_switch_index_ptr (stmt), &pre, NULL,
		     is_gimple_val, fb_rvalue);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      gimplify_expr (gimple_omp_atomic_load_rhs_ptr (stmt), &pre, NULL,
		     is_gimple_val, fb_rvalue);
      break;
    case GIMPLE_ASM:
      {
	size_t i, noutputs = gimple_asm_noutputs (stmt);
	const char *constraint, **oconstraints;
	bool allows_mem, allows_reg, is_inout;

	oconstraints
	  = (const char **) alloca ((noutputs) * sizeof (const char *));
	/* Outputs: gimplify to an lvalue per each constraint.  */
	for (i = 0; i < noutputs; i++)
	  {
	    tree op = gimple_asm_output_op (stmt, i);
	    constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op)));
	    oconstraints[i] = constraint;
	    parse_output_constraint (&constraint, i, 0, 0, &allows_mem,
				     &allows_reg, &is_inout);
	    gimplify_expr (&TREE_VALUE (op), &pre, NULL,
			   is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			   fb_lvalue | fb_mayfail);
	  }
	/* Inputs: memory-only constraints keep an lvalue, the rest
	   become rvalues.  */
	for (i = 0; i < gimple_asm_ninputs (stmt); i++)
	  {
	    tree op = gimple_asm_input_op (stmt, i);
	    constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op)));
	    parse_input_constraint (&constraint, 0, 0, noutputs, 0,
				    oconstraints, &allows_mem, &allows_reg);
	    if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (op))) && allows_mem)
	      allows_reg = 0;
	    if (!allows_reg && allows_mem)
	      gimplify_expr (&TREE_VALUE (op), &pre, NULL,
			     is_gimple_lvalue, fb_lvalue | fb_mayfail);
	    else
	      gimplify_expr (&TREE_VALUE (op), &pre, NULL,
			     is_gimple_asm_val, fb_rvalue);
	  }
      }
      break;
    default:
      /* NOTE: We start gimplifying operands from last to first to make
	 sure that side-effects on the RHS of calls, assignments and ASMs
	 are executed before the LHS.  The ordering is not important for
	 other statements.  */
      num_ops = gimple_num_ops (stmt);
      for (i = num_ops; i > 0; i--)
	{
	  tree op = gimple_op (stmt, i - 1);
	  if (op == NULL_TREE)
	    continue;
	  if (i == 1 && (is_gimple_call (stmt) || is_gimple_assign (stmt)))
	    gimplify_expr (&op, &pre, NULL, is_gimple_lvalue, fb_lvalue);
	  else if (i == 2
		   && is_gimple_assign (stmt)
		   && num_ops == 2
		   && get_gimple_rhs_class (gimple_expr_code (stmt))
		      == GIMPLE_SINGLE_RHS)
	    gimplify_expr (&op, &pre, NULL,
			   rhs_predicate_for (gimple_assign_lhs (stmt)),
			   fb_rvalue);
	  else if (i == 2 && is_gimple_call (stmt))
	    {
	      /* The callee slot may hold a bare FUNCTION_DECL; leave it.  */
	      if (TREE_CODE (op) == FUNCTION_DECL)
		continue;
	      gimplify_expr (&op, &pre, NULL, is_gimple_call_addr, fb_rvalue);
	    }
	  else
	    gimplify_expr (&op, &pre, NULL, is_gimple_val, fb_rvalue);
	  gimple_set_op (stmt, i - 1, op);
	}

      lhs = gimple_get_lhs (stmt);
      /* If the LHS changed it in a way that requires a simple RHS,
	 create temporary.  */
      if (lhs && !is_gimple_reg (lhs))
	{
	  bool need_temp = false;

	  if (is_gimple_assign (stmt)
	      && num_ops == 2
	      && get_gimple_rhs_class (gimple_expr_code (stmt))
		 == GIMPLE_SINGLE_RHS)
	    gimplify_expr (gimple_assign_rhs1_ptr (stmt), &pre, NULL,
			   rhs_predicate_for (gimple_assign_lhs (stmt)),
			   fb_rvalue);
	  /* NOTE(review): under the !is_gimple_reg (lhs) guard above this
	     branch looks unreachable; preserved as-is — confirm before
	     removing.  */
	  else if (is_gimple_reg (lhs))
	    {
	      if (is_gimple_reg_type (TREE_TYPE (lhs)))
		{
		  if (is_gimple_call (stmt))
		    {
		      i = gimple_call_flags (stmt);
		      if ((i & ECF_LOOPING_CONST_OR_PURE)
			  || !(i & (ECF_CONST | ECF_PURE)))
			need_temp = true;
		    }
		  if (stmt_can_throw_internal (stmt))
		    need_temp = true;
		}
	    }
	  else
	    {
	      if (is_gimple_reg_type (TREE_TYPE (lhs)))
		need_temp = true;
	      else if (TYPE_MODE (TREE_TYPE (lhs)) != BLKmode)
		{
		  if (is_gimple_call (stmt))
		    {
		      tree fndecl = gimple_call_fndecl (stmt);

		      /* No temporary when the call returns its aggregate
			 in memory or by invisible reference.  */
		      if (!aggregate_value_p (TREE_TYPE (lhs), fndecl)
			  && !(fndecl && DECL_RESULT (fndecl)
			       && DECL_BY_REFERENCE (DECL_RESULT (fndecl))))
			need_temp = true;
		    }
		  else
		    need_temp = true;
		}
	    }
	  if (need_temp)
	    {
	      /* Redirect the statement into a temporary and copy it back
		 to the original LHS afterwards.  */
	      tree temp = create_tmp_reg (TREE_TYPE (lhs), NULL);
	      if (gimple_in_ssa_p (cfun))
		temp = make_ssa_name (temp, NULL);
	      gimple_set_lhs (stmt, temp);
	      post_stmt = gimple_build_assign (lhs, temp);
	    }
	}
      break;
    }

  if (!gimple_seq_empty_p (pre))
    gsi_insert_seq_before (gsi_p, pre, GSI_SAME_STMT);
  if (post_stmt)
    gsi_insert_after (gsi_p, post_stmt, GSI_NEW_STMT);

  pop_gimplify_context (NULL);
}
/* Pass entry point: bring the function's CFG profile in sync with the
   callgraph by rescaling block and edge counts to the cgraph node's
   count, and clean up EH information on statements and edges.  Returns
   a TODO_* mask describing follow-up work for the pass manager.  */

unsigned int
execute_fixup_cfg (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  int todo = gimple_in_ssa_p (cfun) ? TODO_verify_ssa : 0;
  gcov_type count_scale;
  edge e;
  edge_iterator ei;

  /* Scale factor (in REG_BR_PROB_BASE units, rounded to nearest) from
     the current entry count to the cgraph node's count.  */
  if (ENTRY_BLOCK_PTR->count)
    count_scale = (cgraph_node (current_function_decl)->count
		   * REG_BR_PROB_BASE
		   + ENTRY_BLOCK_PTR->count / 2) / ENTRY_BLOCK_PTR->count;
  else
    count_scale = REG_BR_PROB_BASE;

  ENTRY_BLOCK_PTR->count = cgraph_node (current_function_decl)->count;
  EXIT_BLOCK_PTR->count = (EXIT_BLOCK_PTR->count * count_scale
			   + REG_BR_PROB_BASE / 2) / REG_BR_PROB_BASE;

  FOR_EACH_BB (bb)
    {
      bb->count = (bb->count * count_scale
		   + REG_BR_PROB_BASE / 2) / REG_BR_PROB_BASE;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree decl = is_gimple_call (stmt)
		      ? gimple_call_fndecl (stmt)
		      : NULL;

	  /* Const/pure calls may have lost side effects; in SSA form
	     schedule renaming and CFG cleanup for them.  */
	  if (decl
	      && gimple_call_flags (stmt) & (ECF_CONST
					     | ECF_PURE
					     | ECF_LOOPING_CONST_OR_PURE))
	    {
	      if (gimple_in_ssa_p (cfun))
		{
		  todo |= TODO_update_ssa | TODO_cleanup_cfg;
		  mark_symbols_for_renaming (stmt);
		  update_stmt (stmt);
		}
	    }

	  maybe_clean_eh_stmt (stmt);
	}

      if (gimple_purge_dead_eh_edges (bb))
	todo |= TODO_cleanup_cfg;
      /* Rescale outgoing edge counts to match the new block counts.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->count = (e->count * count_scale
		    + REG_BR_PROB_BASE / 2) / REG_BR_PROB_BASE;
    }
  if (count_scale != REG_BR_PROB_BASE)
    compute_function_frequency ();

  /* Dump a textual representation of the flowgraph.  */
  if (dump_file)
    gimple_dump_cfg (dump_file, dump_flags);

  return todo;
}