static tree independent_of_stmt_p (tree expr, gimple at, gimple_stmt_iterator gsi) { basic_block bb, call_bb, at_bb; edge e; edge_iterator ei; if (is_gimple_min_invariant (expr)) return expr; if (TREE_CODE (expr) != SSA_NAME) return NULL_TREE; /* Mark the blocks in the chain leading to the end. */ at_bb = gimple_bb (at); call_bb = gimple_bb (gsi_stmt (gsi)); for (bb = call_bb; bb != at_bb; bb = single_succ (bb)) bb->aux = &bb->aux; bb->aux = &bb->aux; while (1) { at = SSA_NAME_DEF_STMT (expr); bb = gimple_bb (at); /* The default definition or defined before the chain. */ if (!bb || !bb->aux) break; if (bb == call_bb) { for (; !gsi_end_p (gsi); gsi_next (&gsi)) if (gsi_stmt (gsi) == at) break; if (!gsi_end_p (gsi)) expr = NULL_TREE; break; } if (gimple_code (at) != GIMPLE_PHI) { expr = NULL_TREE; break; } FOR_EACH_EDGE (e, ei, bb->preds) if (e->src->aux) break; gcc_assert (e); expr = PHI_ARG_DEF_FROM_EDGE (at, e); if (TREE_CODE (expr) != SSA_NAME) { /* The value is a constant. */ break; } } /* Unmark the blocks. */ for (bb = call_bb; bb != at_bb; bb = single_succ (bb)) bb->aux = NULL; bb->aux = NULL; return expr; }
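/* The bb->aux trick above deserves a note: a block is "marked" by pointing its aux field at itself, a cheap non-NULL value that needs no allocation, and later queries only test aux for non-NULL.  A minimal standalone sketch of the idiom (hypothetical `node' type, not GCC code):  */
#include <stddef.h>

struct node { struct node *next; void *aux; };

static void
mark_chain (struct node *from, struct node *to)
{
  struct node *n;
  /* Mark every node on the chain, including TO itself.  */
  for (n = from; n != to; n = n->next)
    n->aux = &n->aux;		/* self-pointer: cheap non-NULL mark */
  n->aux = &n->aux;
  /* ... queries in between simply test n->aux != NULL ...  */
  /* Unmark before returning, exactly as independent_of_stmt_p does.  */
  for (n = from; n != to; n = n->next)
    n->aux = NULL;
  n->aux = NULL;
}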
static bool init_dont_simulate_again (void) { basic_block bb; gimple_stmt_iterator gsi; gimple phi; bool saw_a_complex_op = false; FOR_EACH_BB (bb) { for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { phi = gsi_stmt (gsi); prop_set_simulate_again (phi, is_complex_reg (gimple_phi_result (phi))); } for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt; tree op0, op1; bool sim_again_p; stmt = gsi_stmt (gsi); op0 = op1 = NULL_TREE; /* Most control-altering statements must be initially simulated, else we won't cover the entire cfg. */ sim_again_p = stmt_ends_bb_p (stmt); switch (gimple_code (stmt)) { case GIMPLE_CALL: if (gimple_call_lhs (stmt)) sim_again_p = is_complex_reg (gimple_call_lhs (stmt)); break; case GIMPLE_ASSIGN: sim_again_p = is_complex_reg (gimple_assign_lhs (stmt)); if (gimple_assign_rhs_code (stmt) == REALPART_EXPR || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR) op0 = TREE_OPERAND (gimple_assign_rhs1 (stmt), 0); else op0 = gimple_assign_rhs1 (stmt); if (gimple_num_ops (stmt) > 2) op1 = gimple_assign_rhs2 (stmt); break; case GIMPLE_COND: op0 = gimple_cond_lhs (stmt); op1 = gimple_cond_rhs (stmt); break; default: break; } if (op0 || op1) switch (gimple_expr_code (stmt)) { case EQ_EXPR: case NE_EXPR: case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: if (TREE_CODE (TREE_TYPE (op0)) == COMPLEX_TYPE || TREE_CODE (TREE_TYPE (op1)) == COMPLEX_TYPE) saw_a_complex_op = true; break; case NEGATE_EXPR: case CONJ_EXPR: if (TREE_CODE (TREE_TYPE (op0)) == COMPLEX_TYPE) saw_a_complex_op = true; break; case REALPART_EXPR: case IMAGPART_EXPR: /* The total store transformation performed during gimplification creates such uninitialized loads and we need to lower the statement to be able to fix things up. */ if (TREE_CODE (op0) == SSA_NAME && ssa_undefined_value_p (op0)) saw_a_complex_op = true; break; default: break; } prop_set_simulate_again (stmt, sim_again_p); } } return saw_a_complex_op; }
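/* For reference, the kind of source that makes saw_a_complex_op true and thereby forces the complex-lowering pass to run; a hedged illustration, not part of the pass:  */
#include <complex.h>

static double complex
complex_mul (double complex a, double complex b)
{
  /* A MULT_EXPR on COMPLEX_TYPE operands; the lowering pass expands
     this into real/imaginary component arithmetic.  */
  return a * b;
}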
/* Find memcpy, mempcpy, memmove and memset calls, insert checks before the call and then call the no_chk version of the function. We do this at -O2 to enable inlining of these functions during expand. Also try to find memcpy, mempcpy, memmove and memset calls which are known not to write pointers to memory, and use faster function versions for them. */ static void chkp_optimize_string_function_calls (void) { basic_block bb; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Searching for replaceable string function calls...\n"); FOR_EACH_BB_FN (bb, cfun) { gimple_stmt_iterator i; for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i)) { gimple *stmt = gsi_stmt (i); tree fndecl; if (gimple_code (stmt) != GIMPLE_CALL || !gimple_call_with_bounds_p (stmt)) continue; fndecl = gimple_call_fndecl (stmt); if (!fndecl || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL) continue; if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMCPY_CHKP || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMPCPY_CHKP || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMMOVE_CHKP || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMSET_CHKP) { tree dst = gimple_call_arg (stmt, 0); tree dst_bnd = gimple_call_arg (stmt, 1); bool is_memset = DECL_FUNCTION_CODE (fndecl) == BUILT_IN_MEMSET_CHKP; tree size = gimple_call_arg (stmt, is_memset ? 3 : 4); tree fndecl_nochk; gimple_stmt_iterator j; basic_block check_bb; address_t size_val; int sign; bool known; /* We may replace the call with the corresponding __chkp_*_nobnd call if the destination pointer's base type is neither void nor a pointer. */ if (POINTER_TYPE_P (TREE_TYPE (dst)) && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (dst))) && !chkp_type_has_pointer (TREE_TYPE (TREE_TYPE (dst)))) { tree fndecl_nobnd = chkp_get_nobnd_fndecl (DECL_FUNCTION_CODE (fndecl)); if (fndecl_nobnd) fndecl = fndecl_nobnd; } fndecl_nochk = chkp_get_nochk_fndecl (DECL_FUNCTION_CODE (fndecl)); if (fndecl_nochk) fndecl = fndecl_nochk; if (fndecl != gimple_call_fndecl (stmt)) { if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Replacing call: "); print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS|TDF_MEMSYMS); } gimple_call_set_fndecl (stmt, fndecl); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "With a new call: "); print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS|TDF_MEMSYMS); } } /* If there is no nochk version of the function then do nothing. Otherwise insert checks before the call. */ if (!fndecl_nochk) continue; /* If the size passed to the call is known and > 0 then we may insert checks unconditionally. */ size_val.pol.create (0); chkp_collect_value (size, size_val); known = chkp_is_constant_addr (size_val, &sign); size_val.pol.release (); /* If we are not sure the size is nonzero then we have to emit a runtime check for the size and perform memory checks only when it is nonzero. */ if (!known) { gimple *check = gimple_build_cond (NE_EXPR, size, size_zero_node, NULL_TREE, NULL_TREE); /* Split the block before the string function call. */ gsi_prev (&i); check_bb = insert_cond_bb (bb, gsi_stmt (i), check); /* Set the position for the checks. */ j = gsi_last_bb (check_bb); /* The block was split and therefore we need to set the iterator to its end. */ i = gsi_last_bb (bb); } /* If the size is known to be zero then no checks should be performed. */ else if (!sign) continue; else j = i; size = size_binop (MINUS_EXPR, size, size_one_node); if (!is_memset) { tree src = gimple_call_arg (stmt, 2); tree src_bnd = gimple_call_arg (stmt, 3); chkp_check_mem_access (src, fold_build_pointer_plus (src, size), src_bnd, j, gimple_location (stmt), integer_zero_node); } chkp_check_mem_access (dst, fold_build_pointer_plus (dst, size), dst_bnd, j, gimple_location (stmt), integer_one_node); } } }
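/* A sketch of the transformation above, under stated assumptions: the hypothetical helpers __check_range and memcpy_nochk stand in for chkp_check_mem_access and the *_NOCHK builtin. When the size is not provably nonzero the checks are guarded; the copy itself always runs.  */
extern void __check_range (const void *lo, const void *hi);
extern void *memcpy_nochk (void *dst, const void *src, unsigned long n);

static void
copy_checked (char *dst, const char *src, unsigned long n)
{
  if (n != 0)			/* runtime guard emitted when !known */
    {
      __check_range (src, src + (n - 1));
      __check_range (dst, dst + (n - 1));
    }
  memcpy_nochk (dst, src, n);
}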
static bool gimple_find_edge_insert_loc (edge e, gimple_stmt_iterator *gsi, basic_block *new_bb) { basic_block dest, src; gimple tmp; dest = e->dest; /* If the destination has one predecessor which has no PHI nodes, insert there. Except for the exit block. The requirement for no PHI nodes could be relaxed. Basically we would have to examine the PHIs to prove that none of them used the value set by the statement we want to insert on E. That hardly seems worth the effort. */ restart: if (single_pred_p (dest) && gimple_seq_empty_p (phi_nodes (dest)) && dest != EXIT_BLOCK_PTR) { *gsi = gsi_start_bb (dest); if (gsi_end_p (*gsi)) return true; /* Make sure we insert after any leading labels. */ tmp = gsi_stmt (*gsi); while (gimple_code (tmp) == GIMPLE_LABEL) { gsi_next (gsi); if (gsi_end_p (*gsi)) break; tmp = gsi_stmt (*gsi); } if (gsi_end_p (*gsi)) { *gsi = gsi_last_bb (dest); return true; } else return false; } /* If the source has one successor, the edge is not abnormal and the last statement does not end a basic block, insert there. Except for the entry block. */ src = e->src; if ((e->flags & EDGE_ABNORMAL) == 0 && single_succ_p (src) && src != ENTRY_BLOCK_PTR) { *gsi = gsi_last_bb (src); if (gsi_end_p (*gsi)) return true; tmp = gsi_stmt (*gsi); if (!stmt_ends_bb_p (tmp)) return true; switch (gimple_code (tmp)) { case GIMPLE_RETURN: case GIMPLE_RESX: return false; default: break; } } /* Otherwise, create a new basic block, and split this edge. */ dest = split_edge (e); if (new_bb) *new_bb = dest; e = single_pred_edge (dest); goto restart; }
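/* This function backs the edge-insertion API: a typical caller queues statements on edges and commits them in one batch. A hedged usage sketch against the GCC-internal API of the same era:  */
static void
queue_zero_init_on_edge (edge e, tree var)
{
  gimple init = gimple_build_assign (var, build_int_cst (TREE_TYPE (var), 0));
  gsi_insert_on_edge (e, init);
  /* Later, once all insertions are queued, gsi_commit_edge_inserts ()
     calls gimple_find_edge_insert_loc for each pending edge and splits
     an edge only when neither of its endpoints works.  */
}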
static unsigned int rename_ssa_copies (void) { var_map map; basic_block bb; gimple_stmt_iterator gsi; tree var, part_var; gimple stmt, phi; unsigned x; FILE *debug; memset (&stats, 0, sizeof (stats)); if (dump_file && (dump_flags & TDF_DETAILS)) debug = dump_file; else debug = NULL; map = init_var_map (num_ssa_names); FOR_EACH_BB (bb) { /* Scan for real copies. */ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { stmt = gsi_stmt (gsi); if (gimple_assign_ssa_name_copy_p (stmt)) { tree lhs = gimple_assign_lhs (stmt); tree rhs = gimple_assign_rhs1 (stmt); copy_rename_partition_coalesce (map, lhs, rhs, debug); } } } FOR_EACH_BB (bb) { /* Treat PHI nodes as copies between the result and each argument. */ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { size_t i; tree res; phi = gsi_stmt (gsi); res = gimple_phi_result (phi); /* Do not process virtual SSA_NAMES. */ if (virtual_operand_p (res)) continue; /* Make sure to only use the same partition for an argument as the result but never the other way around. */ if (SSA_NAME_VAR (res) && !DECL_IGNORED_P (SSA_NAME_VAR (res))) for (i = 0; i < gimple_phi_num_args (phi); i++) { tree arg = PHI_ARG_DEF (phi, i); if (TREE_CODE (arg) == SSA_NAME) copy_rename_partition_coalesce (map, res, arg, debug); } /* Else if all arguments are in the same partition try to merge it with the result. */ else { int all_p_same = -1; int p = -1; for (i = 0; i < gimple_phi_num_args (phi); i++) { tree arg = PHI_ARG_DEF (phi, i); if (TREE_CODE (arg) != SSA_NAME) { all_p_same = 0; break; } else if (all_p_same == -1) { p = partition_find (map->var_partition, SSA_NAME_VERSION (arg)); all_p_same = 1; } else if (all_p_same == 1 && p != partition_find (map->var_partition, SSA_NAME_VERSION (arg))) { all_p_same = 0; break; } } if (all_p_same == 1) copy_rename_partition_coalesce (map, res, PHI_ARG_DEF (phi, 0), debug); } } } if (debug) dump_var_map (debug, map); /* Now one more pass to make all elements of a partition share the same root variable. */ for (x = 1; x < num_ssa_names; x++) { part_var = partition_to_var (map, x); if (!part_var) continue; var = ssa_name (x); if (SSA_NAME_VAR (var) == SSA_NAME_VAR (part_var)) continue; if (debug) { fprintf (debug, "Coalesced "); print_generic_expr (debug, var, TDF_SLIM); fprintf (debug, " to "); print_generic_expr (debug, part_var, TDF_SLIM); fprintf (debug, "\n"); } stats.coalesced++; replace_ssa_name_symbol (var, SSA_NAME_VAR (part_var)); } statistics_counter_event (cfun, "copies coalesced", stats.coalesced); delete_var_map (map); return 0; }
bool empty_block_p (basic_block bb) { /* BB must have no executable statements. */ return gsi_end_p (gsi_after_labels (bb)); }
static void shrink_wrap_one_built_in_call_with_conds (gcall *bi_call, vec <gimple *> conds, unsigned int nconds) { gimple_stmt_iterator bi_call_bsi; basic_block bi_call_bb, join_tgt_bb, guard_bb; edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru; edge bi_call_in_edge0, guard_bb_in_edge; unsigned tn_cond_stmts; unsigned ci; gimple *cond_expr = NULL; gimple *cond_expr_start; /* The cfg we want to create looks like this: [guard n-1] <- guard_bb (old block) | \ | [guard n-2] } | / \ } | / ... } new blocks | / [guard 0] } | / / | } [ call ] | <- bi_call_bb } | \ | | \ | | [ join ] <- join_tgt_bb (old iff call must end bb) | possible EH edges (only if [join] is old) When [join] is new, the immediate dominators for these blocks are: 1. [guard n-1]: unchanged 2. [call]: [guard n-1] 3. [guard m]: [guard m+1] for 0 <= m <= n-2 4. [join]: [guard n-1] We punt for the more complex case of [join] being old and simply free the dominance info. We also punt on postdominators, which aren't expected to be available at this point anyway. */ bi_call_bb = gimple_bb (bi_call); /* Now find the join target bb -- split bi_call_bb if needed. */ if (stmt_ends_bb_p (bi_call)) { /* We checked that there was a fallthrough edge in can_guard_call_p. */ join_tgt_in_edge_from_call = find_fallthru_edge (bi_call_bb->succs); gcc_assert (join_tgt_in_edge_from_call); /* We don't want to handle PHIs. */ if (EDGE_COUNT (join_tgt_in_edge_from_call->dest->preds) > 1) join_tgt_bb = split_edge (join_tgt_in_edge_from_call); else { join_tgt_bb = join_tgt_in_edge_from_call->dest; /* We may have degenerate PHIs in the destination. Propagate those out. */ for (gphi_iterator i = gsi_start_phis (join_tgt_bb); !gsi_end_p (i);) { gphi *phi = i.phi (); replace_uses_by (gimple_phi_result (phi), gimple_phi_arg_def (phi, 0)); remove_phi_node (&i, true); } } } else { join_tgt_in_edge_from_call = split_block (bi_call_bb, bi_call); join_tgt_bb = join_tgt_in_edge_from_call->dest; } bi_call_bsi = gsi_for_stmt (bi_call); /* Now it is time to insert the first conditional expression into bi_call_bb and split this bb so that bi_call is shrink-wrapped. */ tn_cond_stmts = conds.length (); cond_expr = NULL; cond_expr_start = conds[0]; for (ci = 0; ci < tn_cond_stmts; ci++) { gimple *c = conds[ci]; gcc_assert (c || ci != 0); if (!c) break; gsi_insert_before (&bi_call_bsi, c, GSI_SAME_STMT); cond_expr = c; } ci++; gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND); typedef std::pair<edge, edge> edge_pair; auto_vec<edge_pair, 8> edges; bi_call_in_edge0 = split_block (bi_call_bb, cond_expr); bi_call_in_edge0->flags &= ~EDGE_FALLTHRU; bi_call_in_edge0->flags |= EDGE_FALSE_VALUE; guard_bb = bi_call_bb; bi_call_bb = bi_call_in_edge0->dest; join_tgt_in_edge_fall_thru = make_edge (guard_bb, join_tgt_bb, EDGE_TRUE_VALUE); edges.reserve (nconds); edges.quick_push (edge_pair (bi_call_in_edge0, join_tgt_in_edge_fall_thru)); /* Code generation for the rest of the conditions. */ for (unsigned int i = 1; i < nconds; ++i) { unsigned ci0; edge bi_call_in_edge; gimple_stmt_iterator guard_bsi = gsi_for_stmt (cond_expr_start); ci0 = ci; cond_expr_start = conds[ci0]; for (; ci < tn_cond_stmts; ci++) { gimple *c = conds[ci]; gcc_assert (c || ci != ci0); if (!c) break; gsi_insert_before (&guard_bsi, c, GSI_SAME_STMT); cond_expr = c; } ci++; gcc_assert (cond_expr && gimple_code (cond_expr) == GIMPLE_COND); guard_bb_in_edge = split_block (guard_bb, cond_expr); guard_bb_in_edge->flags &= ~EDGE_FALLTHRU; guard_bb_in_edge->flags |= EDGE_TRUE_VALUE; bi_call_in_edge = make_edge (guard_bb, bi_call_bb, EDGE_FALSE_VALUE); edges.quick_push (edge_pair (bi_call_in_edge, guard_bb_in_edge)); } /* Now update the probability and profile information, processing the guards in order of execution. There are two approaches we could take here. On the one hand we could assign a probability of X to the call block and distribute that probability among its incoming edges. On the other hand we could assign a probability of X to each individual call edge. The choice only affects calls that have more than one condition. In those cases, the second approach would give the call block a greater probability than the first. However, the difference is only small, and our chosen X is a pure guess anyway. Here we take the second approach because it's slightly simpler and because it's easy to see that it doesn't lose profile counts. */ bi_call_bb->count = profile_count::zero (); while (!edges.is_empty ()) { edge_pair e = edges.pop (); edge call_edge = e.first; edge nocall_edge = e.second; basic_block src_bb = call_edge->src; gcc_assert (src_bb == nocall_edge->src); call_edge->probability = profile_probability::very_unlikely (); nocall_edge->probability = profile_probability::always () - call_edge->probability; bi_call_bb->count += call_edge->count (); if (nocall_edge->dest != join_tgt_bb) nocall_edge->dest->count = src_bb->count - bi_call_bb->count; } if (dom_info_available_p (CDI_DOMINATORS)) { /* The split_blocks leave [guard 0] as the immediate dominator of [call] and [call] as the immediate dominator of [join]. Fix them up. */ set_immediate_dominator (CDI_DOMINATORS, bi_call_bb, guard_bb); set_immediate_dominator (CDI_DOMINATORS, join_tgt_bb, guard_bb); } if (dump_file && (dump_flags & TDF_DETAILS)) { location_t loc; loc = gimple_location (bi_call); fprintf (dump_file, "%s:%d: note: function call is shrink-wrapped" " into error conditions.\n", LOCATION_FILE (loc), LOCATION_LINE (loc)); } }
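/* The net effect at source level, as a hedged sketch: a call that is only needed on its error domain gets guarded so the common case skips it.  */
#include <math.h>

static void
log_for_errno_only (double x)
{
  /* Before shrink-wrapping the call is unconditional:  log (x);
     Afterwards the library call survives only under the guard built
     from CONDS, e.g. the domain-error condition for log:  */
  if (x <= 0.0)
    log (x);	/* kept solely for errno/exception side effects */
}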
/* Synthesize a CALL_EXPR and a TRY_FINALLY_EXPR for this chain of _DECLs if appropriate. Arrange to call the __mf_register function now, and the __mf_unregister function later for each. Return the gimple sequence after synthesis. */ gimple_seq mx_register_decls (tree decl, gimple_seq seq, location_t location) { gimple_seq finally_stmts = NULL; gimple_stmt_iterator initially_stmts = gsi_start (seq); while (decl != NULL_TREE) { if (mf_decl_eligible_p (decl) /* Not already processed. */ && ! mf_marked_p (decl) /* Automatic variable. */ && ! DECL_EXTERNAL (decl) && ! TREE_STATIC (decl)) { tree size = NULL_TREE, variable_name; gimple unregister_fncall, register_fncall; tree unregister_fncall_param, register_fncall_param; /* Variable-sized objects should already have had their sizes gimplified by the time we get here. */ size = fold_convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (decl))); gcc_assert (is_gimple_val (size)); unregister_fncall_param = mf_mark (build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (decl)), decl)); /* __mf_unregister (&VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK) */ unregister_fncall = gimple_build_call (mf_unregister_fndecl, 3, unregister_fncall_param, size, integer_three_node); variable_name = mf_varname_tree (decl); register_fncall_param = mf_mark (build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (decl)), decl)); /* __mf_register (&VARIABLE, sizeof (VARIABLE), __MF_TYPE_STACK, "name") */ register_fncall = gimple_build_call (mf_register_fndecl, 4, register_fncall_param, size, integer_three_node, variable_name); /* Accumulate the two calls. */ gimple_set_location (register_fncall, location); gimple_set_location (unregister_fncall, location); /* Add the __mf_register call at the current appending point. */ if (gsi_end_p (initially_stmts)) { if (!mf_artificial (decl)) warning (OPT_Wmudflap, "mudflap cannot track %qE in stub function", DECL_NAME (decl)); } else { gsi_insert_before (&initially_stmts, register_fncall, GSI_SAME_STMT); /* Accumulate the FINALLY piece. */ gimple_seq_add_stmt (&finally_stmts, unregister_fncall); } mf_mark (decl); } decl = DECL_CHAIN (decl); } /* Actually, (initially_stmts!=NULL) <=> (finally_stmts!=NULL) */ if (finally_stmts != NULL) { gimple stmt = gimple_build_try (seq, finally_stmts, GIMPLE_TRY_FINALLY); gimple_seq new_seq = NULL; gimple_seq_add_stmt (&new_seq, stmt); return new_seq; } else return seq; }
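/* What the synthesized code amounts to for one eligible local, sketched in plain C against the libmudflap entry points (signatures abbreviated; 3 stands in for __MF_TYPE_STACK, cf. integer_three_node above):  */
extern void __mf_register (void *ptr, unsigned long sz, int type,
			   const char *name);
extern void __mf_unregister (void *ptr, unsigned long sz, int type);

static int
stack_tracked (void)
{
  int x;
  __mf_register (&x, sizeof (x), 3, "x");
  x = 42;
  /* The pass wraps the body in a GIMPLE_TRY_FINALLY so the unregister
     call runs on every exit path, not just this one.  */
  __mf_unregister (&x, sizeof (x), 3);
  return x;
}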
static void mf_build_check_statement_for (tree base, tree limit, gimple_stmt_iterator *instr_gsi, location_t location, tree dirflag) { gimple_stmt_iterator gsi; basic_block cond_bb, then_bb, join_bb; edge e; tree cond, t, u, v; tree mf_base; tree mf_elem; tree mf_limit; gimple g; gimple_seq seq, stmts; /* We first need to split the current basic block, and start altering the CFG. This allows us to insert the statements we're about to construct into the right basic blocks. */ cond_bb = gimple_bb (gsi_stmt (*instr_gsi)); gsi = *instr_gsi; gsi_prev (&gsi); if (! gsi_end_p (gsi)) e = split_block (cond_bb, gsi_stmt (gsi)); else e = split_block_after_labels (cond_bb); cond_bb = e->src; join_bb = e->dest; /* A recap at this point: join_bb is the basic block at whose head is the gimple statement for which this check expression is being built. cond_bb is the (possibly new, synthetic) basic block the end of which will contain the cache-lookup code, and a conditional that jumps to the cache-miss code or, much more likely, over to join_bb. */ /* Create the bb that contains the cache-miss fallback block (mf_check). */ then_bb = create_empty_bb (cond_bb); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU); /* Mark the pseudo-fallthrough edge from cond_bb to join_bb. */ e = find_edge (cond_bb, join_bb); e->flags = EDGE_FALSE_VALUE; e->count = cond_bb->count; e->probability = REG_BR_PROB_BASE; /* Update dominance info. Note that join_bb's data was updated by split_block. */ if (dom_info_available_p (CDI_DOMINATORS)) { set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb); } /* Update loop info. */ if (current_loops) add_bb_to_loop (then_bb, cond_bb->loop_father); /* Build our local variables. */ mf_elem = create_tmp_reg (mf_cache_structptr_type, "__mf_elem"); mf_base = create_tmp_reg (mf_uintptr_type, "__mf_base"); mf_limit = create_tmp_reg (mf_uintptr_type, "__mf_limit"); /* Build: __mf_base = (uintptr_t) <base address expression>. */ seq = NULL; t = fold_convert_loc (location, mf_uintptr_type, unshare_expr (base)); t = force_gimple_operand (t, &stmts, false, NULL_TREE); gimple_seq_add_seq (&seq, stmts); g = gimple_build_assign (mf_base, t); gimple_set_location (g, location); gimple_seq_add_stmt (&seq, g); /* Build: __mf_limit = (uintptr_t) <limit address expression>. */ t = fold_convert_loc (location, mf_uintptr_type, unshare_expr (limit)); t = force_gimple_operand (t, &stmts, false, NULL_TREE); gimple_seq_add_seq (&seq, stmts); g = gimple_build_assign (mf_limit, t); gimple_set_location (g, location); gimple_seq_add_stmt (&seq, g); /* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift) & __mf_mask]. */ t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base, flag_mudflap_threads ? mf_cache_shift_decl : mf_cache_shift_decl_l); t = build2 (BIT_AND_EXPR, mf_uintptr_type, t, flag_mudflap_threads ? mf_cache_mask_decl : mf_cache_mask_decl_l); t = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (mf_cache_array_decl)), mf_cache_array_decl, t, NULL_TREE, NULL_TREE); t = build1 (ADDR_EXPR, mf_cache_structptr_type, t); t = force_gimple_operand (t, &stmts, false, NULL_TREE); gimple_seq_add_seq (&seq, stmts); g = gimple_build_assign (mf_elem, t); gimple_set_location (g, location); gimple_seq_add_stmt (&seq, g); /* Quick validity check. if (__mf_elem->low > __mf_base || (__mf_elem->high < __mf_limit)) { __mf_check (); ... and only if single-threaded: __mf_lookup_shift_l = ...; __mf_lookup_mask_l = ...; } It is expected that this body of code is rarely executed so we mark the edge to the THEN clause of the conditional jump as unlikely. */ /* Construct t <-- '__mf_elem->low > __mf_base'. */ t = build3 (COMPONENT_REF, mf_uintptr_type, build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem), TYPE_FIELDS (mf_cache_struct_type), NULL_TREE); t = build2 (GT_EXPR, boolean_type_node, t, mf_base); /* Construct '__mf_elem->high < __mf_limit'. First build: 1) u <-- '__mf_elem->high' 2) v <-- '__mf_limit'. Then build 'u <-- (u < v)'. */ u = build3 (COMPONENT_REF, mf_uintptr_type, build1 (INDIRECT_REF, mf_cache_struct_type, mf_elem), DECL_CHAIN (TYPE_FIELDS (mf_cache_struct_type)), NULL_TREE); v = mf_limit; u = build2 (LT_EXPR, boolean_type_node, u, v); /* Build the composed conditional: t <-- 't || u'. Then store the result of the evaluation of 't' in a temporary variable which we can use as the condition for the conditional jump. */ t = build2 (TRUTH_OR_EXPR, boolean_type_node, t, u); t = force_gimple_operand (t, &stmts, false, NULL_TREE); gimple_seq_add_seq (&seq, stmts); cond = create_tmp_reg (boolean_type_node, "__mf_unlikely_cond"); g = gimple_build_assign (cond, t); gimple_set_location (g, location); gimple_seq_add_stmt (&seq, g); /* Build the conditional jump. 'cond' is just a temporary so we can simply build a void COND_EXPR. We do need labels in both arms though. */ g = gimple_build_cond (NE_EXPR, cond, boolean_false_node, NULL_TREE, NULL_TREE); gimple_set_location (g, location); gimple_seq_add_stmt (&seq, g); /* At this point, after so much hard work, we have only constructed the conditional jump, if (__mf_elem->low > __mf_base || (__mf_elem->high < __mf_limit)) The lowered GIMPLE tree representing this code is held in SEQ. We can insert this now in the current basic block, i.e. the one that the statement we're instrumenting was originally in. */ gsi = gsi_last_bb (cond_bb); gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING); /* Now build up the body of the cache-miss handling: __mf_check(); refresh *_l vars. This is the body of the conditional. */ seq = NULL; /* u is a string, so it is already a gimple value. */ u = mf_file_function_line_tree (location); /* NB: we pass the overall [base..limit] range to mf_check. */ v = fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type, fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type, mf_limit, mf_base), build_int_cst (mf_uintptr_type, 1)); v = force_gimple_operand (v, &stmts, true, NULL_TREE); gimple_seq_add_seq (&seq, stmts); g = gimple_build_call (mf_check_fndecl, 4, mf_base, v, dirflag, u); gimple_seq_add_stmt (&seq, g); if (! flag_mudflap_threads) { if (stmt_ends_bb_p (g)) { gsi = gsi_start_bb (then_bb); gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING); e = split_block (then_bb, g); then_bb = e->dest; seq = NULL; } g = gimple_build_assign (mf_cache_shift_decl_l, mf_cache_shift_decl); gimple_seq_add_stmt (&seq, g); g = gimple_build_assign (mf_cache_mask_decl_l, mf_cache_mask_decl); gimple_seq_add_stmt (&seq, g); } /* Insert the check code in the THEN block. */ gsi = gsi_start_bb (then_bb); gsi_insert_seq_after (&gsi, seq, GSI_CONTINUE_LINKING); *instr_gsi = gsi_start_bb (join_bb); }
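/* The generated lookup, rendered as a hedged C sketch; the cache array and the shift/mask globals below approximate mf_cache_array_decl and the *_decl_l variables used above, and mf_check_stub stands in for __mf_check:  */
struct mf_cache_entry { unsigned long low, high; };
extern struct mf_cache_entry __mf_lookup_cache[];
extern unsigned long __mf_shift, __mf_mask;
extern void mf_check_stub (unsigned long base, unsigned long len);

static void
check_access (unsigned long base, unsigned long limit)
{
  struct mf_cache_entry *elem
    = &__mf_lookup_cache[(base >> __mf_shift) & __mf_mask];
  /* Fast path: a cache hit means the tracked object spans [low, high]
     and covers the whole access, so no call is made.  */
  if (elem->low > base || elem->high < limit)
    mf_check_stub (base, limit - base + 1);	/* slow path */
}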
static unsigned int tree_profiling (void) { struct cgraph_node *node; /* Don't profile functions produced at destruction time, particularly the gcov datastructure initializer. Don't profile if it has already been instrumented either (when OpenMP expansion creates a child function from an already instrumented body). */ if (cgraph_state == CGRAPH_STATE_FINISHED) return 0; init_node_map(); FOR_EACH_DEFINED_FUNCTION (node) { if (!gimple_has_body_p (node->symbol.decl)) continue; /* Don't profile functions produced for builtin stuff. */ if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION || DECL_STRUCT_FUNCTION (node->symbol.decl)->after_tree_profile) continue; push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl)); current_function_decl = node->symbol.decl; /* Re-set global shared temporary variable for edge-counters. */ gcov_type_tmp_var = NULL_TREE; /* Local pure-const may imply need to fixup the cfg. */ if (execute_fixup_cfg () & TODO_cleanup_cfg) cleanup_tree_cfg (); branch_prob (); if (! flag_branch_probabilities && flag_profile_values) gimple_gen_ic_func_profiler (); if (flag_branch_probabilities && flag_profile_values && flag_value_profile_transformations) gimple_value_profile_transformations (); /* The above could hose dominator info. Currently there is none coming in, this is a safety valve. It should be easy to adjust it, if and when there is some. */ free_dominance_info (CDI_DOMINATORS); free_dominance_info (CDI_POST_DOMINATORS); current_function_decl = NULL; pop_cfun (); } /* Drop pure/const flags from instrumented functions. */ FOR_EACH_DEFINED_FUNCTION (node) { if (!gimple_has_body_p (node->symbol.decl) || !(!node->clone_of || node->symbol.decl != node->clone_of->symbol.decl)) continue; /* Don't profile functions produced for builtin stuff. */ if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION || DECL_STRUCT_FUNCTION (node->symbol.decl)->after_tree_profile) continue; cgraph_set_const_flag (node, false, false); cgraph_set_pure_flag (node, false, false); } /* Update call statements and rebuild the cgraph. */ FOR_EACH_DEFINED_FUNCTION (node) { basic_block bb; if (!gimple_has_body_p (node->symbol.decl) || !(!node->clone_of || node->symbol.decl != node->clone_of->symbol.decl)) continue; /* Don't profile functions produced for builtin stuff. */ if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION || DECL_STRUCT_FUNCTION (node->symbol.decl)->after_tree_profile) continue; push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl)); current_function_decl = node->symbol.decl; FOR_EACH_BB (bb) { gimple_stmt_iterator gsi; for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); if (is_gimple_call (stmt)) update_stmt (stmt); } } cfun->after_tree_profile = 1; update_ssa (TODO_update_ssa); rebuild_cgraph_edges (); current_function_decl = NULL; pop_cfun (); } del_node_map(); return 0; }
static bool tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size, int upper_bound) { basic_block *body = get_loop_body (loop); gimple_stmt_iterator gsi; unsigned int i; bool after_exit; vec<basic_block> path = get_loop_hot_path (loop); size->overall = 0; size->eliminated_by_peeling = 0; size->last_iteration = 0; size->last_iteration_eliminated_by_peeling = 0; size->num_pure_calls_on_hot_path = 0; size->num_non_pure_calls_on_hot_path = 0; size->non_call_stmts_on_hot_path = 0; size->num_branches_on_hot_path = 0; size->constant_iv = 0; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num); for (i = 0; i < loop->num_nodes; i++) { if (edge_to_cancel && body[i] != edge_to_cancel->src && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src)) after_exit = true; else after_exit = false; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit); for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); int num = estimate_num_insns (stmt, &eni_size_weights); bool likely_eliminated = false; bool likely_eliminated_last = false; bool likely_eliminated_peeled = false; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " size: %3i ", num); print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0); } /* Look for reasons why we might optimize this stmt away. */ if (gimple_has_side_effects (stmt)) ; /* Exit conditional. */ else if (exit && body[i] == exit->src && stmt == last_stmt (exit->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in peeled copies.\n"); likely_eliminated_peeled = true; } else if (edge_to_cancel && body[i] == edge_to_cancel->src && stmt == last_stmt (edge_to_cancel->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in last copy.\n"); likely_eliminated_last = true; } /* Sets of IV variables. */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Induction variable computation will" " be folded away.\n"); likely_eliminated = true; } /* Assignments of IV variables. */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop) && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS || constant_after_peeling (gimple_assign_rhs2 (stmt), stmt, loop))) { size->constant_iv = true; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant expression will be folded away.\n"); likely_eliminated = true; } /* Conditionals. */ else if ((gimple_code (stmt) == GIMPLE_COND && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)) || (gimple_code (stmt) == GIMPLE_SWITCH && constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop))) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant conditional.\n"); likely_eliminated = true; } size->overall += num; if (likely_eliminated || likely_eliminated_peeled) size->eliminated_by_peeling += num; if (!after_exit) { size->last_iteration += num; if (likely_eliminated || likely_eliminated_last) size->last_iteration_eliminated_by_peeling += num; } if ((size->overall * 3 / 2 - size->eliminated_by_peeling - size->last_iteration_eliminated_by_peeling) > upper_bound) { free (body); path.release (); return true; } } } while (path.length ()) { basic_block bb = path.pop (); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_CALL) { int flags = gimple_call_flags (stmt); tree decl = gimple_call_fndecl (stmt); if (decl && DECL_IS_BUILTIN (decl) && is_inexpensive_builtin (decl)) ; else if (flags & (ECF_PURE | ECF_CONST)) size->num_pure_calls_on_hot_path++; else size->num_non_pure_calls_on_hot_path++; size->num_branches_on_hot_path ++; } else if (gimple_code (stmt) != GIMPLE_CALL && gimple_code (stmt) != GIMPLE_DEBUG) size->non_call_stmts_on_hot_path++; if (((gimple_code (stmt) == GIMPLE_COND && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) || !constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))) || (gimple_code (stmt) == GIMPLE_SWITCH && !constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop))) && (!exit || bb != exit->src)) size->num_branches_on_hot_path++; } } path.release (); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall, size->eliminated_by_peeling, size->last_iteration, size->last_iteration_eliminated_by_peeling); free (body); return false; }
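/* An example of what constant_after_peeling is after: once this loop is fully unrolled, every i-dependent operand folds and the exit compares disappear, so the estimated size shrinks accordingly (hedged sketch):  */
static void
init_four (int *a)
{
  for (int i = 0; i < 4; i++)
    a[i] = i * 2;	/* i*2 and i<4 become constants in each copy */
}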
static void build_constructors (gimple swtch) { unsigned i, branch_num = gimple_switch_num_labels (swtch); tree pos = info.range_min; for (i = 1; i < branch_num; i++) { tree cs = gimple_switch_label (swtch, i); basic_block bb = label_to_block (CASE_LABEL (cs)); edge e; tree high; gimple_stmt_iterator gsi; int j; if (bb == info.final_bb) e = find_edge (info.switch_bb, bb); else e = single_succ_edge (bb); gcc_assert (e); while (tree_int_cst_lt (pos, CASE_LOW (cs))) { int k; for (k = 0; k < info.phi_count; k++) { constructor_elt *elt; elt = VEC_quick_push (constructor_elt, info.constructors[k], NULL); elt->index = int_const_binop (MINUS_EXPR, pos, info.range_min); elt->value = info.default_values[k]; } pos = int_const_binop (PLUS_EXPR, pos, integer_one_node); } gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs))); j = 0; if (CASE_HIGH (cs)) high = CASE_HIGH (cs); else high = CASE_LOW (cs); for (gsi = gsi_start_phis (info.final_bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple phi = gsi_stmt (gsi); tree val = PHI_ARG_DEF_FROM_EDGE (phi, e); tree low = CASE_LOW (cs); pos = CASE_LOW (cs); do { constructor_elt *elt; elt = VEC_quick_push (constructor_elt, info.constructors[j], NULL); elt->index = int_const_binop (MINUS_EXPR, pos, info.range_min); elt->value = val; pos = int_const_binop (PLUS_EXPR, pos, integer_one_node); } while (!tree_int_cst_lt (high, pos) && tree_int_cst_lt (low, pos)); j++; } } }
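/* The constructors built above become array initializers; at source level the conversion looks like this hedged sketch (CSWTCH mirrors the name GCC gives such arrays):  */
static const int CSWTCH[3] = { 10, 13, 27 };

static int
lookup_after (int i)
{
  /* before:  switch (i) { case 0: return 10; case 1: return 13;
			   case 2: return 27; default: return 0; }  */
  return (unsigned) i < 3u ? CSWTCH[i] : 0;
}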
static unsigned int on_execute_pass(void) { basic_block bb; gimple_stmt_iterator gsi; const char* name; const char* file = EXPR_FILENAME(cfun->decl); const unsigned int line = EXPR_LINENO(cfun->decl); TRACE(); if (DECL_ASSEMBLER_NAME(cfun->decl) == NULL) { printf("--- skipping anonymous function\n"); return 0; } name = IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(cfun->decl)); #if 0 /* debug */ printf("--- passing on function: %s\n", name); #endif track_pragmed_func(file, line, name); FOR_EACH_BB(bb) { for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) { const_gimple stmt = gsi_stmt(gsi); const enum gimple_code code = gimple_code(stmt); if (code == GIMPLE_CALL) { const char* const name = get_called_name(stmt); const tracked_func_t* const tf = find_tracked_func(name); printf("CALL%s: %s()\n", tf ? "_TASK" : "", name); if (tf != NULL) handle_task_call(gsi, tf); } #if 0 /* debug */ if (gimple_has_location(stmt)) { const location_t loc = gimple_location(stmt); const char* type = "STMT"; if (code == GIMPLE_CALL) printf ( "%s locus: .%s/%u.\n", type, LOCATION_FILE(loc), LOCATION_LINE(loc) ); } #endif /* debug */ } } return 0; }
static unsigned int rename_ssa_copies (void) { var_map map; basic_block bb; gimple_stmt_iterator gsi; tree var, part_var; gimple stmt, phi; unsigned x; FILE *debug; bool updated = false; if (dump_file && (dump_flags & TDF_DETAILS)) debug = dump_file; else debug = NULL; map = init_var_map (num_ssa_names); FOR_EACH_BB (bb) { /* Scan for real copies. */ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { stmt = gsi_stmt (gsi); if (gimple_assign_ssa_name_copy_p (stmt)) { tree lhs = gimple_assign_lhs (stmt); tree rhs = gimple_assign_rhs1 (stmt); updated |= copy_rename_partition_coalesce (map, lhs, rhs, debug); } } } FOR_EACH_BB (bb) { /* Treat PHI nodes as copies between the result and each argument. */ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { size_t i; tree res; phi = gsi_stmt (gsi); res = gimple_phi_result (phi); /* Do not process virtual SSA_NAMES. */ if (!is_gimple_reg (SSA_NAME_VAR (res))) continue; for (i = 0; i < gimple_phi_num_args (phi); i++) { tree arg = gimple_phi_arg (phi, i)->def; if (TREE_CODE (arg) == SSA_NAME) updated |= copy_rename_partition_coalesce (map, res, arg, debug); } } } if (debug) dump_var_map (debug, map); /* Now one more pass to make all elements of a partition share the same root variable. */ for (x = 1; x < num_ssa_names; x++) { part_var = partition_to_var (map, x); if (!part_var) continue; var = ssa_name (x); if (debug) { if (SSA_NAME_VAR (var) != SSA_NAME_VAR (part_var)) { fprintf (debug, "Coalesced "); print_generic_expr (debug, var, TDF_SLIM); fprintf (debug, " to "); print_generic_expr (debug, part_var, TDF_SLIM); fprintf (debug, "\n"); } } replace_ssa_name_symbol (var, SSA_NAME_VAR (part_var)); } delete_var_map (map); return updated ? TODO_remove_unused_locals : 0; }
static void find_tail_calls (basic_block bb, struct tailcall **ret) { tree ass_var = NULL_TREE, ret_var, func, param; gimple stmt, call = NULL; gimple_stmt_iterator gsi, agsi; bool tail_recursion; struct tailcall *nw; edge e; tree m, a; basic_block abb; size_t idx; tree var; if (!single_succ_p (bb)) return; for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) { stmt = gsi_stmt (gsi); /* Ignore labels, returns, clobbers and debug stmts. */ if (gimple_code (stmt) == GIMPLE_LABEL || gimple_code (stmt) == GIMPLE_RETURN || gimple_clobber_p (stmt) || is_gimple_debug (stmt)) continue; /* Check for a call. */ if (is_gimple_call (stmt)) { call = stmt; ass_var = gimple_call_lhs (stmt); break; } /* If the statement references memory or volatile operands, fail. */ if (gimple_references_memory_p (stmt) || gimple_has_volatile_ops (stmt)) return; } if (gsi_end_p (gsi)) { edge_iterator ei; /* Recurse to the predecessors. */ FOR_EACH_EDGE (e, ei, bb->preds) find_tail_calls (e->src, ret); return; } /* If the LHS of our call is not just a simple register, we can't transform this into a tail or sibling call. This situation happens in, e.g., "*p = foo ()", where foo returns a struct. In this case we won't have a temporary here, but we need to carry out the side effect anyway, so a tail call is impossible. ??? In some situations (when the struct is returned in memory via invisible argument) we could deal with this, e.g. by passing 'p' itself as that argument to foo, but it's too early to do this here, and expand_call() will not handle it anyway. If it ever can, then we need to revisit this here, to allow that situation. */ if (ass_var && !is_gimple_reg (ass_var)) return; /* We found the call, check whether it is suitable. */ tail_recursion = false; func = gimple_call_fndecl (call); if (func && !DECL_BUILT_IN (func) && recursive_call_p (current_function_decl, func)) { tree arg; for (param = DECL_ARGUMENTS (func), idx = 0; param && idx < gimple_call_num_args (call); param = DECL_CHAIN (param), idx ++) { arg = gimple_call_arg (call, idx); if (param != arg) { /* Make sure there are no problems with copying. The parameter must have a copyable type and the two arguments must have reasonably equivalent types. The latter requirement could be relaxed if we emitted a suitable type conversion statement. */ if (!is_gimple_reg_type (TREE_TYPE (param)) || !useless_type_conversion_p (TREE_TYPE (param), TREE_TYPE (arg))) break; /* The parameter should be a real operand, so that the phi node created for it at the start of the function has the meaning of copying the value. This test implies is_gimple_reg_type from the previous condition; however, this one could be relaxed by being more careful with copying the new value of the parameter (emitting the appropriate GIMPLE_ASSIGN and updating the virtual operands). */ if (!is_gimple_reg (param)) break; } } if (idx == gimple_call_num_args (call) && !param) tail_recursion = true; } /* Make sure the tail invocation of this function does not refer to local variables. */ FOR_EACH_LOCAL_DECL (cfun, idx, var) { if (TREE_CODE (var) != PARM_DECL && auto_var_in_fn_p (var, cfun->decl) && (ref_maybe_used_by_stmt_p (call, var) || call_may_clobber_ref_p (call, var))) return; } /* Now check the statements after the call. None of them has virtual operands, so they may only depend on the call through its return value. The return value should also be dependent on each of them, since we are running after dce. */ m = NULL_TREE; a = NULL_TREE; abb = bb; agsi = gsi; while (1) { tree tmp_a = NULL_TREE; tree tmp_m = NULL_TREE; gsi_next (&agsi); while (gsi_end_p (agsi)) { ass_var = propagate_through_phis (ass_var, single_succ_edge (abb)); abb = single_succ (abb); agsi = gsi_start_bb (abb); } stmt = gsi_stmt (agsi); if (gimple_code (stmt) == GIMPLE_LABEL) continue; if (gimple_code (stmt) == GIMPLE_RETURN) break; if (gimple_clobber_p (stmt)) continue; if (is_gimple_debug (stmt)) continue; if (gimple_code (stmt) != GIMPLE_ASSIGN) return; /* This is a gimple assign. */ if (! process_assignment (stmt, gsi, &tmp_m, &tmp_a, &ass_var)) return; if (tmp_a) { tree type = TREE_TYPE (tmp_a); if (a) a = fold_build2 (PLUS_EXPR, type, fold_convert (type, a), tmp_a); else a = tmp_a; } if (tmp_m) { tree type = TREE_TYPE (tmp_m); if (m) m = fold_build2 (MULT_EXPR, type, fold_convert (type, m), tmp_m); else m = tmp_m; if (a) a = fold_build2 (MULT_EXPR, type, fold_convert (type, a), tmp_m); } } /* See if this is a tail call we can handle. */ ret_var = gimple_return_retval (stmt); /* We may proceed if either there is no return value, or the return value is identical to the call's return value. */ if (ret_var && (ret_var != ass_var)) return; /* If this is not a tail recursive call, we cannot handle addends or multiplicands. */ if (!tail_recursion && (m || a)) return; /* For pointers only allow additions. */ if (m && POINTER_TYPE_P (TREE_TYPE (DECL_RESULT (current_function_decl)))) return; nw = XNEW (struct tailcall); nw->call_gsi = gsi; nw->tail_recursion = tail_recursion; nw->mult = m; nw->add = a; nw->next = *ret; *ret = nw;
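/* The M (mult) and A (add) accumulators above cover cases like this one, where the value is adjusted after the recursive call; a hedged sketch of the before/after shapes:  */
static int
fact_before (int n)
{
  return n <= 1 ? 1 : n * fact_before (n - 1);	/* M accumulates n */
}

static int
fact_after (int n)
{
  int m = 1;			/* multiplicative accumulator */
  while (n > 1)
    {
      m *= n;
      n -= 1;
    }
  return m;			/* m * fact (1) == m */
}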
static void lower_try_catch (gimple_stmt_iterator *gsi, struct lower_data *data) { bool cannot_fallthru; gimple *stmt = gsi_stmt (*gsi); gimple_stmt_iterator i; /* We don't handle GIMPLE_TRY_FINALLY. */ gcc_assert (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH); lower_sequence (gimple_try_eval_ptr (stmt), data); cannot_fallthru = data->cannot_fallthru; i = gsi_start (*gimple_try_cleanup_ptr (stmt)); switch (gimple_code (gsi_stmt (i))) { case GIMPLE_CATCH: /* We expect to see a sequence of GIMPLE_CATCH stmts, each with a catch expression and a body. The whole try/catch may fall through iff any of the catch bodies falls through. */ for (; !gsi_end_p (i); gsi_next (&i)) { data->cannot_fallthru = false; lower_sequence (gimple_catch_handler_ptr ( as_a <gcatch *> (gsi_stmt (i))), data); if (!data->cannot_fallthru) cannot_fallthru = false; } break; case GIMPLE_EH_FILTER: /* The exception filter expression only matters if there is an exception. If the exception does not match EH_FILTER_TYPES, we will execute EH_FILTER_FAILURE, and we will fall through if that falls through. If the exception does match EH_FILTER_TYPES, the stack unwinder will continue up the stack, so we will not fall through. We don't know whether we will throw an exception which matches EH_FILTER_TYPES or not, so we just ignore EH_FILTER_TYPES and assume that we might throw an exception which doesn't match. */ data->cannot_fallthru = false; lower_sequence (gimple_eh_filter_failure_ptr (gsi_stmt (i)), data); if (!data->cannot_fallthru) cannot_fallthru = false; break; case GIMPLE_DEBUG: gcc_checking_assert (gimple_debug_begin_stmt_p (stmt)); break; default: /* This case represents statements to be executed when an exception occurs. Those statements are implicitly followed by a GIMPLE_RESX to resume execution after the exception. So in this case the try/catch never falls through. */ data->cannot_fallthru = false; lower_sequence (gimple_try_cleanup_ptr (stmt), data); break; } data->cannot_fallthru = cannot_fallthru; gsi_next (gsi); }
static void eliminate_tail_call (struct tailcall *t) { tree param, rslt; gimple stmt, call; tree arg; size_t idx; basic_block bb, first; edge e; gimple phi; gimple_stmt_iterator gsi; gimple orig_stmt; stmt = orig_stmt = gsi_stmt (t->call_gsi); bb = gsi_bb (t->call_gsi); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Eliminated tail recursion in bb %d: ", bb->index); print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM); fprintf (dump_file, "\n"); } gcc_assert (is_gimple_call (stmt)); first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)); /* Remove the code after call_gsi that will become unreachable. The possibly unreachable code in other blocks is removed later in cfg cleanup. */ gsi = t->call_gsi; gsi_next (&gsi); while (!gsi_end_p (gsi)) { gimple t = gsi_stmt (gsi); /* Do not remove the return statement, so that redirect_edge_and_branch sees how the block ends. */ if (gimple_code (t) == GIMPLE_RETURN) break; gsi_remove (&gsi, true); release_defs (t); } /* The number of executions of the function has been reduced by the tail call. */ e = single_succ_edge (gsi_bb (t->call_gsi)); decrease_profile (EXIT_BLOCK_PTR_FOR_FN (cfun), e->count, EDGE_FREQUENCY (e)); decrease_profile (ENTRY_BLOCK_PTR_FOR_FN (cfun), e->count, EDGE_FREQUENCY (e)); if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) decrease_profile (e->dest, e->count, EDGE_FREQUENCY (e)); /* Replace the call by a jump to the start of the function. */ e = redirect_edge_and_branch (single_succ_edge (gsi_bb (t->call_gsi)), first); gcc_assert (e); PENDING_STMT (e) = NULL; /* Add phi node entries for arguments. The ordering of the phi nodes should be the same as the ordering of the arguments. */ for (param = DECL_ARGUMENTS (current_function_decl), idx = 0, gsi = gsi_start_phis (first); param; param = DECL_CHAIN (param), idx++) { if (!arg_needs_copy_p (param)) continue; arg = gimple_call_arg (stmt, idx); phi = gsi_stmt (gsi); gcc_assert (param == SSA_NAME_VAR (PHI_RESULT (phi))); add_phi_arg (phi, arg, e, gimple_location (stmt)); gsi_next (&gsi); } /* Update the values of accumulators. */ adjust_accumulator_values (t->call_gsi, t->mult, t->add, e); call = gsi_stmt (t->call_gsi); rslt = gimple_call_lhs (call); if (rslt != NULL_TREE) { /* The result of the call will no longer be defined, so adjust the SSA_NAME_DEF_STMT accordingly. */ SSA_NAME_DEF_STMT (rslt) = gimple_build_nop (); } gsi_remove (&t->call_gsi, true); release_defs (call); }
static unsigned int tree_profiling (void) { struct cgraph_node *node; /* This is a small-ipa pass that gets called only once, from cgraphunit.c:ipa_passes(). */ gcc_assert (cgraph_state == CGRAPH_STATE_IPA_SSA); init_node_map(); FOR_EACH_DEFINED_FUNCTION (node) { if (!gimple_has_body_p (node->symbol.decl)) continue; /* Don't profile functions produced for builtin stuff. */ if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION) continue; push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl)); /* Local pure-const may imply need to fixup the cfg. */ if (execute_fixup_cfg () & TODO_cleanup_cfg) cleanup_tree_cfg (); branch_prob (); if (! flag_branch_probabilities && flag_profile_values) gimple_gen_ic_func_profiler (); if (flag_branch_probabilities && flag_profile_values && flag_value_profile_transformations) gimple_value_profile_transformations (); /* The above could hose dominator info. Currently there is none coming in, this is a safety valve. It should be easy to adjust it, if and when there is some. */ free_dominance_info (CDI_DOMINATORS); free_dominance_info (CDI_POST_DOMINATORS); pop_cfun (); } /* Drop pure/const flags from instrumented functions. */ FOR_EACH_DEFINED_FUNCTION (node) { if (!gimple_has_body_p (node->symbol.decl) || !(!node->clone_of || node->symbol.decl != node->clone_of->symbol.decl)) continue; /* Don't profile functions produced for builtin stuff. */ if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION) continue; cgraph_set_const_flag (node, false, false); cgraph_set_pure_flag (node, false, false); } /* Update call statements and rebuild the cgraph. */ FOR_EACH_DEFINED_FUNCTION (node) { basic_block bb; if (!gimple_has_body_p (node->symbol.decl) || !(!node->clone_of || node->symbol.decl != node->clone_of->symbol.decl)) continue; /* Don't profile functions produced for builtin stuff. */ if (DECL_SOURCE_LOCATION (node->symbol.decl) == BUILTINS_LOCATION) continue; push_cfun (DECL_STRUCT_FUNCTION (node->symbol.decl)); FOR_EACH_BB (bb) { gimple_stmt_iterator gsi; for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); if (is_gimple_call (stmt)) update_stmt (stmt); } } update_ssa (TODO_update_ssa); rebuild_cgraph_edges (); pop_cfun (); } del_node_map(); return 0; }
/* Verify cgraph nodes of given cgraph node. */ void verify_cgraph_node (struct cgraph_node *node) { struct cgraph_edge *e; struct cgraph_node *main_clone; struct function *this_cfun = DECL_STRUCT_FUNCTION (node->decl); struct function *saved_cfun = cfun; basic_block this_block; gimple_stmt_iterator gsi; bool error_found = false; if (errorcount || sorrycount) return; timevar_push (TV_CGRAPH_VERIFY); /* debug_generic_stmt needs correct cfun */ set_cfun (this_cfun); for (e = node->callees; e; e = e->next_callee) if (e->aux) { error ("aux field set for edge %s->%s", cgraph_node_name (e->caller), cgraph_node_name (e->callee)); error_found = true; } if (node->count < 0) { error ("Execution count is negative"); error_found = true; } for (e = node->callers; e; e = e->next_caller) { if (e->count < 0) { error ("caller edge count is negative"); error_found = true; } if (e->frequency < 0) { error ("caller edge frequency is negative"); error_found = true; } if (e->frequency > CGRAPH_FREQ_MAX) { error ("caller edge frequency is too large"); error_found = true; } if (!e->inline_failed) { if (node->global.inlined_to != (e->caller->global.inlined_to ? e->caller->global.inlined_to : e->caller)) { error ("inlined_to pointer is wrong"); error_found = true; } if (node->callers->next_caller) { error ("multiple inline callers"); error_found = true; } } else if (node->global.inlined_to) { error ("inlined_to pointer set for noninline callers"); error_found = true; } } if (!node->callers && node->global.inlined_to) { error ("inlined_to pointer is set but no predecessors found"); error_found = true; } if (node->global.inlined_to == node) { error ("inlined_to pointer refers to itself"); error_found = true; } for (main_clone = cgraph_node (node->decl); main_clone; main_clone = main_clone->next_clone) if (main_clone == node) break; if (!cgraph_node (node->decl)) { error ("node not found in cgraph_hash"); error_found = true; } if (node->analyzed && !TREE_ASM_WRITTEN (node->decl) && (!DECL_EXTERNAL (node->decl) || node->global.inlined_to)) { if (this_cfun->cfg) { /* The nodes we're interested in are never shared, so walk the tree ignoring duplicates. */ struct pointer_set_t *visited_nodes = pointer_set_create (); /* Reach the trees by walking over the CFG, and note the enclosing basic-blocks in the call edges. */ FOR_EACH_BB_FN (this_block, this_cfun) for (gsi = gsi_start_bb (this_block); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple stmt = gsi_stmt (gsi); tree decl; if (is_gimple_call (stmt) && (decl = gimple_call_fndecl (stmt))) { struct cgraph_edge *e = cgraph_edge (node, stmt); if (e) { if (e->aux) { error ("shared call_stmt:"); debug_gimple_stmt (stmt); error_found = true; } if (e->callee->decl != cgraph_node (decl)->decl && e->inline_failed) { error ("edge points to wrong declaration:"); debug_tree (e->callee->decl); fprintf (stderr," Instead of:"); debug_tree (decl); } e->aux = (void *)1; } else { error ("missing callgraph edge for call stmt:"); debug_gimple_stmt (stmt); error_found = true; } } } pointer_set_destroy (visited_nodes); } else /* No CFG available?! */ gcc_unreachable (); for (e = node->callees; e; e = e->next_callee) { if (!e->aux && !e->indirect_call) { error ("edge %s->%s has no corresponding call_stmt", cgraph_node_name (e->caller), cgraph_node_name (e->callee)); debug_gimple_stmt (e->call_stmt); error_found = true; } e->aux = 0; } }
if (error_found) { dump_cgraph_node (stderr, node); internal_error ("verify_cgraph_node failed"); } set_cfun (saved_cfun); timevar_pop (TV_CGRAPH_VERIFY); }
static bool should_duplicate_loop_header_p (basic_block header, struct loop *loop, int *limit) { gimple_stmt_iterator bsi; gimple *last; gcc_assert (!header->aux); /* Loop header copying usually increases the size of the code. This used not to be true, since quite often it is possible to verify that the condition is satisfied in the first iteration and therefore to eliminate it. Jump threading handles these cases now. */ if (optimize_loop_for_size_p (loop)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: optimizing for size.\n", header->index); return false; } gcc_assert (EDGE_COUNT (header->succs) > 0); if (single_succ_p (header)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it has a single successor.\n", header->index); return false; } if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest) && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: both successors are in loop.\n", loop->num); return false; } /* If this is not the original loop header, we want it to have just one predecessor in order to match the && pattern. */ if (header != loop->header && !single_pred_p (header)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it has multiple predecessors.\n", header->index); return false; } last = last_stmt (header); if (gimple_code (last) != GIMPLE_COND) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it does not end with a conditional.\n", header->index); return false; } /* Count the number of instructions and punt on calls. */ for (bsi = gsi_start_bb (header); !gsi_end_p (bsi); gsi_next (&bsi)) { last = gsi_stmt (bsi); if (gimple_code (last) == GIMPLE_LABEL) continue; if (is_gimple_debug (last)) continue; if (gimple_code (last) == GIMPLE_CALL && !gimple_inexpensive_call_p (as_a <gcall *> (last))) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it contains a call.\n", header->index); return false; } *limit -= estimate_num_insns (last, &eni_size_weights); if (*limit < 0) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Not duplicating bb %i: it contains too many insns.\n", header->index); return false; } } if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Will duplicate bb %i\n", header->index); return true; }
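/* Loop header copying in miniature: the header test is duplicated in front of the loop so the body can rotate into do-while form (hedged sketch):  */
static long
sum_before (const int *a, int n)
{
  long s = 0;
  while (n > 0)		/* header test executed on every iteration */
    s += a[--n];
  return s;
}

static long
sum_after (const int *a, int n)
{
  long s = 0;
  if (n > 0)		/* the duplicated header */
    do
      s += a[--n];
    while (n > 0);
  return s;
}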
static void init_copy_prop (void) { basic_block bb; copy_of = XCNEWVEC (prop_value_t, num_ssa_names); FOR_EACH_BB (bb) { gimple_stmt_iterator si; int depth = bb->loop_depth; for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple stmt = gsi_stmt (si); ssa_op_iter iter; tree def; /* The only statements that we care about are those that may generate useful copies. We also need to mark conditional jumps so that their outgoing edges are added to the work lists of the propagator. Avoid copy propagation from an inner into an outer loop. Otherwise, this may move loop variant variables outside of their loops and prevent coalescing opportunities. If the value was loop invariant, it will be hoisted by LICM and exposed for copy propagation. ??? This doesn't make sense. */ if (stmt_ends_bb_p (stmt)) prop_set_simulate_again (stmt, true); else if (stmt_may_generate_copy (stmt) /* Since we are iterating over the statements in BB, not the phi nodes, STMT will always be an assignment. */ && loop_depth_of_name (gimple_assign_rhs1 (stmt)) <= depth) prop_set_simulate_again (stmt, true); else prop_set_simulate_again (stmt, false); /* Mark all the outputs of this statement as not being the copy of anything. */ FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS) if (!prop_simulate_again_p (stmt)) set_copy_of_val (def, def); } for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gimple phi = gsi_stmt (si); tree def; def = gimple_phi_result (phi); if (!is_gimple_reg (def)) prop_set_simulate_again (phi, false); else prop_set_simulate_again (phi, true); if (!prop_simulate_again_p (phi)) set_copy_of_val (def, def); } } }
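/* The copies this pass cares about, in source form (hedged sketch): y is a copy-of x, so uses of y can be replaced by x and the copy removed.  */
static int
plus_one (int x)
{
  int y = x;	/* stmt_may_generate_copy: y_1 = x_2(D) */
  return y + 1;	/* becomes x + 1 after propagation */
}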
static bool generate_builtin (struct loop *loop, bitmap partition, bool copy_p) { bool res = false; unsigned i, x = 0; basic_block *bbs; gimple write = NULL; gimple_stmt_iterator bsi; tree nb_iter = number_of_exit_cond_executions (loop); if (!nb_iter || nb_iter == chrec_dont_know) return false; bbs = get_loop_body_in_dom_order (loop); for (i = 0; i < loop->num_nodes; i++) { basic_block bb = bbs[i]; for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi)) x++; for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi)) { gimple stmt = gsi_stmt (bsi); if (bitmap_bit_p (partition, x++) && is_gimple_assign (stmt) && !is_gimple_reg (gimple_assign_lhs (stmt))) { /* Don't generate the builtins when there is more than one memory write. */ if (write != NULL) goto end; write = stmt; if (bb == loop->latch) nb_iter = number_of_latch_executions (loop); } } } if (!stmt_with_adjacent_zero_store_dr_p (write)) goto end; /* The new statements will be placed before LOOP. */ bsi = gsi_last_bb (loop_preheader_edge (loop)->src); generate_memset_zero (write, gimple_assign_lhs (write), nb_iter, bsi); res = true; /* If this is the last partition for which we generate code, we have to destroy the loop. */ if (!copy_p) { unsigned nbbs = loop->num_nodes; edge exit = single_exit (loop); basic_block src = loop_preheader_edge (loop)->src, dest = exit->dest; redirect_edge_pred (exit, src); exit->flags &= ~(EDGE_TRUE_VALUE|EDGE_FALSE_VALUE); exit->flags |= EDGE_FALLTHRU; cancel_loop_tree (loop); rescan_loop_exit (exit, false, true); for (i = 0; i < nbbs; i++) delete_basic_block (bbs[i]); set_immediate_dominator (CDI_DOMINATORS, dest, recompute_dominator (CDI_DOMINATORS, dest)); } end: free (bbs); return res; }
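/* The partition recognized here is the classic zero-store loop; after distribution it becomes one library call (hedged sketch):  */
static void
clear_before (char *p, unsigned long n)
{
  for (unsigned long i = 0; i < n; i++)
    p[i] = 0;
}

static void
clear_after (char *p, unsigned long n)
{
  __builtin_memset (p, 0, n);	/* generate_memset_zero's result */
}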