/* Decide whether in-charge thunks can be used for the constructor or
   destructor FN, and if so, which ones.  Returns NO_THUNKS when thunk
   generation must not be attempted; otherwise returns the result of
   scanning FN's body for uses of the in-charge parameter (ALL_THUNKS
   when FN has no in-charge parameter at all).  */

static enum in_charge_use
compute_use_thunks (tree fn)
{
  tree last_arg, fn_parm;

  /* A VTT parameter means FN already is a base-object variant.  */
  if (DECL_HAS_VTT_PARM_P (fn))
    return NO_THUNKS;

  if (flag_apple_kext)
    return NO_THUNKS;

  if (flag_clone_structors)
    return NO_THUNKS;

  /* Functions that are too small will just get inlined back in anyway.
     Let the inliner do something useful instead.  */
  if (flag_inline_functions
      && estimate_num_insns (DECL_SAVED_TREE (fn)) < MAX_INLINE_INSNS_AUTO)
    return NO_THUNKS;

  /* If function accepts variable arguments, give up.  */
  last_arg = tree_last (TYPE_ARG_TYPES (TREE_TYPE (fn)));
  if (! VOID_TYPE_P (TREE_VALUE (last_arg)))
    return NO_THUNKS;

  /* If constructor expects vector (AltiVec) arguments, give up.
     FN_PARM is a PARM_DECL, so the VECTOR_TYPE check must be applied
     to its type; the previous code compared the code of the decl
     itself and could never match.  */
  for (fn_parm = DECL_ARGUMENTS (fn); fn_parm; fn_parm = TREE_CHAIN (fn_parm))
    if (TREE_CODE (TREE_TYPE (fn_parm)) == VECTOR_TYPE)
      return NO_THUNKS;

  if (DECL_HAS_IN_CHARGE_PARM_P (fn))
    {
      int parmno;
      struct thunk_tree_walk_data data;

      /* The in-charge parameter is the second one.  Start from
	 NULL_TREE so the assert below trips on a malformed parameter
	 list instead of reading an uninitialized field.  */
      data.in_charge_parm = NULL_TREE;
      for (parmno = 0, fn_parm = DECL_ARGUMENTS (fn);
	   fn_parm;
	   ++parmno, fn_parm = TREE_CHAIN (fn_parm))
	if (parmno == 1)
	  {
	    data.in_charge_parm = fn_parm;
	    break;
	  }

      /* If every use of the in-charge parameter ANDs it with 1, then
	 the functions that have in-charge set to 1 and 3 are
	 equivalent, likewise 0 and 2.  Check for this (common in
	 practice).  Likewise, if every use tests for equality with 0,
	 then values 1, 2 and 3 are equivalent.  */
      gcc_assert (data.in_charge_parm != NULL_TREE);
      data.in_charge_use = ALL_THUNKS;
      walk_tree_without_duplicates (&DECL_SAVED_TREE (fn),
				    examine_tree_for_in_charge_use,
				    &data);
      return data.in_charge_use;
    }

  return ALL_THUNKS;
}
/* Decide whether HEADER of LOOP is worth duplicating for loop header
   copying.  *LIMIT bounds the number of instructions we may still
   duplicate and is decremented by HEADER's estimated size.  Returns
   true when HEADER ends in a conditional with exactly one successor
   leaving the loop, contains no calls, and fits in the budget.  */
static bool
should_duplicate_loop_header_p (basic_block header, struct loop *loop,
				int *limit)
{
  gimple_stmt_iterator gsi;
  gimple stmt;

  /* Do not copy one block more than once (we do not really want to do
     loop peeling here).  */
  if (header->aux)
    return false;

  /* Loop header copying usually increases size of the code.  This used
     not to be true, since quite often it is possible to verify that the
     condition is satisfied in the first iteration and therefore to
     eliminate it.  Jump threading handles these cases now.  */
  if (optimize_loop_for_size_p (loop))
    return false;

  gcc_assert (EDGE_COUNT (header->succs) > 0);

  /* A fall-through header cannot be a loop guard.  */
  if (single_succ_p (header))
    return false;

  /* One of the two successors must leave the loop.  */
  if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest)
      && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest))
    return false;

  /* If this is not the original loop header, we want it to have just
     one predecessor in order to match the && pattern.  */
  if (header != loop->header && !single_pred_p (header))
    return false;

  stmt = last_stmt (header);
  if (gimple_code (stmt) != GIMPLE_COND)
    return false;

  /* Approximately copy the conditions that used to be used in jump.c --
     at most 20 insns and no calls.  */
  for (gsi = gsi_start_bb (header); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      /* Labels and debug statements are free.  */
      if (gimple_code (stmt) == GIMPLE_LABEL || is_gimple_debug (stmt))
	continue;

      if (is_gimple_call (stmt))
	return false;

      *limit -= estimate_num_insns (stmt, &eni_size_weights);
      if (*limit < 0)
	return false;
    }

  return true;
}
/* Return the estimated number of instructions in basic block BB,
   using the size-oriented cost weights.  */
static int
count_insns (basic_block bb)
{
  int total = 0;
  gimple_stmt_iterator it;

  for (it = gsi_start_bb (bb); !gsi_end_p (it); gsi_next (&it))
    total += estimate_num_insns (gsi_stmt (it), &eni_size_weights);

  return total;
}
/* Estimate the number of instructions in LOOP.  The count starts at 1
   so even an empty loop has nonzero size.  */
unsigned
tree_num_loop_insns (struct loop *loop)
{
  basic_block *bbs = get_loop_body (loop);
  block_stmt_iterator it;
  unsigned total = 1;
  unsigned ix;

  for (ix = 0; ix < loop->num_nodes; ix++)
    for (it = bsi_start (bbs[ix]); !bsi_end_p (it); bsi_next (&it))
      total += estimate_num_insns (bsi_stmt (it));

  /* get_loop_body allocates the block array; release it.  */
  free (bbs);

  return total;
}
/* Estimate the number of instructions in LOOP, costed with WEIGHTS.  */
unsigned
tree_num_loop_insns (struct loop *loop, eni_weights *weights)
{
  basic_block *bbs = get_loop_body (loop);
  gimple_stmt_iterator it;
  unsigned total = 0;
  unsigned ix;

  for (ix = 0; ix < loop->num_nodes; ix++)
    for (it = gsi_start_bb (bbs[ix]); !gsi_end_p (it); gsi_next (&it))
      total += estimate_num_insns (gsi_stmt (it), weights);

  /* get_loop_body allocates the block array; release it.  */
  free (bbs);

  return total;
}
/* Decide whether HEADER of LOOP should be duplicated for loop header
   copying.  *LIMIT bounds the remaining instruction budget and is
   decremented by HEADER's estimated size.  Returns true when HEADER
   ends in a COND_EXPR with one successor outside the loop, contains
   no calls, and fits in the budget.  */
static bool
should_duplicate_loop_header_p (basic_block header, struct loop *loop,
				int *limit)
{
  block_stmt_iterator it;
  tree stmt;

  /* Do not copy one block more than once (we do not really want to do
     loop peeling here).  */
  if (header->aux)
    return false;

  gcc_assert (EDGE_COUNT (header->succs) > 0);

  /* A fall-through header cannot be a loop guard.  */
  if (EDGE_COUNT (header->succs) == 1)
    return false;

  /* One of the two successors must leave the loop.  */
  if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest)
      && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest))
    return false;

  /* If this is not the original loop header, we want it to have just
     one predecessor in order to match the && pattern.  */
  if (header != loop->header && EDGE_COUNT (header->preds) >= 2)
    return false;

  stmt = last_stmt (header);
  if (TREE_CODE (stmt) != COND_EXPR)
    return false;

  /* Approximately copy the conditions that used to be used in jump.c --
     at most 20 insns and no calls.  */
  for (it = bsi_start (header); !bsi_end_p (it); bsi_next (&it))
    {
      stmt = bsi_stmt (it);

      /* Labels cost nothing.  */
      if (TREE_CODE (stmt) == LABEL_EXPR)
	continue;

      if (get_call_expr_in (stmt))
	return false;

      *limit -= estimate_num_insns (stmt);
      if (*limit < 0)
	return false;
    }

  return true;
}
static bool tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size, int upper_bound) { basic_block *body = get_loop_body (loop); gimple_stmt_iterator gsi; unsigned int i; bool after_exit; vec<basic_block> path = get_loop_hot_path (loop); size->overall = 0; size->eliminated_by_peeling = 0; size->last_iteration = 0; size->last_iteration_eliminated_by_peeling = 0; size->num_pure_calls_on_hot_path = 0; size->num_non_pure_calls_on_hot_path = 0; size->non_call_stmts_on_hot_path = 0; size->num_branches_on_hot_path = 0; size->constant_iv = 0; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num); for (i = 0; i < loop->num_nodes; i++) { if (edge_to_cancel && body[i] != edge_to_cancel->src && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src)) after_exit = true; else after_exit = false; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit); for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); int num = estimate_num_insns (stmt, &eni_size_weights); bool likely_eliminated = false; bool likely_eliminated_last = false; bool likely_eliminated_peeled = false; if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, " size: %3i ", num); print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0); } /* Look for reasons why we might optimize this stmt away. */ if (gimple_has_side_effects (stmt)) ; /* Exit conditional. 
*/ else if (exit && body[i] == exit->src && stmt == last_stmt (exit->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in peeled copies.\n"); likely_eliminated_peeled = true; } else if (edge_to_cancel && body[i] == edge_to_cancel->src && stmt == last_stmt (edge_to_cancel->src)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Exit condition will be eliminated " "in last copy.\n"); likely_eliminated_last = true; } /* Sets of IV variables */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop)) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Induction variable computation will" " be folded away.\n"); likely_eliminated = true; } /* Assignments of IV variables. */ else if (gimple_code (stmt) == GIMPLE_ASSIGN && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop) && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS || constant_after_peeling (gimple_assign_rhs2 (stmt), stmt, loop))) { size->constant_iv = true; if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant expression will be folded away.\n"); likely_eliminated = true; } /* Conditionals. */ else if ((gimple_code (stmt) == GIMPLE_COND && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop) /* We don't simplify all constant compares so make sure they are not both constant already. See PR70288. */ && (! is_gimple_min_invariant (gimple_cond_lhs (stmt)) || ! is_gimple_min_invariant (gimple_cond_rhs (stmt)))) || (gimple_code (stmt) == GIMPLE_SWITCH && constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop) && ! 
is_gimple_min_invariant (gimple_switch_index ( as_a <gswitch *> (stmt))))) { if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, " Constant conditional.\n"); likely_eliminated = true; } size->overall += num; if (likely_eliminated || likely_eliminated_peeled) size->eliminated_by_peeling += num; if (!after_exit) { size->last_iteration += num; if (likely_eliminated || likely_eliminated_last) size->last_iteration_eliminated_by_peeling += num; } if ((size->overall * 3 / 2 - size->eliminated_by_peeling - size->last_iteration_eliminated_by_peeling) > upper_bound) { free (body); path.release (); return true; } } } while (path.length ()) { basic_block bb = path.pop (); for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_CALL) { int flags = gimple_call_flags (stmt); tree decl = gimple_call_fndecl (stmt); if (decl && DECL_IS_BUILTIN (decl) && is_inexpensive_builtin (decl)) ; else if (flags & (ECF_PURE | ECF_CONST)) size->num_pure_calls_on_hot_path++; else size->num_non_pure_calls_on_hot_path++; size->num_branches_on_hot_path ++; } else if (gimple_code (stmt) != GIMPLE_CALL && gimple_code (stmt) != GIMPLE_DEBUG) size->non_call_stmts_on_hot_path++; if (((gimple_code (stmt) == GIMPLE_COND && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop) || constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))) || (gimple_code (stmt) == GIMPLE_SWITCH && !constant_after_peeling (gimple_switch_index ( as_a <gswitch *> (stmt)), stmt, loop))) && (!exit || bb != exit->src)) size->num_branches_on_hot_path++; } } path.release (); if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall, size->eliminated_by_peeling, size->last_iteration, size->last_iteration_eliminated_by_peeling); free (body); return false; }
/* Decide whether HEADER of LOOP should be duplicated for loop header
   copying.  *LIMIT bounds the remaining instruction budget and is
   decremented by HEADER's estimated size.  Returns true when HEADER
   ends in a conditional with one successor leaving the loop, has a
   single predecessor (unless it is the original header), contains no
   expensive calls, and fits in the budget.  Emits a TDF_DETAILS dump
   line explaining each decision.  */
static bool
should_duplicate_loop_header_p (basic_block header, struct loop *loop,
				int *limit)
{
  gimple_stmt_iterator bsi;
  gimple *last;

  /* Every block is visited at most once; the caller marks copied
     blocks via the aux field.  */
  gcc_assert (!header->aux);

  /* Loop header copying usually increases size of the code.  This used
     not to be true, since quite often it is possible to verify that the
     condition is satisfied in the first iteration and therefore to
     eliminate it.  Jump threading handles these cases now.  */
  if (optimize_loop_for_size_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 " Not duplicating bb %i: optimizing for size.\n",
		 header->index);
      return false;
    }

  gcc_assert (EDGE_COUNT (header->succs) > 0);
  if (single_succ_p (header))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 " Not duplicating bb %i: it is single succ.\n",
		 header->index);
      return false;
    }

  if (flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 0)->dest)
      && flow_bb_inside_loop_p (loop, EDGE_SUCC (header, 1)->dest))
    {
      /* Fixed: the message reports "bb %i", but the original passed
	 loop->num; pass header->index like every other diagnostic
	 here.  Also corrects the "sucessors" misspelling.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 " Not duplicating bb %i: both successors are in loop.\n",
		 header->index);
      return false;
    }

  /* If this is not the original loop header, we want it to have just
     one predecessor in order to match the && pattern.  */
  if (header != loop->header && !single_pred_p (header))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 " Not duplicating bb %i: it has multiple predecessors.\n",
		 header->index);
      return false;
    }

  last = last_stmt (header);
  if (gimple_code (last) != GIMPLE_COND)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 " Not duplicating bb %i: it does not end by conditional.\n",
		 header->index);
      return false;
    }

  /* Count number of instructions and punt on calls.  */
  for (bsi = gsi_start_bb (header); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      last = gsi_stmt (bsi);

      if (gimple_code (last) == GIMPLE_LABEL)
	continue;

      if (is_gimple_debug (last))
	continue;

      /* Cheap calls (e.g. inexpensive builtins) are tolerated.  */
      if (gimple_code (last) == GIMPLE_CALL
	  && !gimple_inexpensive_call_p (as_a <gcall *> (last)))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     " Not duplicating bb %i: it contains call.\n",
		     header->index);
	  return false;
	}

      *limit -= estimate_num_insns (last, &eni_size_weights);
      if (*limit < 0)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     " Not duplicating bb %i contains too many insns.\n",
		     header->index);
	  return false;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, " Will duplicate bb %i\n", header->index);
  return true;
}