static bool
generate_loops_for_partition (struct loop *loop, bitmap partition, bool copy_p)
{
  unsigned i, x;
  gimple_stmt_iterator bsi;
  basic_block *bbs;

  if (copy_p)
    {
      loop = copy_loop_before (loop);
      create_preheader (loop, CP_SIMPLE_PREHEADERS);
      create_bb_after_loop (loop);
    }

  if (loop == NULL)
    return false;

  /* Remove stmts not in the PARTITION bitmap.  The order in which we
     visit the phi nodes and the statements is exactly as in
     stmts_from_loop.  */
  bbs = get_loop_body_in_dom_order (loop);

  for (x = 0, i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi);)
        if (!bitmap_bit_p (partition, x++))
          {
            gimple phi = gsi_stmt (bsi);
            if (!is_gimple_reg (gimple_phi_result (phi)))
              mark_virtual_phi_result_for_renaming (phi);
            remove_phi_node (&bsi, true);
          }
        else
          gsi_next (&bsi);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi);)
        {
          gimple stmt = gsi_stmt (bsi);
          if (gimple_code (gsi_stmt (bsi)) != GIMPLE_LABEL
              && !bitmap_bit_p (partition, x++))
            {
              unlink_stmt_vdef (stmt);
              gsi_remove (&bsi, true);
              release_defs (stmt);
            }
          else
            gsi_next (&bsi);
        }
    }

  free (bbs);
  return true;
}
static void
process_uses (df_ref use, int top_flag)
{
  for (; use; use = DF_REF_NEXT_LOC (use))
    if ((DF_REF_FLAGS (use) & DF_REF_AT_TOP) == top_flag)
      {
        unsigned int uregno = DF_REF_REGNO (use);
        if (reg_defs[uregno]
            && !bitmap_bit_p (local_md, uregno)
            && bitmap_bit_p (local_lr, uregno))
          use_def_ref[DF_REF_ID (use)] = reg_defs[uregno];
      }
}
static void
process_uses (df_ref *use_rec, int top_flag)
{
  df_ref use;

  while ((use = *use_rec++) != NULL)
    if ((DF_REF_FLAGS (use) & DF_REF_AT_TOP) == top_flag)
      {
        unsigned int uregno = DF_REF_REGNO (use);
        if (reg_defs[uregno]
            && !bitmap_bit_p (local_md, uregno)
            && bitmap_bit_p (local_lr, uregno))
          use_def_ref[DF_REF_ID (use)] = reg_defs[uregno];
      }
}
static void
process_uses (df_ref *use_rec, int top_flag)
{
  df_ref use;

  while ((use = *use_rec++) != NULL)
    if ((DF_REF_FLAGS (use) & DF_REF_AT_TOP) == top_flag)
      {
        unsigned int uregno = DF_REF_REGNO (use);
        if (VEC_index (df_ref, reg_defs, uregno)
            && !bitmap_bit_p (local_md, uregno)
            && bitmap_bit_p (local_lr, uregno))
          VEC_replace (df_ref, use_def_ref, DF_REF_ID (use),
                       VEC_index (df_ref, reg_defs, uregno));
      }
}
static void
mark_nodes_having_upstream_mem_writes (struct graph *rdg)
{
  int v, x;
  bitmap seen = BITMAP_ALLOC (NULL);

  for (v = rdg->n_vertices - 1; v >= 0; v--)
    if (!bitmap_bit_p (seen, v))
      {
        unsigned i;
        VEC (int, heap) *nodes = VEC_alloc (int, heap, 3);

        graphds_dfs (rdg, &v, 1, &nodes, false, NULL);

        FOR_EACH_VEC_ELT (int, nodes, i, x)
          {
            if (!bitmap_set_bit (seen, x))
              continue;

            if (RDG_MEM_WRITE_STMT (rdg, x)
                || predecessor_has_mem_write (rdg, &(rdg->vertices[x]))
                /* In anti dependences the read should occur before
                   the write, this is why both the read and the write
                   should be placed in the same partition.  */
                || has_anti_dependence (&(rdg->vertices[x])))
              {
                bitmap_set_bit (upstream_mem_writes, x);
              }
          }

        VEC_free (int, heap, nodes);
      }
}
static bool
sese_bad_liveouts_use (sese region, bitmap liveouts, basic_block bb, tree use)
{
  unsigned ver;
  basic_block def_bb;

  if (TREE_CODE (use) != SSA_NAME)
    return false;

  ver = SSA_NAME_VERSION (use);

  /* If it's in liveouts, the variable will get a new PHI node, and
     the debug use will be properly adjusted.  */
  if (bitmap_bit_p (liveouts, ver))
    return false;

  def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));

  if (!def_bb || !bb_in_sese_p (def_bb, region) || bb_in_sese_p (bb, region))
    return false;

  return true;
}
static inline bool
has_proper_scope_for_analysis (tree t)
{
  /* If the variable has the "used" attribute, treat it as if it had
     been touched by the devil.  */
  if (lookup_attribute ("used", DECL_ATTRIBUTES (t)))
    return false;

  /* Do not want to do anything with volatile except mark any
     function that uses one to be not const or pure.  */
  if (TREE_THIS_VOLATILE (t))
    return false;

  /* Do not care about a local automatic that is not static.  */
  if (!TREE_STATIC (t) && !DECL_EXTERNAL (t))
    return false;

  if (DECL_EXTERNAL (t) || TREE_PUBLIC (t))
    return false;

  /* This is a variable we care about.  Check if we have seen it
     before, and if not add it to the set of variables we care
     about.  */
  if (!bitmap_bit_p (all_module_statics, DECL_UID (t)))
    add_static_var (t);

  return true;
}
/* Estimate cost of cloning NODE.  */
static long
ipcp_estimate_cloning_cost (struct cgraph_node *node)
{
  int freq_sum = 1;
  gcov_type count_sum = 1;
  struct cgraph_edge *e;
  int cost;

  cost = ipcp_estimate_growth (node) * 1000;
  if (!cost)
    {
      if (dump_file)
        fprintf (dump_file, "Versioning of %s will save code size\n",
                 cgraph_node_name (node));
      return 0;
    }

  for (e = node->callers; e; e = e->next_caller)
    if (!bitmap_bit_p (dead_nodes, e->caller->uid)
        && !ipcp_need_redirect_p (e))
      {
        count_sum += e->count;
        freq_sum += e->frequency + 1;
      }

  if (max_count)
    cost /= count_sum * 1000 / max_count + 1;
  else
    cost /= freq_sum * 1000 / REG_BR_PROB_BASE + 1;

  if (dump_file)
    fprintf (dump_file, "Cost of versioning %s is %i, (size: %i, freq: %i)\n",
             cgraph_node_name (node), cost,
             node->local.inline_summary.self_size, freq_sum);

  return cost + 1;
}
static bool
reachable_at_most_once (basic_block va_arg_bb, basic_block va_start_bb)
{
  vec<edge> stack = vNULL;
  edge e;
  edge_iterator ei;
  sbitmap visited;
  bool ret;

  if (va_arg_bb == va_start_bb)
    return true;

  if (! dominated_by_p (CDI_DOMINATORS, va_arg_bb, va_start_bb))
    return false;

  visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);
  ret = true;

  FOR_EACH_EDGE (e, ei, va_arg_bb->preds)
    stack.safe_push (e);

  while (! stack.is_empty ())
    {
      basic_block src;

      e = stack.pop ();
      src = e->src;

      if (e->flags & EDGE_COMPLEX)
        {
          ret = false;
          break;
        }

      if (src == va_start_bb)
        continue;

      /* va_arg_bb can be executed more times than va_start_bb.  */
      if (src == va_arg_bb)
        {
          ret = false;
          break;
        }

      gcc_assert (src != ENTRY_BLOCK_PTR_FOR_FN (cfun));

      if (! bitmap_bit_p (visited, src->index))
        {
          bitmap_set_bit (visited, src->index);
          FOR_EACH_EDGE (e, ei, src->preds)
            stack.safe_push (e);
        }
    }

  stack.release ();
  sbitmap_free (visited);

  return ret;
}
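/* The function above is one instance of a pattern that recurs throughout
   these snippets: an explicit edge/node work list plus a "visited" bitset
   that guards against pushing the same block twice.  The following is a
   minimal standalone sketch of that pattern in plain C; it does not use
   the GCC sbitmap/vec API, and the node type and function name
   (reaches_backward) are made up for illustration only.  */

#include <stdbool.h>
#include <stdlib.h>

struct node
{
  int n_preds;
  int *preds;   /* indices of predecessor nodes */
};

/* Return true if TARGET can be reached from START by following
   predecessor edges in NODES[0..N_NODES-1].  */
static bool
reaches_backward (struct node *nodes, int n_nodes, int start, int target)
{
  if (start == target)
    return true;

  unsigned char *visited = calloc (n_nodes, 1);
  int *stack = malloc (n_nodes * sizeof (int));
  int top = 0;
  bool found = false;

  visited[start] = 1;
  stack[top++] = start;

  while (top > 0 && !found)
    {
      int v = stack[--top];
      for (int i = 0; i < nodes[v].n_preds; i++)
        {
          int p = nodes[v].preds[i];
          if (p == target)
            {
              found = true;
              break;
            }
          if (!visited[p])
            {
              /* Mark before pushing so every node is queued at most
                 once, the same role the sbitmap "visited" plays in
                 reachable_at_most_once above.  */
              visited[p] = 1;
              stack[top++] = p;
            }
        }
    }

  free (visited);
  free (stack);
  return found;
}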
static inline int
marked_insn_p (rtx_insn *insn)
{
  /* Artificial defs are always needed and they do not have an insn.
     We should never see them here.  */
  gcc_assert (insn);
  return bitmap_bit_p (marked, INSN_UID (insn));
}
static bool
ptr_deref_may_alias_decl_p (tree ptr, tree decl)
{
  struct ptr_info_def *pi;

  gcc_assert ((TREE_CODE (ptr) == SSA_NAME
               || TREE_CODE (ptr) == ADDR_EXPR
               || TREE_CODE (ptr) == INTEGER_CST)
              && (TREE_CODE (decl) == VAR_DECL
                  || TREE_CODE (decl) == PARM_DECL
                  || TREE_CODE (decl) == RESULT_DECL));

  /* Non-aliased variables cannot be pointed to.  */
  if (!may_be_aliased (decl))
    return false;

  /* ADDR_EXPR pointers either just offset another pointer or directly
     specify the pointed-to set.  */
  if (TREE_CODE (ptr) == ADDR_EXPR)
    {
      tree base = get_base_address (TREE_OPERAND (ptr, 0));
      if (base && INDIRECT_REF_P (base))
        ptr = TREE_OPERAND (base, 0);
      else if (base && SSA_VAR_P (base))
        return operand_equal_p (base, decl, 0);
      else if (base && CONSTANT_CLASS_P (base))
        return false;
      else
        return true;
    }

  /* We can end up with dereferencing constant pointers.
     Just bail out in this case.  */
  if (TREE_CODE (ptr) == INTEGER_CST)
    return true;

  /* If we do not have useful points-to information for this pointer
     we cannot disambiguate anything else.  */
  pi = SSA_NAME_PTR_INFO (ptr);
  if (!pi)
    return true;

  /* If the decl can be used as a restrict tag and we have a restrict
     pointer and that pointer's points-to set doesn't contain this
     decl, then they can't alias.  */
  if (DECL_RESTRICTED_P (decl)
      && TYPE_RESTRICT (TREE_TYPE (ptr))
      && pi->pt.vars_contains_restrict)
    return bitmap_bit_p (pi->pt.vars, DECL_UID (decl));

  return pt_solution_includes (&pi->pt, decl);
}
static inline void
add_static_var (tree var)
{
  int uid = DECL_UID (var);

  if (!bitmap_bit_p (all_module_statics, uid))
    {
      splay_tree_insert (reference_vars_to_consider,
                         uid, (splay_tree_value) var);
      bitmap_set_bit (all_module_statics, uid);
    }
}
  FOR_EACH_REFERENCED_VAR (var, rvi)
    {
      var_ann_t ann;

      ann = var_ann (var);
      if (!MTAG_P (var)
          && ann->is_aliased
          && !bitmap_bit_p (visited, DECL_UID (var)))
        {
          error ("addressable variable that is aliased but is not in any alias set");
          goto err;
        }
    }
static unsigned int
copyprop_hardreg_forward (void)
{
  struct value_data *all_vd;
  basic_block bb;
  sbitmap visited;
  bool analyze_called = false;

  all_vd = XNEWVEC (struct value_data, last_basic_block_for_fn (cfun));

  visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  if (MAY_HAVE_DEBUG_INSNS)
    debug_insn_changes_pool
      = create_alloc_pool ("debug insn changes pool",
                           sizeof (struct queued_debug_insn_change), 256);

  FOR_EACH_BB_FN (bb, cfun)
    {
      bitmap_set_bit (visited, bb->index);

      /* If a block has a single predecessor, that we've already
         processed, begin with the value data that was live at
         the end of the predecessor block.  */
      /* ??? Ought to use more intelligent queuing of blocks.  */
      if (single_pred_p (bb)
          && bitmap_bit_p (visited, single_pred (bb)->index)
          && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
        {
          all_vd[bb->index] = all_vd[single_pred (bb)->index];
          if (all_vd[bb->index].n_debug_insn_changes)
            {
              unsigned int regno;

              for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
                {
                  if (all_vd[bb->index].e[regno].debug_insn_changes)
                    {
                      all_vd[bb->index].e[regno].debug_insn_changes = NULL;
                      if (--all_vd[bb->index].n_debug_insn_changes == 0)
                        break;
                    }
                }
            }
        }
      else
        init_value_data (all_vd + bb->index);

      copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
    }
static bool
predecessor_has_mem_write (struct graph *rdg, struct vertex *v)
{
  struct graph_edge *e;

  if (v->pred)
    for (e = v->pred; e; e = e->pred_next)
      if (bitmap_bit_p (upstream_mem_writes, e->src)
          /* Don't consider flow channels: a write to memory followed
             by a read from memory.  These channels allow the split of
             the RDG in different partitions.  */
          && !RDG_MEM_WRITE_STMT (rdg, e->src))
        return true;

  return false;
}
static void
cfg_blocks_add (basic_block bb)
{
  bool head = false;

  gcc_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
              && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
  gcc_assert (!bitmap_bit_p (bb_in_list, bb->index));

  if (cfg_blocks_empty_p ())
    {
      cfg_blocks_tail = cfg_blocks_head = 0;
      cfg_blocks_num = 1;
    }
  else
    {
      cfg_blocks_num++;
      if (cfg_blocks_num > cfg_blocks.length ())
        {
          /* We have to grow the array now.  Adjust the queue to occupy
             the full space of the original array.  We do not need to
             initialize the newly allocated portion of the array
             because we keep track of CFG_BLOCKS_HEAD and
             CFG_BLOCKS_TAIL.  */
          cfg_blocks_tail = cfg_blocks.length ();
          cfg_blocks_head = 0;
          cfg_blocks.safe_grow (2 * cfg_blocks_tail);
        }
      /* Minor optimization: we prefer to see blocks with more
         predecessors later, because there is more of a chance that
         the incoming edges will be executable.  */
      else if (EDGE_COUNT (bb->preds)
               >= EDGE_COUNT (cfg_blocks[cfg_blocks_head]->preds))
        cfg_blocks_tail = ((cfg_blocks_tail + 1) % cfg_blocks.length ());
      else
        {
          if (cfg_blocks_head == 0)
            cfg_blocks_head = cfg_blocks.length ();
          --cfg_blocks_head;
          head = true;
        }
    }

  cfg_blocks[head ? cfg_blocks_head : cfg_blocks_tail] = bb;
  bitmap_set_bit (bb_in_list, bb->index);
}
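/* cfg_blocks_add above combines a circular work queue, a doubling growth
   strategy, and a bitset (bb_in_list) that records queue membership so a
   block is never enqueued twice.  Below is a minimal standalone sketch of
   that structure in plain C, for illustration only: the names (work_queue,
   work_queue_init, work_queue_push) are made up and are not GCC API, the
   head-insertion heuristic is omitted, and on growth the sketch explicitly
   relinearizes the live entries rather than relying on the queue already
   being contiguous.  */

#include <stdlib.h>

struct work_queue
{
  int *slots;                 /* circular buffer of item ids */
  unsigned char *in_queue;    /* membership flags, one byte per id */
  size_t cap, head, tail, num;
};

/* Prepare Q with room for CAP queued items and ids below MAX_ID.  */
static void
work_queue_init (struct work_queue *q, size_t cap, size_t max_id)
{
  q->slots = malloc (cap * sizeof (int));
  q->in_queue = calloc (max_id, 1);
  q->cap = cap;
  q->head = q->tail = q->num = 0;
}

/* Append ID (assumed < MAX_ID from init) unless it is already queued;
   double the buffer when it is full.  */
static void
work_queue_push (struct work_queue *q, int id)
{
  if (q->in_queue[id])
    return;                   /* already pending, mirrors bb_in_list */

  if (q->num == q->cap)
    {
      /* Grow and lay the live entries out linearly from index 0,
         the same normalization cfg_blocks_add performs on growth.  */
      int *bigger = malloc (2 * q->cap * sizeof (int));
      for (size_t i = 0; i < q->num; i++)
        bigger[i] = q->slots[(q->head + i) % q->cap];
      free (q->slots);
      q->slots = bigger;
      q->head = 0;
      q->tail = q->num;
      q->cap *= 2;
    }

  q->slots[q->tail] = id;
  q->tail = (q->tail + 1) % q->cap;
  q->num++;
  q->in_queue[id] = 1;
}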
/* Return true if pseudo REGNO can be potentially coalesced.  Use
   SPLIT_ORIGIN_BITMAP to find pseudos whose live ranges were
   split.  */
static bool
coalescable_pseudo_p (int regno, bitmap split_origin_bitmap)
{
  lra_assert (regno >= FIRST_PSEUDO_REGISTER);
  /* Don't coalesce inheritance pseudos because spilled inheritance
     pseudos will be removed in subsequent 'undo inheritance' pass.  */
  return (lra_reg_info[regno].restore_regno < 0
          /* We undo splits for spilled pseudos whose live ranges were
             split.  So don't coalesce them, it is not necessary and
             the undo transformations would be wrong.  */
          && ! bitmap_bit_p (split_origin_bitmap, regno)
          /* We don't want to coalesce regnos with equivalences, at
             least without updating this info.  */
          && ira_reg_equiv[regno].constant == NULL_RTX
          && ira_reg_equiv[regno].memory == NULL_RTX
          && ira_reg_equiv[regno].invariant == NULL_RTX);
}
static inline struct graph_edge *
foll_in_subgraph (struct graph_edge *e, bool forward, bitmap subgraph)
{
  int d;

  if (!subgraph)
    return e;

  while (e)
    {
      d = dfs_edge_dest (e, forward);
      if (bitmap_bit_p (subgraph, d))
        return e;

      e = forward ? e->succ_next : e->pred_next;
    }

  return e;
}
/* Spill pseudos which are assigned to hard registers in SET.  Add
   affected insns for processing in the subsequent constraint
   pass.  */
static void
spill_pseudos (HARD_REG_SET set)
{
  int i;
  bitmap_head to_process;
  rtx_insn *insn;

  if (hard_reg_set_empty_p (set))
    return;

  if (lra_dump_file != NULL)
    {
      fprintf (lra_dump_file, " Spilling non-eliminable hard regs:");
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (set, i))
          fprintf (lra_dump_file, " %d", i);
      fprintf (lra_dump_file, "\n");
    }

  bitmap_initialize (&to_process, &reg_obstack);
  for (i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++)
    if (lra_reg_info[i].nrefs != 0 && reg_renumber[i] >= 0
        && overlaps_hard_reg_set_p (set,
                                    PSEUDO_REGNO_MODE (i), reg_renumber[i]))
      {
        if (lra_dump_file != NULL)
          fprintf (lra_dump_file, " Spilling r%d(%d)\n",
                   i, reg_renumber[i]);
        reg_renumber[i] = -1;
        bitmap_ior_into (&to_process, &lra_reg_info[i].insn_bitmap);
      }

  IOR_HARD_REG_SET (lra_no_alloc_regs, set);
  for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
    if (bitmap_bit_p (&to_process, INSN_UID (insn)))
      {
        lra_push_insn (insn);
        lra_set_used_insn_alternative (insn, -1);
      }

  bitmap_clear (&to_process);
}
static void
collect_pattern_seqs (void)
{
  htab_iterator hti0, hti1, hti2;
  p_hash_bucket hash_bucket;
  p_hash_elem e0, e1;
#if defined STACK_REGS || defined HAVE_cc0
  basic_block bb;
  bitmap_head dont_collect;

  /* Extra initialization step to ensure that no stack registers (if
     present) or cc0 code (if present) are live across abnormal edges.
     Set a flag in DONT_COLLECT for an insn if a stack register is
     live after the insn or the insn is cc0 setter or user.  */
  bitmap_initialize (&dont_collect, NULL);

#ifdef STACK_REGS
  FOR_EACH_BB (bb)
    {
      regset_head live;
      rtx insn;
      rtx prev;

      /* Initialize liveness propagation.  */
      INIT_REG_SET (&live);
      bitmap_copy (&live, DF_LR_OUT (bb));
      df_simulate_initialize_backwards (bb, &live);

      /* Propagate liveness info and mark insns where a stack reg is
         live.  */
      insn = BB_END (bb);
      for (insn = BB_END (bb); ; insn = prev)
        {
          prev = PREV_INSN (insn);
          if (INSN_P (insn))
            {
              int reg;
              for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; reg++)
                {
                  if (REGNO_REG_SET_P (&live, reg))
                    {
                      bitmap_set_bit (&dont_collect, INSN_UID (insn));
                      break;
                    }
                }
            }

          if (insn == BB_HEAD (bb))
            break;

          df_simulate_one_insn_backwards (bb, insn, &live);
          insn = prev;
        }

      /* Free unused data.  */
      CLEAR_REG_SET (&live);
    }
#endif

#ifdef HAVE_cc0
  /* Mark CC0 setters and users as ineligible for collection into
     sequences.  This is an over-conservative fix, since it is OK to
     include a cc0_setter, but only if we also include the
     corresponding cc0_user, and vice versa.  */
  FOR_EACH_BB (bb)
    {
      rtx insn;
      rtx next_tail;

      next_tail = NEXT_INSN (BB_END (bb));

      for (insn = BB_HEAD (bb); insn != next_tail; insn = NEXT_INSN (insn))
        {
          if (INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
            bitmap_set_bit (&dont_collect, INSN_UID (insn));
        }
    }
#endif

#endif /* defined STACK_REGS || defined HAVE_cc0 */

  /* Initialize PATTERN_SEQS to empty.  */
  pattern_seqs = 0;

  /* Try to match every abstractable insn with every other insn in the
     same HASH_BUCKET.  */
  FOR_EACH_HTAB_ELEMENT (hash_buckets, hash_bucket, p_hash_bucket, hti0)
    if (htab_elements (hash_bucket->seq_candidates) > 1)
      FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e0, p_hash_elem,
                             hti1)
        FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e1, p_hash_elem,
                               hti2)
          if (e0 != e1
#if defined STACK_REGS || defined HAVE_cc0
              && !bitmap_bit_p (&dont_collect, INSN_UID (e0->insn))
              && !bitmap_bit_p (&dont_collect, INSN_UID (e1->insn))
#endif
              )
            match_seqs (e0, e1);

#if defined STACK_REGS || defined HAVE_cc0
  /* Free unused data.  */
  bitmap_clear (&dont_collect);
#endif
}
void
find_comparison_dom_walker::before_dom_children (basic_block bb)
{
  struct comparison *last_cmp;
  rtx_insn *insn, *next, *last_clobber;
  bool last_cmp_valid;
  bool need_purge = false;
  bitmap killed;

  killed = BITMAP_ALLOC (NULL);

  /* The last comparison that was made.  Will be reset to NULL
     once the flags are clobbered.  */
  last_cmp = NULL;

  /* True iff the last comparison has not been clobbered, nor
     have its inputs.  Used to eliminate duplicate compares.  */
  last_cmp_valid = false;

  /* The last insn that clobbered the flags, if that insn is of
     a form that may be valid for eliminating a following compare.
     To be reset to NULL once the flags are set otherwise.  */
  last_clobber = NULL;

  /* Propagate the last live comparison throughout the extended basic
     block.  */
  if (single_pred_p (bb))
    {
      last_cmp = (struct comparison *) single_pred (bb)->aux;
      if (last_cmp)
        last_cmp_valid = last_cmp->inputs_valid;
    }

  for (insn = BB_HEAD (bb); insn; insn = next)
    {
      rtx src;

      next = (insn == BB_END (bb) ? NULL : NEXT_INSN (insn));
      if (!NONDEBUG_INSN_P (insn))
        continue;

      /* Compute the set of registers modified by this instruction.  */
      bitmap_clear (killed);
      df_simulate_find_defs (insn, killed);

      src = conforming_compare (insn);
      if (src)
        {
          rtx eh_note = NULL;

          if (cfun->can_throw_non_call_exceptions)
            eh_note = find_reg_note (insn, REG_EH_REGION, NULL);

          if (last_cmp_valid && can_eliminate_compare (src, eh_note, last_cmp))
            {
              if (eh_note)
                need_purge = true;
              delete_insn (insn);
              continue;
            }

          last_cmp = XCNEW (struct comparison);
          last_cmp->insn = insn;
          last_cmp->prev_clobber = last_clobber;
          last_cmp->in_a = XEXP (src, 0);
          last_cmp->in_b = XEXP (src, 1);
          last_cmp->eh_note = eh_note;
          last_cmp->orig_mode = GET_MODE (src);
          all_compares.safe_push (last_cmp);

          /* It's unusual, but be prepared for comparison patterns that
             also clobber an input, or perhaps a scratch.  */
          last_clobber = NULL;
          last_cmp_valid = true;
        }

      /* Notice if this instruction kills the flags register.  */
      else if (bitmap_bit_p (killed, targetm.flags_regnum))
        {
          /* See if this insn could be the "clobber" that eliminates
             a future comparison.  */
          last_clobber = (arithmetic_flags_clobber_p (insn) ? insn : NULL);

          /* In either case, the previous compare is no longer valid.  */
          last_cmp = NULL;
          last_cmp_valid = false;
        }

      /* Notice if this instruction uses the flags register.  */
      else if (last_cmp)
        find_flags_uses_in_insn (last_cmp, insn);

      /* Notice if any of the inputs to the comparison have changed.  */
      if (last_cmp_valid
          && (bitmap_bit_p (killed, REGNO (last_cmp->in_a))
              || (REG_P (last_cmp->in_b)
                  && bitmap_bit_p (killed, REGNO (last_cmp->in_b)))))
        last_cmp_valid = false;
    }
unsigned HOST_WIDE_INT
compute_builtin_object_size (tree ptr, int object_size_type)
{
  gcc_assert (object_size_type >= 0 && object_size_type <= 3);

  if (! offset_limit)
    init_offset_limit ();

  if (TREE_CODE (ptr) == ADDR_EXPR)
    return addr_object_size (ptr, object_size_type);
  else if (TREE_CODE (ptr) == CALL_EXPR)
    {
      tree arg = pass_through_call (ptr);

      if (arg)
        return compute_builtin_object_size (arg, object_size_type);
      else
        return alloc_object_size (ptr, object_size_type);
    }
  else if (TREE_CODE (ptr) == SSA_NAME
           && POINTER_TYPE_P (TREE_TYPE (ptr))
           && object_sizes[object_size_type] != NULL)
    {
      if (!bitmap_bit_p (computed[object_size_type], SSA_NAME_VERSION (ptr)))
        {
          struct object_size_info osi;
          bitmap_iterator bi;
          unsigned int i;

          if (dump_file)
            {
              fprintf (dump_file, "Computing %s %sobject size for ",
                       (object_size_type & 2) ? "minimum" : "maximum",
                       (object_size_type & 1) ? "sub" : "");
              print_generic_expr (dump_file, ptr, dump_flags);
              fprintf (dump_file, ":\n");
            }

          osi.visited = BITMAP_ALLOC (NULL);
          osi.reexamine = BITMAP_ALLOC (NULL);
          osi.object_size_type = object_size_type;
          osi.depths = NULL;
          osi.stack = NULL;
          osi.tos = NULL;

          /* First pass: walk UD chains, compute object sizes that can
             be computed.  osi.reexamine bitmap at the end will contain
             what variables were found in dependency cycles and
             therefore need to be reexamined.  */
          osi.pass = 0;
          osi.changed = false;
          collect_object_sizes_for (&osi, ptr);

          /* Second pass: keep recomputing object sizes of variables
             that need reexamination, until no object sizes are
             increased or all object sizes are computed.  */
          if (! bitmap_empty_p (osi.reexamine))
            {
              bitmap reexamine = BITMAP_ALLOC (NULL);

              /* If looking for minimum instead of maximum object size,
                 detect cases where a pointer is increased in a loop.
                 Although even without this detection pass 2 would
                 eventually terminate, it could take a long time.
                 If a pointer is increasing this way, we need to assume
                 0 object size.
                 E.g. p = &buf[0]; while (cond) p = p + 4;  */
              if (object_size_type & 2)
                {
                  osi.depths = xcalloc (num_ssa_names, sizeof (unsigned int));
                  osi.stack = xmalloc (num_ssa_names * sizeof (unsigned int));
                  osi.tos = osi.stack;
                  osi.pass = 1;
                  /* collect_object_sizes_for is changing
                     osi.reexamine bitmap, so iterate over a copy.  */
                  bitmap_copy (reexamine, osi.reexamine);
                  EXECUTE_IF_SET_IN_BITMAP (reexamine, 0, i, bi)
                    if (bitmap_bit_p (osi.reexamine, i))
                      check_for_plus_in_loops (&osi, ssa_name (i));

                  free (osi.depths);
                  osi.depths = NULL;
                  free (osi.stack);
                  osi.stack = NULL;
                  osi.tos = NULL;
                }

              do
                {
                  osi.pass = 2;
                  osi.changed = false;
                  /* collect_object_sizes_for is changing
                     osi.reexamine bitmap, so iterate over a copy.  */
                  bitmap_copy (reexamine, osi.reexamine);
                  EXECUTE_IF_SET_IN_BITMAP (reexamine, 0, i, bi)
                    if (bitmap_bit_p (osi.reexamine, i))
                      {
                        collect_object_sizes_for (&osi, ssa_name (i));
                        if (dump_file && (dump_flags & TDF_DETAILS))
                          {
                            fprintf (dump_file, "Reexamining ");
                            print_generic_expr (dump_file, ssa_name (i),
                                                dump_flags);
                            fprintf (dump_file, "\n");
                          }
                      }
                }
              while (osi.changed);

              BITMAP_FREE (reexamine);
            }

          EXECUTE_IF_SET_IN_BITMAP (osi.reexamine, 0, i, bi)
            bitmap_set_bit (computed[object_size_type], i);

          /* Debugging dumps.  */
          if (dump_file)
            {
              EXECUTE_IF_SET_IN_BITMAP (osi.visited, 0, i, bi)
                if (object_sizes[object_size_type][i]
                    != unknown[object_size_type])
                  {
                    print_generic_expr (dump_file, ssa_name (i),
                                        dump_flags);
                    fprintf (dump_file,
                             ": %s %sobject size "
                             HOST_WIDE_INT_PRINT_UNSIGNED "\n",
                             (object_size_type & 2) ? "minimum" : "maximum",
                             (object_size_type & 1) ? "sub" : "",
                             object_sizes[object_size_type][i]);
                  }
            }

          BITMAP_FREE (osi.reexamine);
          BITMAP_FREE (osi.visited);
        }

      return object_sizes[object_size_type][SSA_NAME_VERSION (ptr)];
    }

  return unknown[object_size_type];
}
static bool
generate_builtin (struct loop *loop, bitmap partition, bool copy_p)
{
  bool res = false;
  unsigned i, x = 0;
  basic_block *bbs;
  gimple write = NULL;
  tree op0, op1;
  gimple_stmt_iterator bsi;
  tree nb_iter = number_of_exit_cond_executions (loop);

  if (!nb_iter || nb_iter == chrec_dont_know)
    return false;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        x++;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);

          if (bitmap_bit_p (partition, x++)
              && is_gimple_assign (stmt)
              && !is_gimple_reg (gimple_assign_lhs (stmt)))
            {
              /* Don't generate the builtins when there are more than
                 one memory write.  */
              if (write != NULL)
                goto end;

              write = stmt;
            }
        }
    }

  if (!write)
    goto end;

  op0 = gimple_assign_lhs (write);
  op1 = gimple_assign_rhs1 (write);

  if (!(TREE_CODE (op0) == ARRAY_REF
        || TREE_CODE (op0) == INDIRECT_REF))
    goto end;

  /* The new statements will be placed before LOOP.  */
  bsi = gsi_last_bb (loop_preheader_edge (loop)->src);

  if (gimple_assign_rhs_code (write) == INTEGER_CST
      && (integer_zerop (op1) || real_zerop (op1)))
    res = generate_memset_zero (write, op0, nb_iter, bsi);

  /* If this is the last partition for which we generate code, we have
     to destroy the loop.  */
  if (res && !copy_p)
    {
      unsigned nbbs = loop->num_nodes;
      basic_block src = loop_preheader_edge (loop)->src;
      basic_block dest = single_exit (loop)->dest;
      prop_phis (dest);
      make_edge (src, dest, EDGE_FALLTHRU);
      cancel_loop_tree (loop);

      for (i = 0; i < nbbs; i++)
        delete_basic_block (bbs[i]);

      set_immediate_dominator (CDI_DOMINATORS, dest,
                               recompute_dominator (CDI_DOMINATORS, dest));
    }

 end:
  free (bbs);
  return res;
}
void
find_comparison_dom_walker::before_dom_children (basic_block bb)
{
  struct comparison *last_cmp;
  rtx insn, next, last_clobber;
  bool last_cmp_valid;
  bool need_purge = false;
  bitmap killed;

  killed = BITMAP_ALLOC (NULL);

  /* The last comparison that was made.  Will be reset to NULL
     once the flags are clobbered.  */
  last_cmp = NULL;

  /* True iff the last comparison has not been clobbered, nor
     have its inputs.  Used to eliminate duplicate compares.  */
  last_cmp_valid = false;

  /* The last insn that clobbered the flags, if that insn is of
     a form that may be valid for eliminating a following compare.
     To be reset to NULL once the flags are set otherwise.  */
  last_clobber = NULL;

  /* Propagate the last live comparison throughout the extended basic
     block.  */
  if (single_pred_p (bb))
    {
      last_cmp = (struct comparison *) single_pred (bb)->aux;
      if (last_cmp)
        last_cmp_valid = last_cmp->inputs_valid;
    }

  for (insn = BB_HEAD (bb); insn; insn = next)
    {
      rtx src;

      next = (insn == BB_END (bb) ? NULL_RTX : NEXT_INSN (insn));
      if (!NONDEBUG_INSN_P (insn))
        continue;

      /* Compute the set of registers modified by this instruction.  */
      bitmap_clear (killed);
      df_simulate_find_defs (insn, killed);

      src = conforming_compare (insn);
      if (src)
        {
          enum machine_mode src_mode = GET_MODE (src);
          rtx eh_note = NULL;

          if (flag_non_call_exceptions)
            eh_note = find_reg_note (insn, REG_EH_REGION, NULL);

          if (!last_cmp_valid)
            goto dont_delete;

          /* Take care that it's in the same EH region.  */
          if (flag_non_call_exceptions
              && !rtx_equal_p (eh_note, last_cmp->eh_note))
            goto dont_delete;

          /* Make sure the compare is redundant with the previous.  */
          if (!rtx_equal_p (last_cmp->in_a, XEXP (src, 0))
              || !rtx_equal_p (last_cmp->in_b, XEXP (src, 1)))
            goto dont_delete;

          /* New mode must be compatible with the previous compare mode.  */
          {
            enum machine_mode new_mode
              = targetm.cc_modes_compatible (last_cmp->orig_mode, src_mode);

            if (new_mode == VOIDmode)
              goto dont_delete;

            if (new_mode != last_cmp->orig_mode)
              {
                rtx x, flags = gen_rtx_REG (src_mode, targetm.flags_regnum);

                /* Generate new comparison for substitution.  */
                x = gen_rtx_COMPARE (new_mode, XEXP (src, 0), XEXP (src, 1));
                x = gen_rtx_SET (VOIDmode, flags, x);

                if (!validate_change (last_cmp->insn,
                                      &PATTERN (last_cmp->insn), x, false))
                  goto dont_delete;

                last_cmp->orig_mode = new_mode;
              }
          }

          /* All tests and substitutions succeeded!  */
          if (eh_note)
            need_purge = true;

          delete_insn (insn);
          continue;

        dont_delete:
          last_cmp = XCNEW (struct comparison);
          last_cmp->insn = insn;
          last_cmp->prev_clobber = last_clobber;
          last_cmp->in_a = XEXP (src, 0);
          last_cmp->in_b = XEXP (src, 1);
          last_cmp->eh_note = eh_note;
          last_cmp->orig_mode = src_mode;
          all_compares.safe_push (last_cmp);

          /* It's unusual, but be prepared for comparison patterns that
             also clobber an input, or perhaps a scratch.  */
          last_clobber = NULL;
          last_cmp_valid = true;
        }

      /* Notice if this instruction kills the flags register.  */
      else if (bitmap_bit_p (killed, targetm.flags_regnum))
        {
          /* See if this insn could be the "clobber" that eliminates
             a future comparison.  */
          last_clobber = (arithmetic_flags_clobber_p (insn) ? insn : NULL);

          /* In either case, the previous compare is no longer valid.  */
          last_cmp = NULL;
          last_cmp_valid = false;
          continue;
        }

      /* Notice if this instruction uses the flags register.  */
      else if (last_cmp)
        find_flags_uses_in_insn (last_cmp, insn);

      /* Notice if any of the inputs to the comparison have changed.  */
      if (last_cmp_valid
          && (bitmap_bit_p (killed, REGNO (last_cmp->in_a))
              || (REG_P (last_cmp->in_b)
                  && bitmap_bit_p (killed, REGNO (last_cmp->in_b)))))
        last_cmp_valid = false;
    }
static unsigned HOST_WIDE_INT
addr_object_size (struct object_size_info *osi, const_tree ptr,
                  int object_size_type)
{
  tree pt_var, pt_var_size = NULL_TREE, var_size, bytes;

  gcc_assert (TREE_CODE (ptr) == ADDR_EXPR);

  pt_var = TREE_OPERAND (ptr, 0);
  while (handled_component_p (pt_var))
    pt_var = TREE_OPERAND (pt_var, 0);

  if (pt_var
      && TREE_CODE (pt_var) == MEM_REF)
    {
      unsigned HOST_WIDE_INT sz;

      if (!osi || (object_size_type & 1) != 0
          || TREE_CODE (TREE_OPERAND (pt_var, 0)) != SSA_NAME)
        {
          sz = compute_builtin_object_size (TREE_OPERAND (pt_var, 0),
                                            object_size_type & ~1);
        }
      else
        {
          tree var = TREE_OPERAND (pt_var, 0);
          if (osi->pass == 0)
            collect_object_sizes_for (osi, var);
          if (bitmap_bit_p (computed[object_size_type],
                            SSA_NAME_VERSION (var)))
            sz = object_sizes[object_size_type][SSA_NAME_VERSION (var)];
          else
            sz = unknown[object_size_type];
        }
      if (sz != unknown[object_size_type])
        {
          double_int dsz = double_int::from_uhwi (sz)
                           - mem_ref_offset (pt_var);
          if (dsz.is_negative ())
            sz = 0;
          else if (dsz.fits_uhwi ())
            sz = dsz.to_uhwi ();
          else
            sz = unknown[object_size_type];
        }

      if (sz != unknown[object_size_type] && sz < offset_limit)
        pt_var_size = size_int (sz);
    }
  else if (pt_var
           && DECL_P (pt_var)
           && host_integerp (DECL_SIZE_UNIT (pt_var), 1)
           && (unsigned HOST_WIDE_INT)
              tree_low_cst (DECL_SIZE_UNIT (pt_var), 1) < offset_limit)
    pt_var_size = DECL_SIZE_UNIT (pt_var);
  else if (pt_var
           && TREE_CODE (pt_var) == STRING_CST
           && TYPE_SIZE_UNIT (TREE_TYPE (pt_var))
           && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1)
           && (unsigned HOST_WIDE_INT)
              tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1)
              < offset_limit)
    pt_var_size = TYPE_SIZE_UNIT (TREE_TYPE (pt_var));
  else
    return unknown[object_size_type];

  if (pt_var != TREE_OPERAND (ptr, 0))
    {
      tree var;

      if (object_size_type & 1)
        {
          var = TREE_OPERAND (ptr, 0);

          while (var != pt_var
                 && TREE_CODE (var) != BIT_FIELD_REF
                 && TREE_CODE (var) != COMPONENT_REF
                 && TREE_CODE (var) != ARRAY_REF
                 && TREE_CODE (var) != ARRAY_RANGE_REF
                 && TREE_CODE (var) != REALPART_EXPR
                 && TREE_CODE (var) != IMAGPART_EXPR)
            var = TREE_OPERAND (var, 0);

          if (var != pt_var && TREE_CODE (var) == ARRAY_REF)
            var = TREE_OPERAND (var, 0);

          if (! TYPE_SIZE_UNIT (TREE_TYPE (var))
              || ! host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (var)), 1)
              || (pt_var_size
                  && tree_int_cst_lt (pt_var_size,
                                      TYPE_SIZE_UNIT (TREE_TYPE (var)))))
            var = pt_var;
          else if (var != pt_var && TREE_CODE (pt_var) == MEM_REF)
            {
              tree v = var;
              /* For &X->fld, compute object size only if fld isn't the
                 last field, as struct { int i; char c[1]; } is often
                 used instead of flexible array member.  */
              while (v && v != pt_var)
                switch (TREE_CODE (v))
                  {
                  case ARRAY_REF:
                    if (TYPE_SIZE_UNIT (TREE_TYPE (TREE_OPERAND (v, 0)))
                        && TREE_CODE (TREE_OPERAND (v, 1)) == INTEGER_CST)
                      {
                        tree domain
                          = TYPE_DOMAIN (TREE_TYPE (TREE_OPERAND (v, 0)));
                        if (domain
                            && TYPE_MAX_VALUE (domain)
                            && TREE_CODE (TYPE_MAX_VALUE (domain))
                               == INTEGER_CST
                            && tree_int_cst_lt (TREE_OPERAND (v, 1),
                                                TYPE_MAX_VALUE (domain)))
                          {
                            v = NULL_TREE;
                            break;
                          }
                      }
                    v = TREE_OPERAND (v, 0);
                    break;
                  case REALPART_EXPR:
                  case IMAGPART_EXPR:
                    v = NULL_TREE;
                    break;
                  case COMPONENT_REF:
                    if (TREE_CODE (TREE_TYPE (v)) != ARRAY_TYPE)
                      {
                        v = NULL_TREE;
                        break;
                      }
                    while (v != pt_var && TREE_CODE (v) == COMPONENT_REF)
                      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
                          != UNION_TYPE
                          && TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
                          != QUAL_UNION_TYPE)
                        break;
                      else
                        v = TREE_OPERAND (v, 0);
                    if (TREE_CODE (v) == COMPONENT_REF
                        && TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
                           == RECORD_TYPE)
                      {
                        tree fld_chain = DECL_CHAIN (TREE_OPERAND (v, 1));
                        for (; fld_chain; fld_chain = DECL_CHAIN (fld_chain))
                          if (TREE_CODE (fld_chain) == FIELD_DECL)
                            break;

                        if (fld_chain)
                          {
                            v = NULL_TREE;
                            break;
                          }
                        v = TREE_OPERAND (v, 0);
                      }
                    while (v != pt_var && TREE_CODE (v) == COMPONENT_REF)
                      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
                          != UNION_TYPE
                          && TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
                          != QUAL_UNION_TYPE)
                        break;
                      else
                        v = TREE_OPERAND (v, 0);
                    if (v != pt_var)
                      v = NULL_TREE;
                    else
                      v = pt_var;
                    break;
                  default:
                    v = pt_var;
                    break;
                  }
              if (v == pt_var)
                var = pt_var;
            }
        }
      else
        var = pt_var;

      if (var != pt_var)
        var_size = TYPE_SIZE_UNIT (TREE_TYPE (var));
      else if (!pt_var_size)
        return unknown[object_size_type];
      else
        var_size = pt_var_size;

      bytes = compute_object_offset (TREE_OPERAND (ptr, 0), var);
      if (bytes != error_mark_node)
        {
          if (TREE_CODE (bytes) == INTEGER_CST
              && tree_int_cst_lt (var_size, bytes))
            bytes = size_zero_node;
          else
            bytes = size_binop (MINUS_EXPR, var_size, bytes);
        }
      if (var != pt_var
          && pt_var_size
          && TREE_CODE (pt_var) == MEM_REF
          && bytes != error_mark_node)
        {
          tree bytes2 = compute_object_offset (TREE_OPERAND (ptr, 0), pt_var);
          if (bytes2 != error_mark_node)
            {
              if (TREE_CODE (bytes2) == INTEGER_CST
                  && tree_int_cst_lt (pt_var_size, bytes2))
                bytes2 = size_zero_node;
              else
                bytes2 = size_binop (MINUS_EXPR, pt_var_size, bytes2);
              bytes = size_binop (MIN_EXPR, bytes, bytes2);
            }
        }
    }
  else if (!pt_var_size)
    return unknown[object_size_type];
  else
    bytes = pt_var_size;

  if (host_integerp (bytes, 1))
    return tree_low_cst (bytes, 1);

  return unknown[object_size_type];
}
static inline bool
bb_seen_p (basic_block bb)
{
  return bitmap_bit_p (bb_seen, bb->index);
}
static void
initialize_uninitialized_regs (void)
{
  basic_block bb;
  bitmap already_genned = BITMAP_ALLOC (NULL);

  if (optimize == 1)
    {
      df_live_add_problem ();
      df_live_set_all_dirty ();
    }

  df_analyze ();

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      bitmap lr = DF_LR_IN (bb);
      bitmap ur = DF_LIVE_IN (bb);
      bitmap_clear (already_genned);

      FOR_BB_INSNS (bb, insn)
        {
          df_ref use;
          if (!NONDEBUG_INSN_P (insn))
            continue;

          FOR_EACH_INSN_USE (use, insn)
            {
              unsigned int regno = DF_REF_REGNO (use);

              /* Only do this for the pseudos.  */
              if (regno < FIRST_PSEUDO_REGISTER)
                continue;

              /* Do not generate multiple moves for the same regno.
                 This is common for sequences of subreg operations.
                 They would be deleted during combine but there is no
                 reason to churn the system.  */
              if (bitmap_bit_p (already_genned, regno))
                continue;

              /* A use is MUST uninitialized if it reaches the top of
                 the block from the inside of the block (the lr test)
                 and no def for it reaches the top of the block from
                 outside of the block (the ur test).  */
              if (bitmap_bit_p (lr, regno)
                  && (!bitmap_bit_p (ur, regno)))
                {
                  rtx_insn *move_insn;
                  rtx reg = DF_REF_REAL_REG (use);

                  bitmap_set_bit (already_genned, regno);

                  start_sequence ();
                  emit_move_insn (reg, CONST0_RTX (GET_MODE (reg)));
                  move_insn = get_insns ();
                  end_sequence ();
                  emit_insn_before (move_insn, insn);
                  if (dump_file)
                    fprintf (dump_file,
                             "adding initialization in %s of reg %d in block %d for insn %d.\n",
                             current_function_name (), regno, bb->index,
                             INSN_UID (insn));
                }
            }
/* The major function for aggressive pseudo coalescing of moves, done
   only if both pseudos were spilled and are not special reload
   pseudos.  */
bool
lra_coalesce (void)
{
  basic_block bb;
  rtx mv, set, insn, next, *sorted_moves;
  int i, mv_num, sregno, dregno;
  int coalesced_moves;
  int max_regno = max_reg_num ();
  bitmap_head involved_insns_bitmap;

  timevar_push (TV_LRA_COALESCE);

  if (lra_dump_file != NULL)
    fprintf (lra_dump_file,
             "\n********** Pseudos coalescing #%d: **********\n\n",
             ++lra_coalesce_iter);
  first_coalesced_pseudo = XNEWVEC (int, max_regno);
  next_coalesced_pseudo = XNEWVEC (int, max_regno);
  for (i = 0; i < max_regno; i++)
    first_coalesced_pseudo[i] = next_coalesced_pseudo[i] = i;
  sorted_moves = XNEWVEC (rtx, get_max_uid ());
  mv_num = 0;
  /* Collect moves.  */
  coalesced_moves = 0;
  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS_SAFE (bb, insn, next)
        if (INSN_P (insn)
            && (set = single_set (insn)) != NULL_RTX
            && REG_P (SET_DEST (set)) && REG_P (SET_SRC (set))
            && (sregno = REGNO (SET_SRC (set))) >= FIRST_PSEUDO_REGISTER
            && (dregno = REGNO (SET_DEST (set))) >= FIRST_PSEUDO_REGISTER
            && mem_move_p (sregno, dregno)
            && coalescable_pseudo_p (sregno) && coalescable_pseudo_p (dregno)
            && ! side_effects_p (set)
            && !(lra_intersected_live_ranges_p
                 (lra_reg_info[sregno].live_ranges,
                  lra_reg_info[dregno].live_ranges)))
          sorted_moves[mv_num++] = insn;
    }
  qsort (sorted_moves, mv_num, sizeof (rtx), move_freq_compare_func);
  /* Coalesced copies, most frequently executed first.  */
  bitmap_initialize (&coalesced_pseudos_bitmap, &reg_obstack);
  bitmap_initialize (&involved_insns_bitmap, &reg_obstack);
  for (i = 0; i < mv_num; i++)
    {
      mv = sorted_moves[i];
      set = single_set (mv);
      lra_assert (set != NULL && REG_P (SET_SRC (set))
                  && REG_P (SET_DEST (set)));
      sregno = REGNO (SET_SRC (set));
      dregno = REGNO (SET_DEST (set));
      if (first_coalesced_pseudo[sregno] == first_coalesced_pseudo[dregno])
        {
          coalesced_moves++;
          if (lra_dump_file != NULL)
            fprintf (lra_dump_file,
                     " Coalescing move %i:r%d-r%d (freq=%d)\n",
                     INSN_UID (mv), sregno, dregno,
                     BLOCK_FOR_INSN (mv)->frequency);
          /* We updated involved_insns_bitmap when doing the merge.  */
        }
      else if (!(lra_intersected_live_ranges_p
                 (lra_reg_info[first_coalesced_pseudo[sregno]].live_ranges,
                  lra_reg_info[first_coalesced_pseudo[dregno]].live_ranges)))
        {
          coalesced_moves++;
          if (lra_dump_file != NULL)
            fprintf (lra_dump_file,
                     " Coalescing move %i:r%d(%d)-r%d(%d) (freq=%d)\n",
                     INSN_UID (mv), sregno, ORIGINAL_REGNO (SET_SRC (set)),
                     dregno, ORIGINAL_REGNO (SET_DEST (set)),
                     BLOCK_FOR_INSN (mv)->frequency);
          bitmap_ior_into (&involved_insns_bitmap,
                           &lra_reg_info[sregno].insn_bitmap);
          bitmap_ior_into (&involved_insns_bitmap,
                           &lra_reg_info[dregno].insn_bitmap);
          merge_pseudos (sregno, dregno);
        }
    }
  bitmap_initialize (&used_pseudos_bitmap, &reg_obstack);
  FOR_EACH_BB (bb)
    {
      update_live_info (df_get_live_in (bb));
      update_live_info (df_get_live_out (bb));
      FOR_BB_INSNS_SAFE (bb, insn, next)
        if (INSN_P (insn)
            && bitmap_bit_p (&involved_insns_bitmap, INSN_UID (insn)))
          {
            if (! substitute (&insn))
              continue;
            lra_update_insn_regno_info (insn);
            if ((set = single_set (insn)) != NULL_RTX && set_noop_p (set))
              {
                /* Coalesced move.  */
                if (lra_dump_file != NULL)
                  fprintf (lra_dump_file, " Removing move %i (freq=%d)\n",
                           INSN_UID (insn),
                           BLOCK_FOR_INSN (insn)->frequency);
                lra_set_insn_deleted (insn);
              }
          }
    }
  bitmap_clear (&used_pseudos_bitmap);
  bitmap_clear (&involved_insns_bitmap);
  bitmap_clear (&coalesced_pseudos_bitmap);
  if (lra_dump_file != NULL && coalesced_moves != 0)
    fprintf (lra_dump_file, "Coalesced Moves = %d\n", coalesced_moves);
  free (sorted_moves);
  free (next_coalesced_pseudo);
  free (first_coalesced_pseudo);
  timevar_pop (TV_LRA_COALESCE);
  return coalesced_moves != 0;
}
static inline bool
already_processed_vertex_p (bitmap processed, int v)
{
  return (bitmap_bit_p (processed, v)
          || !bitmap_bit_p (remaining_stmts, v));
}
static bool
verify_use (basic_block bb, basic_block def_bb, use_operand_p use_p,
            tree stmt, bool check_abnormal, bool is_virtual,
            bitmap names_defined_in_bb)
{
  bool err = false;
  tree ssa_name = USE_FROM_PTR (use_p);

  err = verify_ssa_name (ssa_name, is_virtual);

  if (!TREE_VISITED (ssa_name))
    if (verify_imm_links (stderr, ssa_name))
      err = true;

  TREE_VISITED (ssa_name) = 1;

  if (IS_EMPTY_STMT (SSA_NAME_DEF_STMT (ssa_name))
      && default_def (SSA_NAME_VAR (ssa_name)) == ssa_name)
    ; /* Default definitions have empty statements.  Nothing to do.  */
  else if (!def_bb)
    {
      error ("missing definition");
      err = true;
    }
  else if (bb != def_bb
           && !dominated_by_p (CDI_DOMINATORS, bb, def_bb))
    {
      error ("definition in block %i does not dominate use in block %i",
             def_bb->index, bb->index);
      err = true;
    }
  else if (bb == def_bb
           && names_defined_in_bb != NULL
           && !bitmap_bit_p (names_defined_in_bb,
                             SSA_NAME_VERSION (ssa_name)))
    {
      error ("definition in block %i follows the use", def_bb->index);
      err = true;
    }

  if (check_abnormal
      && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (ssa_name))
    {
      error ("SSA_NAME_OCCURS_IN_ABNORMAL_PHI should be set");
      err = true;
    }

  /* Make sure the use is in an appropriate list by checking the
     previous element to make sure it's the same.  */
  if (use_p->prev == NULL)
    {
      error ("no immediate_use list");
      err = true;
    }
  else
    {
      tree listvar;
      if (use_p->prev->use == NULL)
        listvar = use_p->prev->stmt;
      else
        listvar = USE_FROM_PTR (use_p->prev);
      if (listvar != ssa_name)
        {
          error ("wrong immediate use list");
          err = true;
        }
    }

  if (err)
    {
      fprintf (stderr, "for SSA_NAME: ");
      print_generic_expr (stderr, ssa_name, TDF_VOPS);
      fprintf (stderr, " in statement:\n");
      print_generic_stmt (stderr, stmt, TDF_VOPS);
    }

  return err;
}