/* Append, at the end of BB, a comparison of COND's operands and a jump to
   DEST that is taken when COND holds.  */

static void
add_test (rtx cond, basic_block bb, basic_block dest)
{
  rtx seq, jump, label;
  enum machine_mode mode;
  rtx op0 = XEXP (cond, 0), op1 = XEXP (cond, 1);
  enum rtx_code code = GET_CODE (cond);

  mode = GET_MODE (XEXP (cond, 0));
  if (mode == VOIDmode)
    mode = GET_MODE (XEXP (cond, 1));

  start_sequence ();
  op0 = force_operand (op0, NULL_RTX);
  op1 = force_operand (op1, NULL_RTX);
  label = block_label (dest);
  do_compare_rtx_and_jump (op0, op1, code, 0, mode, NULL_RTX, NULL_RTX, label);

  jump = get_last_insn ();
  JUMP_LABEL (jump) = label;

  /* The jump is supposed to handle an unlikely special case.  */
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB, const0_rtx, REG_NOTES (jump));

  LABEL_NUSES (label)++;

  seq = get_insns ();
  end_sequence ();
  emit_insn_after (seq, BB_END (bb));
}
/* Generate and return an insn sequence that prefetches the memory at
   ADDRESS + DELTA; WRITE selects a write prefetch.  */

static rtx
gen_speculative_prefetch (rtx address, gcov_type delta, int write)
{
  rtx tmp;
  rtx sequence;

  /* TODO: we do the prefetching for just one iteration ahead, which
     often is not enough.  */
  start_sequence ();
  if (offsettable_address_p (0, VOIDmode, address))
    tmp = plus_constant (copy_rtx (address), delta);
  else
    {
      tmp = simplify_gen_binary (PLUS, Pmode,
                                 copy_rtx (address), GEN_INT (delta));
      tmp = force_operand (tmp, NULL);
    }
  if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
        (tmp, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
    tmp = force_reg (Pmode, tmp);
  emit_insn (gen_prefetch (tmp, GEN_INT (write), GEN_INT (3)));
  sequence = get_insns ();
  end_sequence ();

  return sequence;
}
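/* A minimal sketch, not part of the original sources: one way to act on the
   TODO above is to scale DELTA by a lookahead factor so the prefetch lands
   several iterations ahead.  SPECULATIVE_PREFETCH_LOOKAHEAD and the wrapper
   below are hypothetical names introduced here for illustration only.  */
#define SPECULATIVE_PREFETCH_LOOKAHEAD 4

static rtx
gen_speculative_prefetch_lookahead (rtx address, gcov_type delta, int write)
{
  /* Reuse the single-stride generator above with a scaled stride.  */
  return gen_speculative_prefetch (address,
                                   delta * SPECULATIVE_PREFETCH_LOOKAHEAD,
                                   write);
}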
/* Pack a single LDAP modify operation (operation, attribute and values),
   taken from the hashref CHANGE, into DEST.  */
static void
ldap_pack_modop(SV *dest, SV *change) {
    if (change && SvOK(change)) {
        HV *hv;
        STRLEN offset1, offset2;
        if (!SvROK(change) || !(hv = (HV*)SvRV(change)) || (SvTYPE(hv) != SVt_PVHV))
            croak("bad change description");
        offset1 = start_sequence(dest);
        pack_enum(dest, SvIV(hv_fetchs_def_undef(hv, "operation")));
        offset2 = start_sequence(dest);
        pack_string_utf8(dest, hv_fetchs_def_undef(hv, "attribute"));
        pack_set_of_string_utf8(dest, hv_fetchs_def_undef(hv, "values"));
        end_sequence(dest, offset2);
        end_sequence(dest, offset1);
    }
}
/* End the innermost sequence started with start_sequence and emit the
   insns it collected into the enclosing sequence.  */

void
pop_sequence ()
{
  rtx sequence_first = get_insns ();

  end_sequence ();
  emit_insns (sequence_first);
}
static void
insert_value_copy_on_edge (edge e, int dest, tree src, source_location locus)
{
  rtx seq, x;
  enum machine_mode dest_mode, src_mode;
  int unsignedp;
  tree var;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "Inserting a value copy on edge BB%d->BB%d : PART.%d = ",
               e->src->index,
               e->dest->index, dest);
      print_generic_expr (dump_file, src, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  gcc_assert (SA.partition_to_pseudo[dest]);

  set_location_for_edge (e);
  /* If a locus is provided, override the default.  */
  if (locus)
    set_curr_insn_source_location (locus);

  start_sequence ();

  var = SSA_NAME_VAR (partition_to_var (SA.map, dest));
  src_mode = TYPE_MODE (TREE_TYPE (src));
  dest_mode = GET_MODE (SA.partition_to_pseudo[dest]);
  gcc_assert (src_mode == TYPE_MODE (TREE_TYPE (var)));
  gcc_assert (!REG_P (SA.partition_to_pseudo[dest])
              || dest_mode == promote_decl_mode (var, &unsignedp));

  if (src_mode != dest_mode)
    {
      x = expand_expr (src, NULL, src_mode, EXPAND_NORMAL);
      x = convert_modes (dest_mode, src_mode, x, unsignedp);
    }
  else if (src_mode == BLKmode)
    {
      x = SA.partition_to_pseudo[dest];
      store_expr (src, x, 0, false);
    }
  else
    x = expand_expr (src, SA.partition_to_pseudo[dest],
                     dest_mode, EXPAND_NORMAL);

  if (x != SA.partition_to_pseudo[dest])
    emit_move_insn (SA.partition_to_pseudo[dest], x);
  seq = get_insns ();
  end_sequence ();

  insert_insn_on_edge (seq, e);
}
/* Generate code for transformations 3 and 4 (with MODE and OPERATION,
   operands OP1 and OP2, result TARGET, at most SUB subtractions, and
   probability of taking the optimal path(s) PROB1 and PROB2).  */
static rtx
gen_mod_subtract (enum machine_mode mode, enum rtx_code operation,
                  rtx target, rtx op1, rtx op2, int sub, int prob1, int prob2)
{
  rtx tmp, tmp1, jump;
  rtx end_label = gen_label_rtx ();
  rtx sequence;
  int i;

  start_sequence ();

  if (!REG_P (op2))
    {
      tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, copy_rtx (op2));
    }
  else
    tmp = op2;

  emit_move_insn (target, copy_rtx (op1));
  do_compare_rtx_and_jump (target, tmp, LTU, 0, mode, NULL_RTX,
                           NULL_RTX, end_label);

  /* Add branch probability to jump we just created.  */
  jump = get_last_insn ();
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob1), REG_NOTES (jump));

  for (i = 0; i < sub; i++)
    {
      tmp1 = expand_simple_binop (mode, MINUS, target, tmp, target,
                                  0, OPTAB_WIDEN);
      if (tmp1 != target)
        emit_move_insn (target, tmp1);
      do_compare_rtx_and_jump (target, tmp, LTU, 0, mode, NULL_RTX,
                               NULL_RTX, end_label);

      /* Add branch probability to jump we just created.  */
      jump = get_last_insn ();
      REG_NOTES (jump)
        = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob2), REG_NOTES (jump));
    }

  tmp1 = simplify_gen_binary (operation, mode, copy_rtx (target), copy_rtx (tmp));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (target, tmp1);

  emit_label (end_label);

  sequence = get_insns ();
  end_sequence ();
  rebuild_jump_labels (sequence);
  return sequence;
}
void
pack_compare_request_args(SV *dest, SV *dn, SV *attribute, SV *value) {
    STRLEN offset1, offset2;
    offset1 = start_constructed(dest, ASN1_APPLICATION|ASN1_CONSTRUCTED, LDAP_OP_COMPARE_REQUEST);
    pack_string_utf8(dest, dn);
    offset2 = start_sequence(dest);
    pack_string_utf8(dest, attribute);
    pack_string_utf8(dest, value);
    end_sequence(dest, offset2);
    end_constructed(dest, offset1);
}
/* Emit a move from SRC_REG to DEST_REG immediately before NEXT_INSN and
   return the emitted insns.  */

static rtx
insert_move_insn_before (rtx next_insn, rtx dest_reg, rtx src_reg)
{
  rtx insns;

  start_sequence ();
  emit_move_insn (dest_reg, src_reg);
  insns = get_insns ();
  end_sequence ();
  emit_insn_before (insns, next_insn);
  return insns;
}
void
pack_modify_request_args(SV *dest, SV *dn, SV **args, I32 n) {
    STRLEN offset1, offset2;
    I32 i;
    offset1 = start_constructed(dest, ASN1_APPLICATION|ASN1_CONSTRUCTED, LDAP_OP_MODIFY_REQUEST);
    pack_string_utf8(dest, dn);
    offset2 = start_sequence(dest);
    for (i = 0; i < n; i++)
        ldap_pack_modop(dest, args[i]);
    end_sequence(dest, offset2);
    end_constructed(dest, offset1);
}
/* Generate code for transformation 2 (with MODE and OPERATION, operands OP1
   and OP2, result TARGET and probability of taking the optimal path PROB).  */
static rtx
gen_mod_pow2 (enum machine_mode mode, enum rtx_code operation, rtx target,
              rtx op1, rtx op2, int prob)
{
  rtx tmp, tmp1, tmp2, tmp3, jump;
  rtx neq_label = gen_label_rtx ();
  rtx end_label = gen_label_rtx ();
  rtx sequence;

  start_sequence ();

  if (!REG_P (op2))
    {
      tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, copy_rtx (op2));
    }
  else
    tmp = op2;

  tmp1 = expand_simple_binop (mode, PLUS, tmp, constm1_rtx, NULL_RTX,
                              0, OPTAB_WIDEN);
  tmp2 = expand_simple_binop (mode, AND, tmp, tmp1, NULL_RTX,
                              0, OPTAB_WIDEN);
  do_compare_rtx_and_jump (tmp2, const0_rtx, NE, 0, mode, NULL_RTX,
                           NULL_RTX, neq_label);

  /* Add branch probability to jump we just created.  */
  jump = get_last_insn ();
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB,
                         GEN_INT (REG_BR_PROB_BASE - prob),
                         REG_NOTES (jump));

  tmp3 = expand_simple_binop (mode, AND, op1, tmp1, target,
                              0, OPTAB_WIDEN);
  if (tmp3 != target)
    emit_move_insn (copy_rtx (target), tmp3);
  emit_jump_insn (gen_jump (end_label));
  emit_barrier ();

  emit_label (neq_label);
  tmp1 = simplify_gen_binary (operation, mode, copy_rtx (op1), copy_rtx (tmp));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (target, tmp1);

  emit_label (end_label);

  sequence = get_insns ();
  end_sequence ();
  rebuild_jump_labels (sequence);
  return sequence;
}
/* Generate code for transformation 1 (with MODE and OPERATION, operands OP1
   and OP2, whose value is expected to be VALUE, result TARGET and
   probability of taking the optimal path PROB).  */
static rtx
gen_divmod_fixed_value (enum machine_mode mode, enum rtx_code operation,
                        rtx target, rtx op1, rtx op2, gcov_type value,
                        int prob)
{
  rtx tmp, tmp1, jump;
  rtx neq_label = gen_label_rtx ();
  rtx end_label = gen_label_rtx ();
  rtx sequence;

  start_sequence ();

  if (!REG_P (op2))
    {
      tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, copy_rtx (op2));
    }
  else
    tmp = op2;

  do_compare_rtx_and_jump (tmp, GEN_INT (value), NE, 0, mode, NULL_RTX,
                           NULL_RTX, neq_label);

  /* Add branch probability to jump we just created.  */
  jump = get_last_insn ();
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB,
                         GEN_INT (REG_BR_PROB_BASE - prob),
                         REG_NOTES (jump));

  tmp1 = simplify_gen_binary (operation, mode, copy_rtx (op1), GEN_INT (value));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (copy_rtx (target), copy_rtx (tmp1));

  emit_jump_insn (gen_jump (end_label));
  emit_barrier ();

  emit_label (neq_label);
  tmp1 = simplify_gen_binary (operation, mode, copy_rtx (op1), copy_rtx (tmp));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (copy_rtx (target), copy_rtx (tmp1));

  emit_label (end_label);

  sequence = get_insns ();
  end_sequence ();
  rebuild_jump_labels (sequence);
  return sequence;
}
rtx
compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp, rtx label,
                      int prob, rtx cinsn)
{
  rtx seq, jump, cond;
  enum machine_mode mode;

  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  start_sequence ();
  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      /* A hack -- there seems to be no easy generic way to make a
         conditional jump from a ccmode comparison.  */
      gcc_assert (cinsn);
      cond = XEXP (SET_SRC (pc_set (cinsn)), 0);
      gcc_assert (GET_CODE (cond) == comp);
      gcc_assert (rtx_equal_p (op0, XEXP (cond, 0)));
      gcc_assert (rtx_equal_p (op1, XEXP (cond, 1)));
      emit_jump_insn (copy_insn (PATTERN (cinsn)));
      jump = get_last_insn ();
      gcc_assert (JUMP_P (jump));
      JUMP_LABEL (jump) = JUMP_LABEL (cinsn);
      LABEL_NUSES (JUMP_LABEL (jump))++;
      redirect_jump (jump, label, 0);
    }
  else
    {
      gcc_assert (!cinsn);

      op0 = force_operand (op0, NULL_RTX);
      op1 = force_operand (op1, NULL_RTX);
      do_compare_rtx_and_jump (op0, op1, comp, 0,
                               mode, NULL_RTX, NULL_RTX, label, -1);
      jump = get_last_insn ();
      gcc_assert (JUMP_P (jump));
      JUMP_LABEL (jump) = label;
      LABEL_NUSES (label)++;
    }
  add_reg_note (jump, REG_BR_PROB, GEN_INT (prob));

  seq = get_insns ();
  end_sequence ();

  return seq;
}
// seg000:0FBD
void __pascal far check_the_end() {
    if (next_room != 0 && next_room != drawn_room) {
        drawn_room = next_room;
        load_room_links();
        if (current_level == 14 && drawn_room == 5) {
            // Special event: end of game
            end_sequence();
        }
        different_room = 1;
        loadkid();
        anim_tile_modif();
        start_chompers();
        check_fall_flo();
        check_shadow();
    }
}
/* At function entry, emit moves of the recorded hard-register initial
   values into the pseudos that refer to them.  */

unsigned int
emit_initial_value_sets (void)
{
  struct initial_value_struct *ivs = crtl->hard_reg_initial_vals;
  int i;
  rtx seq;

  if (ivs == 0)
    return 0;

  start_sequence ();
  for (i = 0; i < ivs->num_entries; i++)
    emit_move_insn (ivs->entries[i].pseudo, ivs->entries[i].hard_reg);
  seq = get_insns ();
  end_sequence ();

  emit_insn_at_entry (seq);
  return 0;
}
static bool
commit_mode_sets (struct edge_list *edge_list, int e, struct bb_info *info)
{
  bool need_commit = false;

  for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
    {
      edge eg = INDEX_EDGE (edge_list, ed);
      int mode;

      if ((mode = (int)(intptr_t)(eg->aux)) != -1)
        {
          HARD_REG_SET live_at_edge;
          basic_block src_bb = eg->src;
          int cur_mode = info[src_bb->index].mode_out;
          rtx_insn *mode_set;

          REG_SET_TO_HARD_REG_SET (live_at_edge, df_get_live_out (src_bb));

          rtl_profile_for_edge (eg);
          start_sequence ();

          targetm.mode_switching.emit (e, mode, cur_mode, live_at_edge);

          mode_set = get_insns ();
          end_sequence ();
          default_rtl_profile ();

          /* Do not bother to insert empty sequence.  */
          if (mode_set == NULL)
            continue;

          /* We should not get an abnormal edge here.  */
          gcc_assert (! (eg->flags & EDGE_ABNORMAL));

          need_commit = true;
          insert_insn_on_edge (mode_set, eg);
        }
    }

  return need_commit;
}
/* Emit insns that copy SRC into DEST, converting SRC to DEST's mode first
   if necessary.  UNSIGNEDSRCP gives the signedness for the conversion and
   SIZEEXP supplies the size of BLKmode copies.  Return the new sequence.  */

static inline rtx
emit_partition_copy (rtx dest, rtx src, int unsignedsrcp, tree sizeexp)
{
  rtx seq;

  start_sequence ();

  if (GET_MODE (src) != VOIDmode && GET_MODE (src) != GET_MODE (dest))
    src = convert_to_mode (GET_MODE (dest), src, unsignedsrcp);
  if (GET_MODE (src) == BLKmode)
    {
      gcc_assert (GET_MODE (dest) == BLKmode);
      emit_block_move (dest, src, expr_size (sizeexp), BLOCK_OP_NORMAL);
    }
  else
    emit_move_insn (dest, src);

  seq = get_insns ();
  end_sequence ();

  return seq;
}
void
pack_modify_request_ref(SV *dest, HV *hv) {
    STRLEN offset1, offset2;
    SV *changes;
    offset1 = start_constructed(dest, ASN1_APPLICATION|ASN1_CONSTRUCTED, LDAP_OP_MODIFY_REQUEST);
    pack_string_utf8(dest, hv_fetchs_def_undef(hv, "dn"));
    offset2 = start_sequence(dest);
    changes = hv_fetchs_def_undef(hv, "changes");
    if (changes && SvOK(changes)) {
        AV *av;
        I32 i, len;
        if (SvROK(changes) && (av = (AV*)SvRV(changes)) && (SvTYPE(av) == SVt_PVAV)) {
            len = av_len(av);
            for (i = 0; i <= len; i++)
                ldap_pack_modop(dest, av_fetch_def_undef(av, i));
        }
        else
            ldap_pack_modop(dest, changes);
    }
    end_sequence(dest, offset2);
    end_constructed(dest, offset1);
}
/* Find all insns that need a particular mode setting, and insert the
   necessary mode switches.  Return nonzero if we did work.  */

static int
optimize_mode_switching (void)
{
  int e;
  basic_block bb;
  bool need_commit = false;
  static const int num_modes[] = NUM_MODES_FOR_MODE_SWITCHING;
#define N_ENTITIES ARRAY_SIZE (num_modes)
  int entity_map[N_ENTITIES];
  struct bb_info *bb_info[N_ENTITIES];
  int i, j;
  int n_entities = 0;
  int max_num_modes = 0;
  bool emitted ATTRIBUTE_UNUSED = false;
  basic_block post_entry = 0;
  basic_block pre_exit = 0;
  struct edge_list *edge_list = 0;

  /* These bitmaps are used for the LCM algorithm.  */
  sbitmap *kill, *del, *insert, *antic, *transp, *comp;
  sbitmap *avin, *avout;

  for (e = N_ENTITIES - 1; e >= 0; e--)
    if (OPTIMIZE_MODE_SWITCHING (e))
      {
        int entry_exit_extra = 0;

        /* Create the list of segments within each basic block.
           If NORMAL_MODE is defined, allow for two extra blocks
           split from the entry and exit block.  */
        if (targetm.mode_switching.entry && targetm.mode_switching.exit)
          entry_exit_extra = 3;

        bb_info[n_entities]
          = XCNEWVEC (struct bb_info,
                      last_basic_block_for_fn (cfun) + entry_exit_extra);
        entity_map[n_entities++] = e;
        if (num_modes[e] > max_num_modes)
          max_num_modes = num_modes[e];
      }

  if (! n_entities)
    return 0;

  /* Make sure if MODE_ENTRY is defined MODE_EXIT is defined.  */
  gcc_assert ((targetm.mode_switching.entry && targetm.mode_switching.exit)
              || (!targetm.mode_switching.entry
                  && !targetm.mode_switching.exit));

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    {
      /* Split the edge from the entry block, so that we can note that
         there NORMAL_MODE is supplied.  */
      post_entry = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
      pre_exit = create_pre_exit (n_entities, entity_map, num_modes);
    }

  df_analyze ();

  /* Create the bitmap vectors.  */
  antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                                n_entities * max_num_modes);
  transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                                 n_entities * max_num_modes);
  comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                               n_entities * max_num_modes);
  avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                               n_entities * max_num_modes);
  avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                                n_entities * max_num_modes);
  kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun),
                               n_entities * max_num_modes);

  bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (antic, last_basic_block_for_fn (cfun));
  bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));

  for (j = n_entities - 1; j >= 0; j--)
    {
      int e = entity_map[j];
      int no_mode = num_modes[e];
      struct bb_info *info = bb_info[j];
      rtx_insn *insn;

      /* Determine what mode (if any) the first use of entity E in each
         basic block needs.  This will be the mode that is anticipatable
         for the block.  Also compute the initial transparency settings.  */
      FOR_EACH_BB_FN (bb, cfun)
        {
          struct seginfo *ptr;
          int last_mode = no_mode;
          bool any_set_required = false;
          HARD_REG_SET live_now;

          info[bb->index].mode_out = info[bb->index].mode_in = no_mode;

          REG_SET_TO_HARD_REG_SET (live_now, df_get_live_in (bb));

          /* Pretend the mode is clobbered across abnormal edges.  */
          {
            edge_iterator ei;
            edge eg;
            FOR_EACH_EDGE (eg, ei, bb->preds)
              if (eg->flags & EDGE_COMPLEX)
                break;
            if (eg)
              {
                rtx_insn *ins_pos = BB_HEAD (bb);
                if (LABEL_P (ins_pos))
                  ins_pos = NEXT_INSN (ins_pos);
                gcc_assert (NOTE_INSN_BASIC_BLOCK_P (ins_pos));
                if (ins_pos != BB_END (bb))
                  ins_pos = NEXT_INSN (ins_pos);
                ptr = new_seginfo (no_mode, ins_pos, bb->index, live_now);
                add_seginfo (info + bb->index, ptr);
                for (i = 0; i < no_mode; i++)
                  clear_mode_bit (transp[bb->index], j, i);
              }
          }

          FOR_BB_INSNS (bb, insn)
            {
              if (INSN_P (insn))
                {
                  int mode = targetm.mode_switching.needed (e, insn);
                  rtx link;

                  if (mode != no_mode && mode != last_mode)
                    {
                      any_set_required = true;
                      last_mode = mode;
                      ptr = new_seginfo (mode, insn, bb->index, live_now);
                      add_seginfo (info + bb->index, ptr);
                      for (i = 0; i < no_mode; i++)
                        clear_mode_bit (transp[bb->index], j, i);
                    }

                  if (targetm.mode_switching.after)
                    last_mode = targetm.mode_switching.after (e, last_mode,
                                                              insn);

                  /* Update LIVE_NOW.  */
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_DEAD)
                      reg_dies (XEXP (link, 0), &live_now);

                  note_stores (PATTERN (insn), reg_becomes_live, &live_now);
                  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
                    if (REG_NOTE_KIND (link) == REG_UNUSED)
                      reg_dies (XEXP (link, 0), &live_now);
                }
            }

          info[bb->index].computing = last_mode;
          /* Check for blocks without ANY mode requirements.
             N.B. because of MODE_AFTER, last_mode might still
             be different from no_mode, in which case we need to
             mark the block as nontransparent.  */
          if (!any_set_required)
            {
              ptr = new_seginfo (no_mode, BB_END (bb), bb->index, live_now);
              add_seginfo (info + bb->index, ptr);
              if (last_mode != no_mode)
                for (i = 0; i < no_mode; i++)
                  clear_mode_bit (transp[bb->index], j, i);
            }
        }

      if (targetm.mode_switching.entry && targetm.mode_switching.exit)
        {
          int mode = targetm.mode_switching.entry (e);

          info[post_entry->index].mode_out
            = info[post_entry->index].mode_in = no_mode;
          if (pre_exit)
            {
              info[pre_exit->index].mode_out
                = info[pre_exit->index].mode_in = no_mode;
            }

          if (mode != no_mode)
            {
              bb = post_entry;

              /* By always making this nontransparent, we save
                 an extra check in make_preds_opaque.  We also
                 need this to avoid confusing pre_edge_lcm when
                 antic is cleared but transp and comp are set.  */
              for (i = 0; i < no_mode; i++)
                clear_mode_bit (transp[bb->index], j, i);

              /* Insert a fake computing definition of MODE into entry
                 blocks which compute no mode.  This represents the mode on
                 entry.  */
              info[bb->index].computing = mode;

              if (pre_exit)
                info[pre_exit->index].seginfo->mode
                  = targetm.mode_switching.exit (e);
            }
        }

      /* Set the anticipatable and computing arrays.  */
      for (i = 0; i < no_mode; i++)
        {
          int m = targetm.mode_switching.priority (entity_map[j], i);

          FOR_EACH_BB_FN (bb, cfun)
            {
              if (info[bb->index].seginfo->mode == m)
                set_mode_bit (antic[bb->index], j, m);

              if (info[bb->index].computing == m)
                set_mode_bit (comp[bb->index], j, m);
            }
        }
    }

  /* Calculate the optimal locations for the placement of mode switches to
     modes with priority I.  */

  FOR_EACH_BB_FN (bb, cfun)
    bitmap_not (kill[bb->index], transp[bb->index]);

  edge_list = pre_edge_lcm_avs (n_entities * max_num_modes, transp, comp, antic,
                                kill, avin, avout, &insert, &del);

  for (j = n_entities - 1; j >= 0; j--)
    {
      int no_mode = num_modes[entity_map[j]];

      /* Insert all mode sets that have been inserted by lcm.  */
      for (int ed = NUM_EDGES (edge_list) - 1; ed >= 0; ed--)
        {
          edge eg = INDEX_EDGE (edge_list, ed);

          eg->aux = (void *)(intptr_t)-1;

          for (i = 0; i < no_mode; i++)
            {
              int m = targetm.mode_switching.priority (entity_map[j], i);
              if (mode_bit_p (insert[ed], j, m))
                {
                  eg->aux = (void *)(intptr_t)m;
                  break;
                }
            }
        }

      FOR_EACH_BB_FN (bb, cfun)
        {
          struct bb_info *info = bb_info[j];
          int last_mode = no_mode;

          /* Initialize mode-out availability for bb.  */
          for (i = 0; i < no_mode; i++)
            if (mode_bit_p (avout[bb->index], j, i))
              {
                if (last_mode == no_mode)
                  last_mode = i;
                if (last_mode != i)
                  {
                    last_mode = no_mode;
                    break;
                  }
              }
          info[bb->index].mode_out = last_mode;

          /* Initialize mode-in availability for bb.  */
          last_mode = no_mode;
          for (i = 0; i < no_mode; i++)
            if (mode_bit_p (avin[bb->index], j, i))
              {
                if (last_mode == no_mode)
                  last_mode = i;
                if (last_mode != i)
                  {
                    last_mode = no_mode;
                    break;
                  }
              }
          info[bb->index].mode_in = last_mode;

          for (i = 0; i < no_mode; i++)
            if (mode_bit_p (del[bb->index], j, i))
              info[bb->index].seginfo->mode = no_mode;
        }

      /* Now output the remaining mode sets in all the segments.  */

      /* In case there was no mode inserted, the mode information on the edge
         might not be complete.  Update mode info on edges and commit pending
         mode sets.  */
      need_commit |= commit_mode_sets (edge_list, entity_map[j], bb_info[j]);

      /* Reset modes for next entity.  */
      clear_aux_for_edges ();

      FOR_EACH_BB_FN (bb, cfun)
        {
          struct seginfo *ptr, *next;
          int cur_mode = bb_info[j][bb->index].mode_in;

          for (ptr = bb_info[j][bb->index].seginfo; ptr; ptr = next)
            {
              next = ptr->next;
              if (ptr->mode != no_mode)
                {
                  rtx_insn *mode_set;

                  rtl_profile_for_bb (bb);
                  start_sequence ();

                  targetm.mode_switching.emit (entity_map[j], ptr->mode,
                                               cur_mode, ptr->regs_live);
                  mode_set = get_insns ();
                  end_sequence ();

                  /* modes kill each other inside a basic block.  */
                  cur_mode = ptr->mode;

                  /* Insert MODE_SET only if it is nonempty.  */
                  if (mode_set != NULL_RTX)
                    {
                      emitted = true;
                      if (NOTE_INSN_BASIC_BLOCK_P (ptr->insn_ptr))
                        /* We need to emit the insns in a FIFO-like manner,
                           i.e. the first to be emitted at our insertion
                           point ends up first in the instruction stream.
                           Because we made sure that NOTE_INSN_BASIC_BLOCK is
                           only used for initially empty basic blocks, we
                           can achieve this by appending at the end of
                           the block.  */
                        emit_insn_after
                          (mode_set, BB_END (NOTE_BASIC_BLOCK (ptr->insn_ptr)));
                      else
                        emit_insn_before (mode_set, ptr->insn_ptr);
                    }

                  default_rtl_profile ();
                }

              free (ptr);
            }
        }

      free (bb_info[j]);
    }

  free_edge_list (edge_list);

  /* Finished.  Free up all the things we've allocated.  */
  sbitmap_vector_free (del);
  sbitmap_vector_free (insert);
  sbitmap_vector_free (kill);
  sbitmap_vector_free (antic);
  sbitmap_vector_free (transp);
  sbitmap_vector_free (comp);
  sbitmap_vector_free (avin);
  sbitmap_vector_free (avout);

  if (need_commit)
    commit_edge_insertions ();

  if (targetm.mode_switching.entry && targetm.mode_switching.exit)
    cleanup_cfg (CLEANUP_NO_INSN_DEL);
  else if (!need_commit && !emitted)
    return 0;

  return 1;
}
rtx
fr30_move_double (rtx * operands)
{
  rtx src  = operands[1];
  rtx dest = operands[0];
  enum rtx_code src_code = GET_CODE (src);
  enum rtx_code dest_code = GET_CODE (dest);
  enum machine_mode mode = GET_MODE (dest);
  rtx val;

  start_sequence ();

  if (dest_code == REG)
    {
      if (src_code == REG)
        {
          int reverse = (REGNO (dest) == REGNO (src) + 1);

          /* We normally copy the low-numbered register first.  However, if
             the first register of operand 0 is the same as the second register
             of operand 1, we must copy in the opposite order.  */
          emit_insn (gen_rtx_SET (VOIDmode,
                                  operand_subword (dest, reverse, TRUE, mode),
                                  operand_subword (src, reverse, TRUE, mode)));

          emit_insn (gen_rtx_SET (VOIDmode,
                                  operand_subword (dest, !reverse, TRUE, mode),
                                  operand_subword (src, !reverse, TRUE, mode)));
        }
      else if (src_code == MEM)
        {
          rtx addr = XEXP (src, 0);
          int dregno = REGNO (dest);
          rtx dest0 = operand_subword (dest, 0, TRUE, mode);
          rtx dest1 = operand_subword (dest, 1, TRUE, mode);
          rtx new_mem;

          gcc_assert (GET_CODE (addr) == REG);

          /* Copy the address before clobbering it.  See PR 34174.  */
          emit_insn (gen_rtx_SET (SImode, dest1, addr));
          emit_insn (gen_rtx_SET (VOIDmode, dest0,
                                  adjust_address (src, SImode, 0)));
          emit_insn (gen_rtx_SET (SImode, dest1,
                                  plus_constant (dest1, UNITS_PER_WORD)));

          new_mem = gen_rtx_MEM (SImode, dest1);
          MEM_COPY_ATTRIBUTES (new_mem, src);

          emit_insn (gen_rtx_SET (VOIDmode, dest1, new_mem));
        }
      else if (src_code == CONST_INT || src_code == CONST_DOUBLE)
        {
          rtx words[2];
          split_double (src, &words[0], &words[1]);
          emit_insn (gen_rtx_SET (VOIDmode,
                                  operand_subword (dest, 0, TRUE, mode),
                                  words[0]));

          emit_insn (gen_rtx_SET (VOIDmode,
                                  operand_subword (dest, 1, TRUE, mode),
                                  words[1]));
        }
    }
  else if (src_code == REG && dest_code == MEM)
    {
      rtx addr = XEXP (dest, 0);
      rtx src0;
      rtx src1;

      gcc_assert (GET_CODE (addr) == REG);

      src0 = operand_subword (src, 0, TRUE, mode);
      src1 = operand_subword (src, 1, TRUE, mode);

      emit_move_insn (adjust_address (dest, SImode, 0), src0);

      if (REGNO (addr) == STACK_POINTER_REGNUM
          || REGNO (addr) == FRAME_POINTER_REGNUM)
        emit_insn (gen_rtx_SET (VOIDmode,
                                adjust_address (dest, SImode, UNITS_PER_WORD),
                                src1));
      else
        {
          rtx new_mem;
          rtx scratch_reg_r0 = gen_rtx_REG (SImode, 0);

          /* We need a scratch register to hold the value of 'address + 4'.
             We use r0 for this purpose.  It is used for example for long
             jumps and is already marked to not be used by normal register
             allocation.  */
          emit_insn (gen_movsi_internal (scratch_reg_r0, addr));
          emit_insn (gen_addsi_small_int (scratch_reg_r0, scratch_reg_r0,
                                          GEN_INT (UNITS_PER_WORD)));

          new_mem = gen_rtx_MEM (SImode, scratch_reg_r0);
          MEM_COPY_ATTRIBUTES (new_mem, dest);

          emit_move_insn (new_mem, src1);
          emit_insn (gen_blockage ());
        }
    }
  else
    /* This should have been prevented by the constraints on movdi_insn.  */
    gcc_unreachable ();

  val = get_insns ();
  end_sequence ();

  return val;
}
rtx
fr30_move_double (rtx * operands)
{
  rtx src  = operands[1];
  rtx dest = operands[0];
  enum rtx_code src_code = GET_CODE (src);
  enum rtx_code dest_code = GET_CODE (dest);
  enum machine_mode mode = GET_MODE (dest);
  rtx val;

  start_sequence ();

  if (dest_code == REG)
    {
      if (src_code == REG)
        {
          int reverse = (REGNO (dest) == REGNO (src) + 1);

          /* We normally copy the low-numbered register first.  However, if
             the first register of operand 0 is the same as the second register
             of operand 1, we must copy in the opposite order.  */
          emit_insn (gen_rtx_SET (VOIDmode,
                                  operand_subword (dest, reverse, TRUE, mode),
                                  operand_subword (src, reverse, TRUE, mode)));

          emit_insn (gen_rtx_SET (VOIDmode,
                                  operand_subword (dest, !reverse, TRUE, mode),
                                  operand_subword (src, !reverse, TRUE, mode)));
        }
      else if (src_code == MEM)
        {
          rtx addr = XEXP (src, 0);
          int dregno = REGNO (dest);
          rtx dest0;
          rtx dest1;
          rtx new_mem;

          /* If the high-address word is used in the address, we must load it
             last.  Otherwise, load it first.  */
          int reverse = (refers_to_regno_p (dregno, dregno + 1, addr, 0) != 0);

          gcc_assert (GET_CODE (addr) == REG);

          dest0 = operand_subword (dest, reverse, TRUE, mode);
          dest1 = operand_subword (dest, !reverse, TRUE, mode);

          if (reverse)
            {
              emit_insn (gen_rtx_SET (VOIDmode, dest1,
                                      adjust_address (src, SImode, 0)));
              emit_insn (gen_rtx_SET (SImode, dest0,
                                      gen_rtx_REG (SImode, REGNO (addr))));
              emit_insn (gen_rtx_SET (SImode, dest0,
                                      plus_constant (dest0, UNITS_PER_WORD)));

              new_mem = gen_rtx_MEM (SImode, dest0);
              MEM_COPY_ATTRIBUTES (new_mem, src);

              emit_insn (gen_rtx_SET (VOIDmode, dest0, new_mem));
            }
          else
            {
              emit_insn (gen_rtx_SET (VOIDmode, dest0,
                                      adjust_address (src, SImode, 0)));
              emit_insn (gen_rtx_SET (SImode, dest1,
                                      gen_rtx_REG (SImode, REGNO (addr))));
              emit_insn (gen_rtx_SET (SImode, dest1,
                                      plus_constant (dest1, UNITS_PER_WORD)));

              new_mem = gen_rtx_MEM (SImode, dest1);
              MEM_COPY_ATTRIBUTES (new_mem, src);

              emit_insn (gen_rtx_SET (VOIDmode, dest1, new_mem));
            }
        }
      else if (src_code == CONST_INT || src_code == CONST_DOUBLE)
        {
          rtx words[2];
          split_double (src, &words[0], &words[1]);
          emit_insn (gen_rtx_SET (VOIDmode,
                                  operand_subword (dest, 0, TRUE, mode),
                                  words[0]));

          emit_insn (gen_rtx_SET (VOIDmode,
                                  operand_subword (dest, 1, TRUE, mode),
                                  words[1]));
        }
    }
  else if (src_code == REG && dest_code == MEM)
    {
      rtx addr = XEXP (dest, 0);
      rtx src0;
      rtx src1;

      gcc_assert (GET_CODE (addr) == REG);

      src0 = operand_subword (src, 0, TRUE, mode);
      src1 = operand_subword (src, 1, TRUE, mode);

      emit_insn (gen_rtx_SET (VOIDmode, adjust_address (dest, SImode, 0),
                              src0));

      if (REGNO (addr) == STACK_POINTER_REGNUM
          || REGNO (addr) == FRAME_POINTER_REGNUM)
        emit_insn (gen_rtx_SET (VOIDmode,
                                adjust_address (dest, SImode, UNITS_PER_WORD),
                                src1));
      else
        {
          rtx new_mem;

          /* We need a scratch register to hold the value of 'address + 4'.
             We ought to allow gcc to find one for us, but for now, just push
             one of the source registers.  */
          emit_insn (gen_movsi_push (src0));
          emit_insn (gen_movsi_internal (src0, addr));
          emit_insn (gen_addsi_small_int (src0, src0,
                                          GEN_INT (UNITS_PER_WORD)));

          new_mem = gen_rtx_MEM (SImode, src0);
          MEM_COPY_ATTRIBUTES (new_mem, dest);

          emit_insn (gen_rtx_SET (VOIDmode, new_mem, src1));
          emit_insn (gen_movsi_pop (src0));
        }
    }
  else
    /* This should have been prevented by the constraints on movdi_insn.  */
    gcc_unreachable ();

  val = get_insns ();
  end_sequence ();

  return val;
}
void
init_caller_save ()
{
  char *first_obj = (char *) oballoc (0);
  rtx addr_reg;
  int offset;
  rtx address;
  int i, j;

  /* First find all the registers that we need to deal with and all
     the modes that they can have.  If we can't find a mode to use,
     we can't have the register live over calls.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (call_used_regs[i] && ! call_fixed_regs[i])
        {
          for (j = 1; j <= MOVE_MAX / UNITS_PER_WORD; j++)
            {
              regno_save_mode[i][j] = choose_hard_reg_mode (i, j);
              if (regno_save_mode[i][j] == VOIDmode && j == 1)
                {
                  call_fixed_regs[i] = 1;
                  SET_HARD_REG_BIT (call_fixed_reg_set, i);
                }
            }
        }
      else
        regno_save_mode[i][1] = VOIDmode;
    }

  /* The following code tries to approximate the conditions under which
     we can easily save and restore a register without scratch registers or
     other complexities.  It will usually work, except under conditions where
     the validity of an insn operand is dependent on the address offset.
     No such cases are currently known.

     We first find a typical offset from some BASE_REG_CLASS register.
     This address is chosen by finding the first register in the class
     and by finding the smallest power of two that is a valid offset from
     that register in every mode we will use to save registers.  */

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (reg_class_contents[(int) BASE_REG_CLASS], i))
      break;

  if (i == FIRST_PSEUDO_REGISTER)
    abort ();

  addr_reg = gen_rtx_REG (Pmode, i);

  for (offset = 1 << (HOST_BITS_PER_INT / 2); offset; offset >>= 1)
    {
      address = gen_rtx_PLUS (Pmode, addr_reg, GEN_INT (offset));

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (regno_save_mode[i][1] != VOIDmode
            && ! strict_memory_address_p (regno_save_mode[i][1], address))
          break;

      if (i == FIRST_PSEUDO_REGISTER)
        break;
    }

  /* If we didn't find a valid address, we must use register indirect.  */
  if (offset == 0)
    address = addr_reg;

  /* Next we try to form an insn to save and restore the register.  We
     see if such an insn is recognized and meets its constraints.  */

  start_sequence ();

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    for (j = 1; j <= MOVE_MAX / UNITS_PER_WORD; j++)
      if (regno_save_mode[i][j] != VOIDmode)
        {
          rtx mem = gen_rtx_MEM (regno_save_mode[i][j], address);
          rtx reg = gen_rtx_REG (regno_save_mode[i][j], i);
          rtx savepat = gen_rtx_SET (VOIDmode, mem, reg);
          rtx restpat = gen_rtx_SET (VOIDmode, reg, mem);
          rtx saveinsn = emit_insn (savepat);
          rtx restinsn = emit_insn (restpat);
          int ok;

          reg_save_code[i][j] = recog_memoized (saveinsn);
          reg_restore_code[i][j] = recog_memoized (restinsn);

          /* Now extract both insns and see if we can meet their
             constraints.  */
          ok = (reg_save_code[i][j] != -1 && reg_restore_code[i][j] != -1);
          if (ok)
            {
              insn_extract (saveinsn);
              ok = constrain_operands (reg_save_code[i][j], 1);
              insn_extract (restinsn);
              ok &= constrain_operands (reg_restore_code[i][j], 1);
            }

          if (! ok)
            {
              regno_save_mode[i][j] = VOIDmode;
              if (j == 1)
                {
                  call_fixed_regs[i] = 1;
                  SET_HARD_REG_BIT (call_fixed_reg_set, i);
                }
            }
        }

  end_sequence ();

  obfree (first_obj);
}
static void
doloop_modify (struct loop *loop, struct niter_desc *desc,
               rtx doloop_seq, rtx condition, rtx count)
{
  rtx counter_reg;
  rtx tmp, noloop = NULL_RTX;
  rtx sequence;
  rtx jump_insn;
  rtx jump_label;
  int nonneg = 0, irr;
  bool increment_count;
  basic_block loop_end = desc->out_edge->src;
  enum machine_mode mode;

  jump_insn = BB_END (loop_end);

  if (dump_file)
    {
      fprintf (dump_file, "Doloop: Inserting doloop pattern (");
      if (desc->const_iter)
        fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter);
      else
        fputs ("runtime", dump_file);
      fputs (" iterations).\n", dump_file);
    }

  /* Discard original jump to continue loop.  The original compare
     result may still be live, so it cannot be discarded explicitly.  */
  delete_insn (jump_insn);

  counter_reg = XEXP (condition, 0);
  if (GET_CODE (counter_reg) == PLUS)
    counter_reg = XEXP (counter_reg, 0);
  mode = GET_MODE (counter_reg);

  increment_count = false;
  switch (GET_CODE (condition))
    {
    case NE:
      /* Currently only NE tests against zero and one are supported.  */
      if (XEXP (condition, 1) == const1_rtx)
        {
          increment_count = true;
          noloop = const1_rtx;
        }
      else if (XEXP (condition, 1) == const0_rtx)
        noloop = const0_rtx;
      else
        abort ();
      break;

    case GE:
      /* Currently only GE tests against zero are supported.  */
      if (XEXP (condition, 1) != const0_rtx)
        abort ();

      noloop = constm1_rtx;

      /* The iteration count does not need incrementing for a GE test.  */
      increment_count = false;

      /* Determine if the iteration counter will be non-negative.
         Note that the maximum value loaded is iterations_max - 1.  */
      if (desc->niter_max
          <= ((unsigned HOST_WIDEST_INT) 1
              << (GET_MODE_BITSIZE (mode) - 1)))
        nonneg = 1;
      break;

      /* Abort if an invalid doloop pattern has been generated.  */
    default:
      abort ();
    }

  if (increment_count)
    count = simplify_gen_binary (PLUS, mode, count, const1_rtx);

  /* Insert initialization of the count register into the loop header.  */
  start_sequence ();
  tmp = force_operand (count, counter_reg);
  convert_move (counter_reg, tmp, 1);
  sequence = get_insns ();
  end_sequence ();
  emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));

  if (desc->noloop_assumptions)
    {
      rtx ass = copy_rtx (desc->noloop_assumptions);
      basic_block preheader = loop_preheader_edge (loop)->src;
      basic_block set_zero
        = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX);
      basic_block new_preheader
        = loop_split_edge_with (loop_preheader_edge (loop), NULL_RTX);
      basic_block bb;
      edge te;
      gcov_type cnt;

      /* Expand the condition testing the assumptions and if it does not
         pass, reset the count register to 0.  */
      add_test (XEXP (ass, 0), preheader, set_zero);
      EDGE_SUCC (preheader, 0)->flags &= ~EDGE_FALLTHRU;
      cnt = EDGE_SUCC (preheader, 0)->count;
      EDGE_SUCC (preheader, 0)->probability = 0;
      EDGE_SUCC (preheader, 0)->count = 0;
      irr = EDGE_SUCC (preheader, 0)->flags & EDGE_IRREDUCIBLE_LOOP;
      te = make_edge (preheader, new_preheader, EDGE_FALLTHRU | irr);
      te->probability = REG_BR_PROB_BASE;
      te->count = cnt;
      set_immediate_dominator (CDI_DOMINATORS, new_preheader, preheader);

      set_zero->count = 0;
      set_zero->frequency = 0;

      for (ass = XEXP (ass, 1); ass; ass = XEXP (ass, 1))
        {
          bb = loop_split_edge_with (te, NULL_RTX);
          te = EDGE_SUCC (bb, 0);
          add_test (XEXP (ass, 0), bb, set_zero);
          make_edge (bb, set_zero, irr);
        }

      start_sequence ();
      convert_move (counter_reg, noloop, 0);
      sequence = get_insns ();
      end_sequence ();
      emit_insn_after (sequence, BB_END (set_zero));
    }

  /* Some targets (e.g., C4x) need to initialize special looping
     registers.  */
#ifdef HAVE_doloop_begin
  {
    rtx init;
    unsigned level = get_loop_level (loop) + 1;
    init = gen_doloop_begin (counter_reg,
                             desc->const_iter ? desc->niter_expr : const0_rtx,
                             desc->niter_max,
                             GEN_INT (level));
    if (init)
      {
        start_sequence ();
        emit_insn (init);
        sequence = get_insns ();
        end_sequence ();
        emit_insn_after (sequence,
                         BB_END (loop_preheader_edge (loop)->src));
      }
  }
#endif

  /* Insert the new low-overhead looping insn.  */
  emit_jump_insn_after (doloop_seq, BB_END (loop_end));
  jump_insn = BB_END (loop_end);
  jump_label = block_label (desc->in_edge->dest);
  JUMP_LABEL (jump_insn) = jump_label;
  LABEL_NUSES (jump_label)++;

  /* Ensure the right fallthru edge is marked, in case we have reversed
     the condition.  */
  desc->in_edge->flags &= ~EDGE_FALLTHRU;
  desc->out_edge->flags |= EDGE_FALLTHRU;

  /* Add a REG_NONNEG note if the actual or estimated maximum number
     of iterations is non-negative.  */
  if (nonneg)
    {
      REG_NOTES (jump_insn)
        = gen_rtx_EXPR_LIST (REG_NONNEG, NULL_RTX, REG_NOTES (jump_insn));
    }
}
static void
initialize_uninitialized_regs (void)
{
  basic_block bb;
  bitmap already_genned = BITMAP_ALLOC (NULL);

  if (optimize == 1)
    {
      df_live_add_problem ();
      df_live_set_all_dirty ();
    }

  df_analyze ();

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      bitmap lr = DF_LR_IN (bb);
      bitmap ur = DF_LIVE_IN (bb);
      bitmap_clear (already_genned);

      FOR_BB_INSNS (bb, insn)
        {
          df_ref use;

          if (!NONDEBUG_INSN_P (insn))
            continue;

          FOR_EACH_INSN_USE (use, insn)
            {
              unsigned int regno = DF_REF_REGNO (use);

              /* Only do this for the pseudos.  */
              if (regno < FIRST_PSEUDO_REGISTER)
                continue;

              /* Do not generate multiple moves for the same regno.
                 This is common for sequences of subreg operations.
                 They would be deleted during combine but there is no
                 reason to churn the system.  */
              if (bitmap_bit_p (already_genned, regno))
                continue;

              /* A use is MUST uninitialized if it reaches the top of
                 the block from the inside of the block (the lr test)
                 and no def for it reaches the top of the block from
                 outside of the block (the ur test).  */
              if (bitmap_bit_p (lr, regno)
                  && (!bitmap_bit_p (ur, regno)))
                {
                  rtx_insn *move_insn;
                  rtx reg = DF_REF_REAL_REG (use);

                  bitmap_set_bit (already_genned, regno);

                  start_sequence ();
                  emit_move_insn (reg, CONST0_RTX (GET_MODE (reg)));
                  move_insn = get_insns ();
                  end_sequence ();
                  emit_insn_before (move_insn, insn);
                  if (dump_file)
                    fprintf (dump_file,
                             "adding initialization in %s of reg %d in block %d for insn %d.\n",
                             current_function_name (),
                             regno, bb->index, INSN_UID (insn));
                }
            }
        }
    }

  /* Release the scratch bitmap.  */
  BITMAP_FREE (already_genned);
}
/* Find the values inside INSN for which we want to measure histograms for
   division/modulo optimization and store them to VALUES.  */
static void
insn_divmod_values_to_profile (rtx insn, histogram_values *values)
{
  rtx set, set_src, op1, op2;
  enum machine_mode mode;
  histogram_value hist;

  if (!INSN_P (insn))
    return;

  set = single_set (insn);
  if (!set)
    return;

  mode = GET_MODE (SET_DEST (set));
  if (!INTEGRAL_MODE_P (mode))
    return;

  set_src = SET_SRC (set);
  switch (GET_CODE (set_src))
    {
    case DIV:
    case MOD:
    case UDIV:
    case UMOD:
      op1 = XEXP (set_src, 0);
      op2 = XEXP (set_src, 1);
      if (side_effects_p (op2))
        return;

      /* Check for a special case where the divisor is a power of 2.  */
      if ((GET_CODE (set_src) == UMOD) && !CONSTANT_P (op2))
        {
          hist = ggc_alloc (sizeof (*hist));
          hist->value = op2;
          hist->seq = NULL_RTX;
          hist->mode = mode;
          hist->insn = insn;
          hist->type = HIST_TYPE_POW2;
          hist->hdata.pow2.may_be_other = 1;
          VEC_safe_push (histogram_value, *values, hist);
        }

      /* Check whether the divisor is not in fact a constant.  */
      if (!CONSTANT_P (op2))
        {
          hist = ggc_alloc (sizeof (*hist));
          hist->value = op2;
          hist->mode = mode;
          hist->seq = NULL_RTX;
          hist->insn = insn;
          hist->type = HIST_TYPE_SINGLE_VALUE;
          VEC_safe_push (histogram_value, *values, hist);
        }

      /* For mod, check whether it is not often a noop (or replaceable by
         a few subtractions).  */
      if (GET_CODE (set_src) == UMOD && !side_effects_p (op1))
        {
          rtx tmp;

          hist = ggc_alloc (sizeof (*hist));
          start_sequence ();
          tmp = simplify_gen_binary (DIV, mode, copy_rtx (op1), copy_rtx (op2));
          hist->value = force_operand (tmp, NULL_RTX);
          hist->seq = get_insns ();
          end_sequence ();
          hist->mode = mode;
          hist->insn = insn;
          hist->type = HIST_TYPE_INTERVAL;
          hist->hdata.intvl.int_start = 0;
          hist->hdata.intvl.steps = 2;
          hist->hdata.intvl.may_be_less = 1;
          hist->hdata.intvl.may_be_more = 1;
          VEC_safe_push (histogram_value, *values, hist);
        }
      return;

    default:
      return;
    }
}