/* Merge the stack adjustment made by INSN into the REG_FRAME_RELATED_EXPR
   note of LAST_SP_SET, the earlier stack-pointer adjustment it is being
   combined with.  THIS_ADJUST is the constant INSN adds to the stack
   pointer.  The note is grown (or created) as a SEQUENCE whose final
   element describes INSN's adjustment, so the DWARF unwinder still sees
   the effect of both instructions.  */
static void
adjust_frame_related_expr (rtx last_sp_set, rtx insn,
			   HOST_WIDE_INT this_adjust)
{
  rtx note = find_reg_note (last_sp_set, REG_FRAME_RELATED_EXPR, NULL_RTX);
  rtx new_expr = NULL_RTX;

  /* No existing note and INSN is itself frame related: the note computed
     from the combined pattern is already what we want.  */
  if (note == NULL_RTX && RTX_FRAME_RELATED_P (insn))
    return;

  if (note
      && GET_CODE (XEXP (note, 0)) == SEQUENCE
      && XVECLEN (XEXP (note, 0), 0) >= 2)
    {
      rtx expr = XEXP (note, 0);
      rtx last = XVECEXP (expr, 0, XVECLEN (expr, 0) - 1);
      int i;

      /* If the SEQUENCE already ends with a "sp = sp + const" SET whose
	 frame-related flag matches INSN's, just fold THIS_ADJUST into
	 that constant.  */
      if (GET_CODE (last) == SET
	  && RTX_FRAME_RELATED_P (last) == RTX_FRAME_RELATED_P (insn)
	  && SET_DEST (last) == stack_pointer_rtx
	  && GET_CODE (SET_SRC (last)) == PLUS
	  && XEXP (SET_SRC (last), 0) == stack_pointer_rtx
	  && CONST_INT_P (XEXP (SET_SRC (last), 1)))
	{
	  XEXP (SET_SRC (last), 1)
	    = GEN_INT (INTVAL (XEXP (SET_SRC (last), 1)) + this_adjust);
	  return;
	}

      /* Otherwise copy the old SEQUENCE into a new one with room for an
	 extra trailing element describing INSN.  */
      new_expr = gen_rtx_SEQUENCE (VOIDmode,
				   rtvec_alloc (XVECLEN (expr, 0) + 1));
      for (i = 0; i < XVECLEN (expr, 0); i++)
	XVECEXP (new_expr, 0, i) = XVECEXP (expr, 0, i);
    }
  else
    {
      /* Build a two-element SEQUENCE: the previous expression (or a SET
	 reconstructed from LAST_SP_SET itself), then INSN's.  */
      new_expr = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (2));
      if (note)
	XVECEXP (new_expr, 0, 0) = XEXP (note, 0);
      else
	{
	  /* LAST_SP_SET's pattern has already had THIS_ADJUST folded in;
	     subtract it back out so the first element describes only the
	     original adjustment.  */
	  rtx expr = copy_rtx (single_set_for_csa (last_sp_set));
	  XEXP (SET_SRC (expr), 1)
	    = GEN_INT (INTVAL (XEXP (SET_SRC (expr), 1)) - this_adjust);
	  RTX_FRAME_RELATED_P (expr) = 1;
	  XVECEXP (new_expr, 0, 0) = expr;
	}
    }

  /* Append INSN's SET as the final element, mirroring its
     frame-related flag.  */
  XVECEXP (new_expr, 0, XVECLEN (new_expr, 0) - 1)
    = copy_rtx (single_set_for_csa (insn));
  RTX_FRAME_RELATED_P (XVECEXP (new_expr, 0, XVECLEN (new_expr, 0) - 1))
    = RTX_FRAME_RELATED_P (insn);

  if (note)
    XEXP (note, 0) = new_expr;
  else
    add_reg_note (last_sp_set, REG_FRAME_RELATED_EXPR, new_expr);
}
/* Return true when testing bit BITNUM of a MODE value is expected to be
   no more expensive as "value & (1 << BITNUM)" than as
   "(value >> BITNUM) & 1", judged by rtx costs.  Caches the two
   candidate rtx shapes in the file-scope AND_REG/AND_TEST/SHIFT_TEST
   variables and re-fills only the mode and constants on each call.  */
static bool
prefer_and_bit_test (enum machine_mode mode, int bitnum)
{
  if (and_test == 0)
    {
      /* Set up rtxes for the two variations.  Use NULL as a placeholder
	 for the BITNUM-based constants.  */
      and_reg = gen_rtx_REG (mode, FIRST_PSEUDO_REGISTER);
      and_test = gen_rtx_AND (mode, and_reg, NULL);
      shift_test = gen_rtx_AND (mode, gen_rtx_ASHIFTRT (mode, and_reg, NULL),
				const1_rtx);
    }
  else
    {
      /* Change the mode of the previously-created rtxes.  */
      PUT_MODE (and_reg, mode);
      PUT_MODE (and_test, mode);
      PUT_MODE (shift_test, mode);
      PUT_MODE (XEXP (shift_test, 0), mode);
    }

  /* Fill in the integers.  */
  XEXP (and_test, 1) = GEN_INT ((unsigned HOST_WIDE_INT) 1 << bitnum);
  XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum);

  return (rtx_cost (and_test, IF_THEN_ELSE)
	  <= rtx_cost (shift_test, IF_THEN_ELSE));
}
void moxie_expand_epilogue (void) { int regno; rtx reg; if (cfun->machine->callee_saved_reg_size != 0) { reg = gen_rtx_REG (Pmode, MOXIE_R12); if (cfun->machine->callee_saved_reg_size <= 255) { emit_move_insn (reg, hard_frame_pointer_rtx); emit_insn (gen_subsi3 (reg, reg, GEN_INT (cfun->machine->callee_saved_reg_size))); } else { emit_move_insn (reg, GEN_INT (-cfun->machine->callee_saved_reg_size)); emit_insn (gen_addsi3 (reg, reg, hard_frame_pointer_rtx)); } for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; ) if (!fixed_regs[regno] && !call_used_regs[regno] && df_regs_ever_live_p (regno)) { rtx preg = gen_rtx_REG (Pmode, regno); emit_insn (gen_movsi_pop (reg, preg)); } } emit_jump_insn (gen_returner ()); }
void crx_expand_epilogue (void) { rtx return_reg; /* Nonzero if we need to return and pop only RA. This will generate a * different insn. This differentiate is for the peepholes for call as last * statement in function. */ int only_popret_RA = (save_regs[RETURN_ADDRESS_REGNUM] && (sum_regs == UNITS_PER_WORD)); /* Return register. */ return_reg = gen_rtx_REG (Pmode, RETURN_ADDRESS_REGNUM); if (frame_pointer_needed) /* Restore the stack pointer with the frame pointers value */ emit_move_insn (stack_pointer_rtx, frame_pointer_rtx); if (size_for_adjusting_sp > 0) emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (size_for_adjusting_sp))); if (crx_interrupt_function_p ()) emit_jump_insn (gen_interrupt_return ()); else if (last_reg_to_save == -1) /* Nothing to pop */ /* Don't output jump for interrupt routine, only retx. */ emit_jump_insn (gen_indirect_jump_return ()); else if (only_popret_RA) emit_jump_insn (gen_popret_RA_return ()); else emit_jump_insn (gen_pop_and_popret_return (GEN_INT (sum_regs))); }
/* Adjust the stack pointer by AMOUNT bytes.  Adjustments whose constant
   fits an add-immediate are emitted directly; larger ones go through a
   scratch register.  Negative adjustments (frame allocation) are marked
   frame-related for the unwinder.

   Fixes: the immediate-range check used IN_RANGE (amount, -32776, 32768),
   which admits values such as 32768 and -32776..-32769 that do not fit a
   signed 16-bit add immediate (-32768..32767) and would be emitted as an
   invalid direct add.  The bounds now match the signed 16-bit range.  */
static void
stack_adjust (HOST_WIDE_INT amount)
{
  rtx insn;

  if (!IN_RANGE (amount, -32768, 32767))
    {
      /* AMOUNT does not fit a signed 16-bit immediate; materialize it
	 first.  r10 is caller saved so it can be used as a temp reg.  */
      rtx r10;
      r10 = gen_rtx_REG (word_mode, 10);
      insn = emit_move_insn (r10, GEN_INT (amount));
      if (amount < 0)
	RTX_FRAME_RELATED_P (insn) = 1;
      insn = emit_add (stack_pointer_rtx, stack_pointer_rtx, r10);
      if (amount < 0)
	RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      insn = emit_add (stack_pointer_rtx,
		       stack_pointer_rtx, GEN_INT (amount));
      if (amount < 0)
	RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Build (but do not emit) an insn sequence that prefetches the memory
   location ADDRESS + DELTA.  WRITE nonzero requests a prefetch for
   writing.  Returns the sequence.  */
static rtx
gen_speculative_prefetch (rtx address, gcov_type delta, int write)
{
  rtx tmp;
  rtx sequence;

  /* TODO: we do the prefetching for just one iteration ahead, which
     often is not enough.  */
  start_sequence ();
  if (offsettable_address_p (0, VOIDmode, address))
    tmp = plus_constant (copy_rtx (address), delta);
  else
    {
      tmp = simplify_gen_binary (PLUS, Pmode,
				 copy_rtx (address), GEN_INT (delta));
      tmp = force_operand (tmp, NULL);
    }
  /* The target's prefetch pattern may reject the computed address;
     fall back to forcing it into a register.  */
  if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
      (tmp, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
    tmp = force_reg (Pmode, tmp);
  /* Third operand is the locality hint; 3 requests maximal temporal
     locality per the `prefetch' standard pattern.  */
  emit_insn (gen_prefetch (tmp, GEN_INT (write), GEN_INT (3)));
  sequence = get_insns ();
  end_sequence ();

  return sequence;
}
/* Support function to determine the return address of the function
   'count' frames back up the stack.  FRAME is the frame pointer rtx of
   the frame being asked about.  Returns NULL_RTX when the address
   cannot be recovered (walking frames without a frame pointer).
   NOTE(review): the saved return address is assumed to live at
   FRAME - 2*UNITS_PER_WORD — confirm against the prologue layout.  */
rtx
lm32_return_addr_rtx (int count, rtx frame)
{
  rtx r;
  if (count == 0)
    {
      /* *mjs* This test originally used leaf_function_p (), we now use
	 the regs_ever_live test which I *think* is more accurate.  */
      if (!df_regs_ever_live_p(RA_REGNUM))
	{
	  /* The return address is still in the RA register.  */
	  r = gen_rtx_REG (Pmode, RA_REGNUM);
	}
      else
	{
	  /* RA was saved; read it back from its stack slot.  */
	  r = gen_rtx_MEM (Pmode,
			   gen_rtx_PLUS (Pmode, frame,
					 GEN_INT (- 2 * UNITS_PER_WORD)));
	  set_mem_alias_set (r, get_frame_alias_set ());
	}
    }
  else if (flag_omit_frame_pointer)
    /* Can't walk the frame chain without frame pointers.  */
    r = NULL_RTX;
  else
    {
      r = gen_rtx_MEM (Pmode,
		       gen_rtx_PLUS (Pmode, frame,
				     GEN_INT (- 2 * UNITS_PER_WORD)));
      set_mem_alias_set (r, get_frame_alias_set ());
    }
  return r;
}
/* Support function to determine the return address of the function
   'count' frames back up the stack.  */
rtx
lm32_return_addr_rtx (int count, rtx frame)
{
  rtx mem;

  if (count != 0)
    {
      /* Walking up the stack requires a frame-pointer chain.  */
      if (flag_omit_frame_pointer)
	return NULL_RTX;

      mem = gen_rtx_MEM (Pmode,
			 gen_rtx_PLUS (Pmode, frame,
				       GEN_INT (-2 * UNITS_PER_WORD)));
      set_mem_alias_set (mem, get_frame_alias_set ());
      return mem;
    }

  /* Current frame: if the return-address register was never clobbered,
     read it directly.  */
  if (!df_regs_ever_live_p (RA_REGNUM))
    return gen_rtx_REG (Pmode, RA_REGNUM);

  /* Otherwise the prologue saved it; load it from its stack slot.  */
  mem = gen_rtx_MEM (Pmode,
		     gen_rtx_PLUS (Pmode, frame,
				   GEN_INT (-2 * UNITS_PER_WORD)));
  set_mem_alias_set (mem, get_frame_alias_set ());
  return mem;
}
/* Generate code for transformations 3 and 4 (with MODE and OPERATION,
   operands OP1 and OP2, result TARGET, at most SUB subtractions, and
   probability of taking the optimal path(s) PROB1 and PROB2).
   Builds and returns a sequence that computes OP1 OPERATION OP2 into
   TARGET, first trying up to SUB repeated subtractions of OP2 before
   falling back to the full operation.  */
static rtx
gen_mod_subtract (enum machine_mode mode, enum rtx_code operation,
		  rtx target, rtx op1, rtx op2, int sub, int prob1, int prob2)
{
  rtx tmp, tmp1, jump;
  rtx end_label = gen_label_rtx ();
  rtx sequence;
  int i;

  start_sequence ();

  /* Make sure the second operand lives in a register so it can be
     reused across the compares and subtractions.  */
  if (!REG_P (op2))
    {
      tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, copy_rtx (op2));
    }
  else
    tmp = op2;

  emit_move_insn (target, copy_rtx (op1));
  /* If TARGET < TMP already, the current value is the answer.  */
  do_compare_rtx_and_jump (target, tmp, LTU, 0, mode, NULL_RTX,
			   NULL_RTX, end_label);

  /* Add branch probability to jump we just created.  */
  jump = get_last_insn ();
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob1), REG_NOTES (jump));

  /* Peel off up to SUB subtractions, exiting as soon as the remainder
     drops below TMP.  */
  for (i = 0; i < sub; i++)
    {
      tmp1 = expand_simple_binop (mode, MINUS, target, tmp, target,
				  0, OPTAB_WIDEN);
      if (tmp1 != target)
	emit_move_insn (target, tmp1);
      do_compare_rtx_and_jump (target, tmp, LTU, 0, mode, NULL_RTX,
			       NULL_RTX, end_label);

      /* Add branch probability to jump we just created.  */
      jump = get_last_insn ();
      REG_NOTES (jump)
	= gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (prob2),
			     REG_NOTES (jump));
    }

  /* Fallback: perform the full operation.  */
  tmp1 = simplify_gen_binary (operation, mode,
			      copy_rtx (target), copy_rtx (tmp));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (target, tmp1);

  emit_label (end_label);

  sequence = get_insns ();
  end_sequence ();
  rebuild_jump_labels (sequence);
  return sequence;
}
/* Generate code for transformation 1 (with MODE and OPERATION, operands
   OP1 and OP2, whose value is expected to be VALUE, result TARGET and
   probability of taking the optimal path PROB).  Builds and returns a
   sequence that uses the constant VALUE as the divisor when OP2 equals
   it at run time, falling back to the generic operation otherwise.  */
static rtx
gen_divmod_fixed_value (enum machine_mode mode, enum rtx_code operation,
			rtx target, rtx op1, rtx op2, gcov_type value,
			int prob)
{
  rtx tmp, tmp1, jump;
  rtx neq_label = gen_label_rtx ();
  rtx end_label = gen_label_rtx ();
  rtx sequence;

  start_sequence ();

  /* Make sure the divisor lives in a register so it can be reused.  */
  if (!REG_P (op2))
    {
      tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, copy_rtx (op2));
    }
  else
    tmp = op2;

  /* Compare the run-time divisor against the profiled VALUE.  */
  do_compare_rtx_and_jump (tmp, GEN_INT (value), NE, 0, mode, NULL_RTX,
			   NULL_RTX, neq_label);

  /* Add branch probability to jump we just created.  */
  jump = get_last_insn ();
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB,
			 GEN_INT (REG_BR_PROB_BASE - prob),
			 REG_NOTES (jump));

  /* Fast path: operate with the compile-time constant divisor.  */
  tmp1 = simplify_gen_binary (operation, mode,
			      copy_rtx (op1), GEN_INT (value));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (copy_rtx (target), copy_rtx (tmp1));

  emit_jump_insn (gen_jump (end_label));
  emit_barrier ();

  /* Slow path: divisor differed from VALUE; do the real operation.  */
  emit_label (neq_label);
  tmp1 = simplify_gen_binary (operation, mode,
			      copy_rtx (op1), copy_rtx (tmp));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (copy_rtx (target), copy_rtx (tmp1));

  emit_label (end_label);

  sequence = get_insns ();
  end_sequence ();
  rebuild_jump_labels (sequence);
  return sequence;
}
/* Emit RTL for the function prologue: push the live callee-saved
   registers, then drop the stack pointer by size_for_adjusting_sp.  */
void
moxie_expand_prologue (void)
{
  int regno;
  rtx insn;

  moxie_compute_frame ();

  if (flag_stack_usage_info)
    current_function_static_stack_size = cfun->machine->size_for_adjusting_sp;

  /* Save callee-saved registers.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    {
      if (!fixed_regs[regno] && df_regs_ever_live_p (regno)
	  && !call_used_regs[regno])
	{
	  insn = emit_insn (gen_movsi_push (gen_rtx_REG (Pmode, regno)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }

  if (cfun->machine->size_for_adjusting_sp > 0)
    {
      int i = cfun->machine->size_for_adjusting_sp;

      /* Peel off 255-byte chunks while the remainder is in 255..510, so
	 at most two subtract-immediates are used before giving up and
	 going through a register (NOTE(review): the 510 upper bound
	 caps the number of peeled subtracts — confirm intent).  */
      while ((i >= 255) && (i <= 510))
	{
	  insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (255)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  i -= 255;
	}

      if (i <= 255)
	{
	  /* Remainder fits a single subtract-immediate.  */
	  insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
					stack_pointer_rtx, GEN_INT (i)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	{
	  /* Large adjustment: materialize the size in r12, then
	     subtract it from the stack pointer.  */
	  rtx reg = gen_rtx_REG (SImode, MOXIE_R12);
	  insn = emit_move_insn (reg, GEN_INT (i));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
					stack_pointer_rtx, reg));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
}
/* Called after register allocation to add any instructions needed for
   the epilogue.  Using an epilogue insn is favored compared to putting
   all of the instructions in output_function_epilogue(), since it
   allows the scheduler to intermix instructions with the restores of
   the caller saved registers.  In some cases, it might be necessary to
   emit a barrier instruction as the first insn to prevent such
   scheduling.  */
void
fr30_expand_epilogue (void)
{
  int regno;

  /* Perform the inversion operations of the prologue.  */
  gcc_assert (current_frame_info.initialised);

  /* Pop local variables and arguments off the stack.
     If frame_pointer_needed is TRUE then the frame pointer register has
     actually been used as a frame pointer, and we can recover the stack
     pointer from it, otherwise we must unwind the stack manually.  */
  if (current_frame_info.frame_size > 0)
    {
      if (current_frame_info.save_fp && frame_pointer_needed)
	{
	  /* leave restores both sp and fp in one go; clear save_fp so
	     we don't pop fp again below.  */
	  emit_insn (gen_leave_func ());
	  current_frame_info.save_fp = 0;
	}
      else if (current_frame_info.frame_size <= 508)
	/* Small frame: a single add-to-stack immediate.  */
	emit_insn (gen_add_to_stack
		   (GEN_INT (current_frame_info.frame_size)));
      else
	{
	  /* Large frame: go through a temporary register.  */
	  rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);
	  emit_insn (gen_movsi (tmp,
				GEN_INT (current_frame_info.frame_size)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 stack_pointer_rtx, tmp));
	}
    }

  if (current_frame_info.save_fp)
    emit_insn (gen_movsi_pop (frame_pointer_rtx));

  /* Pop all the registers that were pushed.  */
  if (current_frame_info.save_rp)
    emit_insn (gen_movsi_pop (gen_rtx_REG (Pmode, RETURN_POINTER_REGNUM)));

  for (regno = 0; regno < STACK_POINTER_REGNUM; regno ++)
    if (current_frame_info.gmask & (1 << regno))
      emit_insn (gen_movsi_pop (gen_rtx_REG (Pmode, regno)));

  /* Release the area reserved for pretend (varargs) arguments.  */
  if (current_frame_info.pretend_size)
    emit_insn (gen_add_to_stack (GEN_INT (current_frame_info.pretend_size)));

  /* Reset state info for each function.  */
  current_frame_info = zero_frame_info;

  emit_jump_insn (gen_return_from_func ());
}
/* Output an assembler directive that emits VALUE as SIZE bytes of data.
   COMMENT is an optional printf-style format string (with trailing
   arguments) appended as an assembler comment when asm annotation
   (-dA) is enabled.  */
void
dw2_asm_output_data (int size, unsigned HOST_WIDE_INT value,
		     const char *comment, ...)
{
  va_list args;
  const char *asm_op = integer_asm_op (size, FALSE);

  va_start (args, comment);

  /* Mask off any bits that do not fit in SIZE bytes.  */
  if (size * 8 < HOST_BITS_PER_WIDE_INT)
    value &= ~(~(unsigned HOST_WIDE_INT) 0 << (size * 8));

  if (asm_op == NULL)
    /* No suitable directive for this size; fall back to the generic
       integer emitter.  */
    assemble_integer (GEN_INT (value), size, BITS_PER_UNIT, 1);
  else
    {
      fputs (asm_op, asm_out_file);
      fprint_whex (asm_out_file, value);
    }

  if (flag_debug_asm && comment)
    {
      fputs ("\t" ASM_COMMENT_START " ", asm_out_file);
      vfprintf (asm_out_file, comment, args);
    }
  putc ('\n', asm_out_file);

  va_end (args);
}
/* Worker for TARGET_SETUP_INCOMING_VARARGS: spill the remaining
   argument registers (from *CUM up to register 7) into their stack
   slots above the argument pointer, and report the size of that area
   through PRETEND_SIZE.  No RTL is emitted when NO_RTL is set.  */
static void
moxie_setup_incoming_varargs (cumulative_args_t cum_v,
			      machine_mode mode ATTRIBUTE_UNUSED,
			      tree type ATTRIBUTE_UNUSED,
			      int *pretend_size, int no_rtl)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int regno;
  int remaining = 8 - *cum;

  *pretend_size = remaining < 0 ? 0 : GET_MODE_SIZE (SImode) * remaining;

  if (no_rtl)
    return;

  for (regno = *cum; regno < 8; regno++)
    {
      rtx arg_reg = gen_rtx_REG (SImode, regno);
      rtx slot_addr
	= gen_rtx_PLUS (Pmode,
			gen_rtx_REG (SImode, ARG_POINTER_REGNUM),
			GEN_INT (UNITS_PER_WORD * (3 + (regno - 2))));

      emit_move_insn (gen_rtx_MEM (SImode, slot_addr), arg_reg);
    }
}
/* Emit a compare of OP0 and OP1 and return an rtx of code CODE over the
   operands (over cc0 on cc0 targets) for use in a conditional jump or
   store-flag pattern.  UNSIGNEDP is nonzero for unsigned comparisons;
   MODE is the operands' mode; SIZE is forwarded to the cmp pattern
   (NOTE(review): presumably for block comparisons — confirm against
   emit_cmp_insn).  May instead return a folded result when the
   comparison simplifies at compile time.  */
rtx
compare_from_rtx (rtx op0, rtx op1, enum rtx_code code, int unsignedp,
		  enum machine_mode mode, rtx size)
{
  enum rtx_code ucode;
  rtx tem;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  if (flag_force_mem)
    {
      op0 = force_not_mem (op0);
      op1 = force_not_mem (op1);
    }

  do_pending_stack_adjust ();

  /* Try to fold the comparison to a constant or simpler form.  */
  ucode = unsignedp ? unsigned_condition (code) : code;
  if ((tem = simplify_relational_operation (ucode, mode, op0, op1)) != 0)
    return tem;

#if 0
  /* There's no need to do this now that combine.c can eliminate lots of
     sign extensions.  This can be less efficient in certain cases on other
     machines.  */

  /* If this is a signed equality comparison, we can do it as an
     unsigned comparison since zero-extension is cheaper than sign
     extension and comparisons with zero are done as unsigned.  This is
     the case even on machines that can do fast sign extension, since
     zero-extension is easier to combine with other operations than
     sign-extension is.  If we are comparing against a constant, we must
     convert it to what it would look like unsigned.  */
  if ((code == EQ || code == NE) && ! unsignedp
      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
    {
      if (GET_CODE (op1) == CONST_INT
	  && (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0))) != INTVAL (op1))
	op1 = GEN_INT (INTVAL (op1) & GET_MODE_MASK (GET_MODE (op0)));
      unsignedp = 1;
    }
#endif

  emit_cmp_insn (op0, op1, code, size, mode, unsignedp);

#if HAVE_cc0
  return gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx);
#else
  return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
#endif
}
/* Attempt to rewrite INSN's stack adjustment constant to NEW_ADJUST and
   rebase every stack memory reference recorded in MEMLIST by DELTA,
   validating the whole group of changes at once.  Returns 1 on success
   (MEMLIST offsets are updated), 0 if the group was rejected.  */
static int
try_apply_stack_adjustment (rtx insn, struct csa_memlist *memlist,
			    HOST_WIDE_INT new_adjust, HOST_WIDE_INT delta)
{
  struct csa_memlist *entry;
  rtx set = single_set_for_csa (insn);

  /* Queue the new adjustment constant.  */
  validate_change (insn, &XEXP (SET_SRC (set), 1), GEN_INT (new_adjust), 1);

  /* Queue the rebased form of every dependent memory reference.  */
  for (entry = memlist; entry != NULL; entry = entry->next)
    validate_change
      (entry->insn, entry->mem,
       replace_equiv_address_nv (*entry->mem,
				 plus_constant (stack_pointer_rtx,
						entry->sp_offset - delta)),
       1);

  if (!apply_change_group ())
    return 0;

  /* Succeeded.  Update our knowledge of the memory references.  */
  for (entry = memlist; entry != NULL; entry = entry->next)
    entry->sp_offset -= delta;
  return 1;
}
/* Decide how to test bit BITNUM of a MODE value: return true when
   "value & (1 << BITNUM)" is expected to cost no more than
   "(value >> BITNUM) & 1" according to rtx costs.  The two candidate
   rtx shapes are cached in the file-scope AND_REG/AND_TEST/SHIFT_TEST
   variables and only re-targeted on subsequent calls.  */
static bool
prefer_and_bit_test (machine_mode mode, int bitnum)
{
  bool speed_p;
  wide_int mask = wi::set_bit_in_zero (bitnum, GET_MODE_PRECISION (mode));

  if (and_test != 0)
    {
      /* Re-target the cached rtx templates at the new mode.  */
      PUT_MODE (and_reg, mode);
      PUT_MODE (and_test, mode);
      PUT_MODE (shift_test, mode);
      PUT_MODE (XEXP (shift_test, 0), mode);
    }
  else
    {
      /* First call: build the two candidate shapes once, with NULL
	 placeholders where the BITNUM-dependent constants belong.  */
      and_reg = gen_rtx_REG (mode, LAST_VIRTUAL_REGISTER + 1);
      and_test = gen_rtx_AND (mode, and_reg, NULL);
      shift_test = gen_rtx_AND (mode,
				gen_rtx_ASHIFTRT (mode, and_reg, NULL),
				const1_rtx);
    }

  /* Plug in the BITNUM-dependent constants.  */
  XEXP (and_test, 1) = immed_wide_int_const (mask, mode);
  XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum);

  speed_p = optimize_insn_for_speed_p ();
  return (rtx_cost (and_test, mode, IF_THEN_ELSE, 0, speed_p)
	  <= rtx_cost (shift_test, mode, IF_THEN_ELSE, 0, speed_p));
}
/* Return true when testing bit BITNUM of a MODE value is expected to
   cost no more as "value & (1 << BITNUM)" than as
   "(value >> BITNUM) & 1", judged by rtx costs.  The candidate rtx
   shapes are cached in the file-scope AND_REG/AND_TEST/SHIFT_TEST
   variables across calls.  */
static bool
prefer_and_bit_test (enum machine_mode mode, int bitnum)
{
  bool speed_p;

  if (and_test == 0)
    {
      /* Set up rtxes for the two variations.  Use NULL as a placeholder
	 for the BITNUM-based constants.  */
      and_reg = gen_rtx_REG (mode, FIRST_PSEUDO_REGISTER);
      and_test = gen_rtx_AND (mode, and_reg, NULL);
      shift_test = gen_rtx_AND (mode, gen_rtx_ASHIFTRT (mode, and_reg, NULL),
				const1_rtx);
    }
  else
    {
      /* Change the mode of the previously-created rtxes.  */
      PUT_MODE (and_reg, mode);
      PUT_MODE (and_test, mode);
      PUT_MODE (shift_test, mode);
      PUT_MODE (XEXP (shift_test, 0), mode);
    }

  /* Fill in the integers.  */
  XEXP (and_test, 1)
    = immed_double_int_const (double_int_zero.set_bit (bitnum), mode);
  XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum);

  speed_p = optimize_insn_for_speed_p ();
  return (rtx_cost (and_test, IF_THEN_ELSE, 0, speed_p)
	  <= rtx_cost (shift_test, IF_THEN_ELSE, 0, speed_p));
}
/* Flush any deferred stack-pointer adjustment.  Does nothing while
   popping is inhibited (inhibit_defer_pop nonzero); otherwise emits the
   pending adjustment, if any, and clears it.  */
void
do_pending_stack_adjust (void)
{
  if (inhibit_defer_pop != 0)
    return;

  if (pending_stack_adjust != 0)
    adjust_stack (GEN_INT (pending_stack_adjust));
  pending_stack_adjust = 0;
}
void moxie_expand_prologue (void) { int regno; rtx insn; moxie_compute_frame (); /* Save callee-saved registers. */ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) { if (!fixed_regs[regno] && df_regs_ever_live_p (regno) && !call_used_regs[regno]) { insn = emit_insn (gen_movsi_push (gen_rtx_REG (Pmode, regno))); RTX_FRAME_RELATED_P (insn) = 1; } } if (cfun->machine->size_for_adjusting_sp > 0) { if (cfun->machine->size_for_adjusting_sp <= 255) { insn = emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (cfun->machine->size_for_adjusting_sp))); RTX_FRAME_RELATED_P (insn) = 1; } else { insn = emit_insn (gen_movsi (gen_rtx_REG (Pmode, MOXIE_R5), GEN_INT (-cfun->machine->size_for_adjusting_sp))); RTX_FRAME_RELATED_P (insn) = 1; insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, gen_rtx_REG (Pmode, MOXIE_R5))); RTX_FRAME_RELATED_P (insn) = 1; } } }
/*
 * RTL pass body: rewrite every insn that loads the address of the
 * global __stack_chk_guard symbol so that the stack canary is instead
 * fetched from a per-task location, computed as
 * (sp & sp_mask) + canary_offset.
 * NOTE(review): sp & sp_mask presumably yields the thread_info/stack
 * base on arm — confirm against the kernel's stack layout.
 */
static unsigned int arm_pertask_ssp_rtl_execute(void)
{
	rtx_insn *insn;

	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
		const char *sym;
		rtx body;
		rtx mask, masked_sp;

		/*
		 * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
		 */
		if (!INSN_P(insn))
			continue;
		body = PATTERN(insn);
		if (GET_CODE(body) != SET ||
		    GET_CODE(SET_SRC(body)) != SYMBOL_REF)
			continue;

		sym = XSTR(SET_SRC(body), 0);
		if (strcmp(sym, "__stack_chk_guard"))
			continue;

		/*
		 * Replace the source of the SET insn with an expression that
		 * produces the address of the copy of the stack canary value
		 * stored in struct thread_info
		 */
		mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
		masked_sp = gen_reg_rtx(Pmode);

		/* masked_sp = sp & sp_mask, emitted just before INSN */
		emit_insn_before(gen_rtx_set(masked_sp,
					     gen_rtx_AND(Pmode,
							 stack_pointer_rtx,
							 mask)),
				 insn);
		SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
					     GEN_INT(canary_offset));
	}
	return 0;
}
/* Generate and emit RTL to save or restore callee save registers.
   INFO describes the frame; OP is 0 to save registers to the stack,
   nonzero to restore them.  Only the save (prologue) insns are marked
   frame related.  */
static void
expand_save_restore (struct lm32_frame_info *info, int op)
{
  unsigned int reg_save_mask = info->reg_save_mask;
  int regno;
  HOST_WIDE_INT offset;
  rtx insn;

  /* Callee saves are below locals and above outgoing arguments.  */
  offset = info->args_size + info->callee_size;
  for (regno = 0; regno <= 31; regno++)
    {
      if ((reg_save_mask & (1 << regno)) != 0)
	{
	  rtx offset_rtx;
	  rtx mem;

	  offset_rtx = GEN_INT (offset);
	  if (satisfies_constraint_K (offset_rtx))
	    {
	      /* The offset fits the load/store displacement field, so a
		 simple sp+offset address can be used.  */
	      mem = gen_rtx_MEM (word_mode,
				 gen_rtx_PLUS (Pmode,
					       stack_pointer_rtx,
					       offset_rtx));
	    }
	  else
	    {
	      /* Offset too large for a displacement; compute the
		 address first.  r10 is caller saved so it can be used
		 as a temp reg.  */
	      rtx r10;

	      r10 = gen_rtx_REG (word_mode, 10);
	      insn = emit_move_insn (r10, offset_rtx);
	      if (op == 0)
		RTX_FRAME_RELATED_P (insn) = 1;
	      insn = emit_add (r10, r10, stack_pointer_rtx);
	      if (op == 0)
		RTX_FRAME_RELATED_P (insn) = 1;
	      mem = gen_rtx_MEM (word_mode, r10);
	    }

	  if (op == 0)
	    insn = emit_move_insn (mem, gen_rtx_REG (word_mode, regno));
	  else
	    insn = emit_move_insn (gen_rtx_REG (word_mode, regno), mem);

	  /* only prologue instructions which set the sp fp or save a
	     register should be marked as frame related.  */
	  if (op == 0)
	    RTX_FRAME_RELATED_P (insn) = 1;
	  offset -= UNITS_PER_WORD;
	}
    }
}
/* Generate code for transformation 2 (with MODE and OPERATION, operands
   OP1 and OP2, result TARGET and probability of taking the optimal path
   PROB).  Builds and returns a sequence that computes OP1 mod OP2 as a
   mask (OP1 & (OP2 - 1)) when OP2 turns out to be a power of two at run
   time, falling back to the full operation otherwise.  */
static rtx
gen_mod_pow2 (enum machine_mode mode, enum rtx_code operation, rtx target,
	      rtx op1, rtx op2, int prob)
{
  rtx tmp, tmp1, tmp2, tmp3, jump;
  rtx neq_label = gen_label_rtx ();
  rtx end_label = gen_label_rtx ();
  rtx sequence;

  start_sequence ();

  /* Make sure the divisor lives in a register so it can be reused.  */
  if (!REG_P (op2))
    {
      tmp = gen_reg_rtx (mode);
      emit_move_insn (tmp, copy_rtx (op2));
    }
  else
    tmp = op2;

  /* Power-of-two test: OP2 & (OP2 - 1) is zero iff OP2 is a power
     of two.  */
  tmp1 = expand_simple_binop (mode, PLUS, tmp, constm1_rtx, NULL_RTX,
			      0, OPTAB_WIDEN);
  tmp2 = expand_simple_binop (mode, AND, tmp, tmp1, NULL_RTX,
			      0, OPTAB_WIDEN);
  do_compare_rtx_and_jump (tmp2, const0_rtx, NE, 0, mode, NULL_RTX,
			   NULL_RTX, neq_label);

  /* Add branch probability to jump we just created.  */
  jump = get_last_insn ();
  REG_NOTES (jump)
    = gen_rtx_EXPR_LIST (REG_BR_PROB,
			 GEN_INT (REG_BR_PROB_BASE - prob),
			 REG_NOTES (jump));

  /* Fast path: modulo by a power of two is a mask with OP2 - 1.  */
  tmp3 = expand_simple_binop (mode, AND, op1, tmp1, target,
			      0, OPTAB_WIDEN);
  if (tmp3 != target)
    emit_move_insn (copy_rtx (target), tmp3);
  emit_jump_insn (gen_jump (end_label));
  emit_barrier ();

  /* Slow path: not a power of two; do the real operation.  */
  emit_label (neq_label);
  tmp1 = simplify_gen_binary (operation, mode,
			      copy_rtx (op1), copy_rtx (tmp));
  tmp1 = force_operand (tmp1, target);
  if (tmp1 != target)
    emit_move_insn (target, tmp1);

  emit_label (end_label);

  sequence = get_insns ();
  end_sequence ();
  rebuild_jump_labels (sequence);
  return sequence;
}
/* Generate and emit RTL to save or restore callee save registers */ static void expand_save_restore (struct lm32_frame_info *info, int op) { unsigned int reg_save_mask = info->reg_save_mask; int regno; HOST_WIDE_INT offset; rtx insn; /* Callee saves are below locals and above outgoing arguments */ offset = info->args_size + info->callee_size; for (regno = 0; regno <= 31; regno++) { if ((reg_save_mask & (1 << regno)) != 0) { if (op == 0) { insn = emit_move_insn (gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset))), gen_rtx_REG (word_mode, regno)); } else { insn = emit_move_insn (gen_rtx_REG (word_mode, regno), gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset)))); } /* only prologue instructions which set the sp fp or save a register should be marked as frame related */ if (op==0) RTX_FRAME_RELATED_P (insn) = 1; offset -= UNITS_PER_WORD; } } }
/* Worker for TARGET_TRAMPOLINE_INIT: copy the trampoline template into
   M_TRAMP and patch in the static-chain value and the address of
   FNDECL.  */
static void
moxie_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx slot;
  rtx fn_addr = XEXP (DECL_RTL (fndecl), 0);

  /* Start from the canned template.  */
  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  /* Static chain at offset 4, target address at offset 16
     (NOTE(review): offsets must match the template layout — confirm).  */
  slot = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (slot, chain_value);
  slot = adjust_address (m_tramp, SImode, 16);
  emit_move_insn (slot, fn_addr);
}
/* Emit RTL for the function prologue: push the registers that need
   saving, allocate the local frame, and set up the frame pointer when
   one is needed.  */
void
crx_expand_prologue (void)
{
  crx_compute_frame ();
  crx_compute_save_regs ();

  /* If there is no need in push and adjustment to sp, return.  */
  if (size_for_adjusting_sp + sum_regs == 0)
    return;

  if (last_reg_to_save != -1)
    /* If there are registers to push.  */
    emit_insn (gen_push_for_prologue (GEN_INT (sum_regs)));

  /* Lower the stack pointer to make room for locals.  */
  if (size_for_adjusting_sp > 0)
    emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			   GEN_INT (-size_for_adjusting_sp)));

  if (frame_pointer_needed)
    /* Initialize the frame pointer with the value of the stack pointer
       pointing now to the locals.  */
    emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
}
/* Verify that there is exactly single jump instruction since last and attach
   REG_BR_PROB note specifying probability.
   ??? We really ought to pass the probability down to RTL expanders and let it
   re-distribute it when the conditional expands into multiple conditionals.
   This is however difficult to do.  */
static void
add_reg_br_prob_note (FILE *dump_file, rtx last, int probability)
{
  /* Without profile data the probability is meaningless.  */
  if (profile_status == PROFILE_ABSENT)
    return;
  for (last = NEXT_INSN (last);
       last && NEXT_INSN (last);
       last = NEXT_INSN (last))
    if (GET_CODE (last) == JUMP_INSN)
      {
	/* It is common to emit condjump-around-jump sequence when we don't
	   know how to reverse the conditional.  Special case this.  */
	if (!any_condjump_p (last)
	    || GET_CODE (NEXT_INSN (last)) != JUMP_INSN
	    || !simplejump_p (NEXT_INSN (last))
	    || GET_CODE (NEXT_INSN (NEXT_INSN (last))) != BARRIER
	    || GET_CODE (NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))) != CODE_LABEL
	    || NEXT_INSN (NEXT_INSN (NEXT_INSN (NEXT_INSN (last)))))
	  goto failed;
	/* A note must not already be present.  */
	if (find_reg_note (last, REG_BR_PROB, 0))
	  abort ();
	/* The condjump jumps around the unconditional jump, so its
	   taken probability is the complement of PROBABILITY.  */
	REG_NOTES (last)
	  = gen_rtx_EXPR_LIST (REG_BR_PROB,
			       GEN_INT (REG_BR_PROB_BASE - probability),
			       REG_NOTES (last));
	return;
      }
  if (!last || GET_CODE (last) != JUMP_INSN || !any_condjump_p (last))
    goto failed;
  if (find_reg_note (last, REG_BR_PROB, 0))
    abort ();
  REG_NOTES (last)
    = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (probability),
			 REG_NOTES (last));
  return;
failed:
  if (dump_file)
    fprintf (dump_file, "Failed to add probability note\n");
}
/* Build (but do not emit) an insn sequence that compares OP0 with OP1
   using COMP and jumps to LABEL when the comparison holds.  PROB is
   attached to the jump as a REG_BR_PROB note.  For MODE_CC comparisons
   CINSN must supply an existing conditional jump whose pattern is
   copied and redirected; otherwise CINSN must be NULL.  Returns the
   sequence.  */
rtx
compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp, rtx label,
		      int prob, rtx cinsn)
{
  rtx seq, jump, cond;
  enum machine_mode mode;

  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  start_sequence ();
  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      /* A hack -- there seems to be no easy generic way how to make a
	 conditional jump from a ccmode comparison.  */
      gcc_assert (cinsn);
      cond = XEXP (SET_SRC (pc_set (cinsn)), 0);
      gcc_assert (GET_CODE (cond) == comp);
      gcc_assert (rtx_equal_p (op0, XEXP (cond, 0)));
      gcc_assert (rtx_equal_p (op1, XEXP (cond, 1)));
      emit_jump_insn (copy_insn (PATTERN (cinsn)));
      jump = get_last_insn ();
      gcc_assert (JUMP_P (jump));
      JUMP_LABEL (jump) = JUMP_LABEL (cinsn);
      LABEL_NUSES (JUMP_LABEL (jump))++;
      redirect_jump (jump, label, 0);
    }
  else
    {
      gcc_assert (!cinsn);

      op0 = force_operand (op0, NULL_RTX);
      op1 = force_operand (op1, NULL_RTX);
      do_compare_rtx_and_jump (op0, op1, comp, 0,
			       mode, NULL_RTX, NULL_RTX, label, -1);
      jump = get_last_insn ();
      gcc_assert (JUMP_P (jump));
      JUMP_LABEL (jump) = label;
      LABEL_NUSES (label)++;
    }
  /* Record the requested branch probability on the new jump.  */
  add_reg_note (jump, REG_BR_PROB, GEN_INT (prob));

  seq = get_insns ();
  end_sequence ();

  return seq;
}
/* Decide where an argument of MODE and TYPE is passed.  CUM counts the
   argument registers already used; NAMED is nonzero for named
   arguments.  Returns the register rtx when the argument goes in a
   register, NULL_RTX when it goes on the stack, and a literal 0 rtx for
   the VOIDmode marker used as operand 2 of a call insn.  */
rtx
lm32_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
		   int named)
{
  if (mode == VOIDmode)
    /* Compute operand 2 of the call insn.  */
    return GEN_INT (0);

  /* Stack-only arguments, unnamed (variadic) arguments, and arguments
     that no longer fit in the register window all go on the stack.  */
  if (targetm.calls.must_pass_in_stack (mode, type)
      || !named
      || (cum + LM32_NUM_REGS2 (mode, type) > LM32_NUM_ARG_REGS))
    return NULL_RTX;

  return gen_rtx_REG (mode, cum + LM32_FIRST_ARG_REG);
}
/* Emit the doubleword-granularity part of a memset expansion: store
   VALUE through ITR in 8-byte chunks (SIZE rounded down to a multiple
   of 8, per the ~0x7 mask) using the store-update pattern, advancing
   ITR in place.  Returns a pseudo holding the remaining byte count
   (SIZE & 7) to be handled by a byte-mode loop.  */
static rtx
emit_setmem_doubleword_loop (rtx itr, rtx size, rtx value)
{
  rtx word_mode_label = gen_label_rtx ();
  rtx word_mode_end_label = gen_label_rtx ();
  rtx byte_mode_size = gen_reg_rtx (SImode);
  rtx byte_mode_size_tmp = gen_reg_rtx (SImode);
  rtx word_mode_end = gen_reg_rtx (SImode);
  rtx size_for_word = gen_reg_rtx (SImode);

  /* and $size_for_word, $size, #~0x7 */
  size_for_word = expand_binop (SImode, and_optab, size,
				gen_int_mode (~0x7, SImode),
				NULL_RTX, 0, OPTAB_WIDEN);

  /* Default: everything is left for the byte loop.  */
  emit_move_insn (byte_mode_size, size);

  /* beqz $size_for_word, .Lbyte_mode_entry */
  emit_cmp_and_jump_insns (size_for_word, const0_rtx, EQ, NULL,
			   SImode, 1, word_mode_end_label);

  /* add $word_mode_end, $dst, $size_for_word */
  word_mode_end = expand_binop (Pmode, add_optab, itr, size_for_word,
				NULL_RTX, 0, OPTAB_WIDEN);

  /* andi $byte_mode_size, $size, 0x7 */
  byte_mode_size_tmp = expand_binop (SImode, and_optab, size, GEN_INT (0x7),
				     NULL_RTX, 0, OPTAB_WIDEN);
  emit_move_insn (byte_mode_size, byte_mode_size_tmp);

  /* .Lword_mode: */
  emit_label (word_mode_label);

  /* ! word-mode set loop
     smw.bim $value4word, [$dst_itr], $value4word, 0
     bne $word_mode_end, $dst_itr, .Lword_mode */
  emit_insn (gen_unaligned_store_update_base_dw (itr, itr, value));
  emit_cmp_and_jump_insns (word_mode_end, itr, NE, NULL,
			   Pmode, 1, word_mode_label);

  emit_label (word_mode_end_label);

  return byte_mode_size;
}