/* Assemble an "ldi" (load immediate) instruction: ldi <reg>, <imm>.
   Emits two instruction words: an opcode word selecting the target
   register, followed by the immediate value.  Returns 1 on success,
   0 on any parse or emit failure (diagnostic goes to stderr).
   Fix: the third argument to reg_from_name was the mojibake character
   U+00AE instead of `&reg'.  */
static int op_ldi(struct code_rom *rom, char *operands)
{
	char *tok[2] = {NULL, NULL};
	uint8_t reg;
	uint8_t imm;
	int n;

	n = easy_explode(operands, ',', tok, 2);
	if ( n != 2 ) {
		fprintf(stderr, "%s:%u: ldi: wrong number of arguments\n",
			rom->fn, rom->line);
		return 0;
	}

	if ( !reg_from_name(rom, tok[0], &reg) )
		return 0;
	if ( !imm_from_str(rom, tok[1], &imm) )
		return 0;

	/* Opcode word: bit 4 marks ldi, low bits select the register.  */
	if ( !emit_insn(rom, (1 << 4) | (1 << reg)) )
		return 0;
	/* Immediate operand word.  */
	if ( !emit_insn(rom, imm) )
		return 0;

	return 1;
}
void moxie_expand_epilogue (void) { int regno; rtx reg; if (cfun->machine->callee_saved_reg_size != 0) { reg = gen_rtx_REG (Pmode, MOXIE_R12); if (cfun->machine->callee_saved_reg_size <= 255) { emit_move_insn (reg, hard_frame_pointer_rtx); emit_insn (gen_subsi3 (reg, reg, GEN_INT (cfun->machine->callee_saved_reg_size))); } else { emit_move_insn (reg, GEN_INT (-cfun->machine->callee_saved_reg_size)); emit_insn (gen_addsi3 (reg, reg, hard_frame_pointer_rtx)); } for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; ) if (!fixed_regs[regno] && !call_used_regs[regno] && df_regs_ever_live_p (regno)) { rtx preg = gen_rtx_REG (Pmode, regno); emit_insn (gen_movsi_pop (reg, preg)); } } emit_jump_insn (gen_returner ()); }
/* Move COUNT words from *SRC_MEM to *DST_MEM using a load-multiple
   followed by a store-multiple, using hard registers starting at
   BASE_REGNO as the transfer set.  When UPDATE_BASE_REG_P is true,
   both base registers (and the MEM rtxes derived from them) are
   replaced with the post-transfer values returned by the expanders,
   so the caller can chain several blocks back to back.  */
static void
nds32_emit_mem_move_block (int base_regno, int count,
			   rtx *dst_base_reg, rtx *dst_mem,
			   rtx *src_base_reg, rtx *src_mem,
			   bool update_base_reg_p)
{
  rtx new_base_reg;

  /* Load COUNT words from the source block.  */
  emit_insn (nds32_expand_load_multiple (base_regno, count,
					 *src_base_reg, *src_mem,
					 update_base_reg_p, &new_base_reg));

  /* Advance the source base (and rebuild its MEM) before the store
     uses new_base_reg for the destination below.  */
  if (update_base_reg_p)
    {
      *src_base_reg = new_base_reg;
      *src_mem = gen_rtx_MEM (SImode, *src_base_reg);
    }

  /* Store the same words to the destination block.  */
  emit_insn (nds32_expand_store_multiple (base_regno, count,
					  *dst_base_reg, *dst_mem,
					  update_base_reg_p, &new_base_reg));

  if (update_base_reg_p)
    {
      *dst_base_reg = new_base_reg;
      *dst_mem = gen_rtx_MEM (SImode, *dst_base_reg);
    }
}
/* Expand a setmem (memset) loop for nds32 V3M cores, which cannot do
   unaligned accesses: first align the base to 4 bytes with a byte
   loop, fill the aligned middle with a double-word loop, then finish
   the remainder byte by byte.  Sizes of 16 or less skip straight to
   the byte loop.  Always returns true (expansion succeeds).  */
static bool
nds32_expand_setmem_loop_v3m (rtx dstmem, rtx size, rtx value)
{
  rtx base_reg = copy_to_mode_reg (Pmode, XEXP (dstmem, 0));
  rtx need_align_bytes = gen_reg_rtx (SImode);
  rtx last_2_bit = gen_reg_rtx (SImode);
  rtx byte_loop_base = gen_reg_rtx (SImode);
  rtx byte_loop_size = gen_reg_rtx (SImode);
  rtx remain_size = gen_reg_rtx (SImode);
  rtx new_base_reg;
  rtx value4byte, value4doubleword;
  rtx byte_mode_size;
  rtx last_byte_loop_label = gen_label_rtx ();

  size = force_reg (SImode, size);

  /* Replicate the fill byte across a double word; the single-byte
     fill value is its low QImode subreg.  */
  value4doubleword = nds32_gen_dup_8_byte_to_double_word_value (value);
  value4byte = simplify_gen_subreg (QImode, value4doubleword, DImode,
				    subreg_lowpart_offset (QImode, DImode));

  /* Defaults used when we branch straight to the final byte loop.  */
  emit_move_insn (byte_loop_size, size);
  emit_move_insn (byte_loop_base, base_reg);

  /* Jump to last byte loop if size is less than 16.  */
  emit_cmp_and_jump_insns (size, gen_int_mode (16, SImode), LE, NULL,
			   SImode, 1, last_byte_loop_label);

  /* Make sure align to 4 byte first since v3m can't unalign access.  */
  emit_insn (gen_andsi3 (last_2_bit, base_reg,
			 gen_int_mode (0x3, SImode)));
  emit_insn (gen_subsi3 (need_align_bytes,
			 gen_int_mode (4, SImode), last_2_bit));

  /* Align to 4 byte. */
  new_base_reg = emit_setmem_byte_loop (base_reg, need_align_bytes,
					value4byte, true);

  /* Calculate remain size. */
  emit_insn (gen_subsi3 (remain_size, size, need_align_bytes));

  /* Set memory word by word. */
  byte_mode_size = emit_setmem_doubleword_loop (new_base_reg, remain_size,
						value4doubleword);

  /* The tail byte loop resumes where the double-word loop stopped.  */
  emit_move_insn (byte_loop_base, new_base_reg);
  emit_move_insn (byte_loop_size, byte_mode_size);

  emit_label (last_byte_loop_label);

  /* And set memory for remain bytes. */
  emit_setmem_byte_loop (byte_loop_base, byte_loop_size, value4byte, false);

  return true;
}
/* Expand strlen inline: RESULT receives the offset of the first
   occurrence of TARGET_CHAR (normally the NUL terminator) in the
   string at STR, computed with a word-at-a-time loop around the nds32
   `ffb' (find first byte) instruction.  Returns false — leaving the
   caller to emit a library call — when optimizing for size or below
   -O3.  NOTE(review): the loop reads whole words, so it may read up
   to 3 bytes past the terminator; presumably acceptable per the
   target's memory model — confirm.  */
bool
nds32_expand_strlen (rtx result, rtx str,
		     rtx target_char, rtx align ATTRIBUTE_UNUSED)
{
  rtx base_reg, backup_base_reg;
  rtx ffb_result;
  rtx target_char_ptr, length;
  rtx loop_label, tmp;

  if (optimize_size || optimize < 3)
    return false;

  gcc_assert (MEM_P (str));
  gcc_assert (CONST_INT_P (target_char) || REG_P (target_char));

  base_reg = copy_to_mode_reg (SImode, XEXP (str, 0));
  loop_label = gen_label_rtx ();

  ffb_result = gen_reg_rtx (Pmode);
  tmp = gen_reg_rtx (SImode);
  backup_base_reg = gen_reg_rtx (SImode);

  /* Emit loop version of strlen.
       move  $backup_base, $base
     .Lloop:
       lmw.bim $tmp, [$base], $tmp, 0
       ffb   $ffb_result, $tmp, $target_char   ! is there $target_char?
       beqz  $ffb_result, .Lloop
       add   $last_char_ptr, $base, $ffb_result
       sub   $length, $last_char_ptr, $backup_base */

  /* move  $backup_base, $base */
  emit_move_insn (backup_base_reg, base_reg);

  /* .Lloop: */
  emit_label (loop_label);
  /* lmw.bim $tmp, [$base], $tmp, 0 */
  emit_insn (gen_unaligned_load_update_base_w (base_reg, tmp, base_reg));

  /* ffb  $ffb_result, $tmp, $target_char   ! is there $target_char?  */
  emit_insn (gen_unspec_ffb (ffb_result, tmp, target_char));

  /* beqz $ffb_result, .Lloop */
  emit_cmp_and_jump_insns (ffb_result, const0_rtx, EQ, NULL,
			   SImode, 1, loop_label);

  /* add  $target_char_ptr, $base, $ffb_result   */
  target_char_ptr = expand_binop (Pmode, add_optab, base_reg,
				  ffb_result, NULL_RTX, 0, OPTAB_WIDEN);

  /* sub  $length, $target_char_ptr, $backup_base */
  length = expand_binop (Pmode, sub_optab, target_char_ptr,
			 backup_base_reg, NULL_RTX, 0, OPTAB_WIDEN);

  emit_move_insn (result, length);

  return true;
}
/* Replicate the low byte of VALUE into all four bytes of VALUE4WORD
   (0x000000ab -> 0xabababab) and return VALUE4WORD.  Constant values
   are folded at expand time; otherwise use DSP insb/pkbb16 when the
   extension is available, or a shift/or sequence.  */
static rtx
nds32_gen_dup_4_byte_to_word_value_aux (rtx value, rtx value4word)
{
  gcc_assert (GET_MODE (value) == QImode || CONST_INT_P (value));

  if (CONST_INT_P (value))
    {
      unsigned HOST_WIDE_INT val = UINTVAL (value) & GET_MODE_MASK(QImode);
      rtx new_val = gen_int_mode (val | (val << 8)
				  | (val << 16) | (val << 24), SImode);
      /* Just calculate at here if it's constant value.  */
      emit_move_insn (value4word, new_val);
    }
  else
    {
      if (NDS32_EXT_DSP_P ())
	{
	  /* ! prepare word
	     insb    $tmp, $value, 1         ! $tmp  <- 0x0000abab
	     pkbb16  $tmp6, $tmp2, $tmp2     ! $value4word <- 0xabababab */
	  /* NOTE(review): the sketch above says insb operand 1 but the
	     expander passes 0x8 to gen_insvsi_internal — confirm which
	     encoding the pattern expects.  */
	  rtx tmp = gen_reg_rtx (SImode);

	  convert_move (tmp, value, true);

	  emit_insn (
	    gen_insvsi_internal (tmp, gen_int_mode (0x8, SImode), tmp));

	  emit_insn (gen_pkbbsi_1 (value4word, tmp, tmp));
	}
      else
	{
	  /* ! prepare word
	     andi    $tmp1, $value, 0xff       ! $tmp1  <- 0x000000ab
	     slli    $tmp2, $tmp1, 8           ! $tmp2  <- 0x0000ab00
	     or      $tmp3, $tmp1, $tmp2       ! $tmp3  <- 0x0000abab
	     slli    $tmp4, $tmp3, 16          ! $tmp4  <- 0xabab0000
	     or      $val4word, $tmp3, $tmp4   ! $value4word <- 0xabababab  */

	  rtx tmp1, tmp2, tmp3, tmp4;

	  tmp1 = expand_binop (SImode, and_optab, value,
			       gen_int_mode (0xff, SImode),
			       NULL_RTX, 0, OPTAB_WIDEN);
	  tmp2 = expand_binop (SImode, ashl_optab, tmp1,
			       gen_int_mode (8, SImode),
			       NULL_RTX, 0, OPTAB_WIDEN);
	  tmp3 = expand_binop (SImode, ior_optab, tmp1, tmp2,
			       NULL_RTX, 0, OPTAB_WIDEN);
	  tmp4 = expand_binop (SImode, ashl_optab, tmp3,
			       gen_int_mode (16, SImode),
			       NULL_RTX, 0, OPTAB_WIDEN);

	  emit_insn (gen_iorsi3 (value4word, tmp3, tmp4));
	}
    }

  return value4word;
}
/* Expand a call to an nds32 machine-specific builtin.  EXP is the
   CALL_EXPR, TARGET a suggested result location.  Dispatches on the
   builtin's function code: operand-less builtins emit their insn
   directly; the rest go through the typed expander helpers.  */
rtx
nds32_expand_builtin_impl (tree exp,
			   rtx target,
			   rtx subtarget ATTRIBUTE_UNUSED,
			   machine_mode mode ATTRIBUTE_UNUSED,
			   int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  int fcode = DECL_FUNCTION_CODE (fndecl);

  switch (fcode)
    {
    /* Cache.  */
    case NDS32_BUILTIN_ISYNC:
      return nds32_expand_builtin_null_ftype_reg
	     (CODE_FOR_unspec_volatile_isync, exp, target);
    case NDS32_BUILTIN_ISB:
      /* Since there are no result and operands for isb instruction,
	 we can simply emit this rtx.  */
      emit_insn (gen_unspec_volatile_isb ());
      return target;

    /* Register Transfer.  */
    case NDS32_BUILTIN_MFSR:
      return nds32_expand_builtin_reg_ftype_imm
	     (CODE_FOR_unspec_volatile_mfsr, exp, target);
    case NDS32_BUILTIN_MFUSR:
      return nds32_expand_builtin_reg_ftype_imm
	     (CODE_FOR_unspec_volatile_mfusr, exp, target);
    case NDS32_BUILTIN_MTSR:
      return nds32_expand_builtin_null_ftype_reg_imm
	     (CODE_FOR_unspec_volatile_mtsr, exp, target);
    case NDS32_BUILTIN_MTUSR:
      return nds32_expand_builtin_null_ftype_reg_imm
	     (CODE_FOR_unspec_volatile_mtusr, exp, target);

    /* Interrupt.  */
    case NDS32_BUILTIN_SETGIE_EN:
      /* Since there are no result and operands for setgie.e instruction,
	 we can simply emit this rtx.  */
      emit_insn (gen_unspec_volatile_setgie_en ());
      return target;
    case NDS32_BUILTIN_SETGIE_DIS:
      /* Since there are no result and operands for setgie.d instruction,
	 we can simply emit this rtx.  */
      emit_insn (gen_unspec_volatile_setgie_dis ());
      return target;

    default:
      gcc_unreachable ();
    }

  return NULL_RTX;
}
/* Expand RTL for the moxie prologue: record static stack usage when
   -fstack-usage is in effect, push the live callee-saved registers,
   then move SP down by size_for_adjusting_sp.  */
void
moxie_expand_prologue (void)
{
  int regno;
  rtx insn;

  moxie_compute_frame ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = cfun->machine->size_for_adjusting_sp;

  /* Save callee-saved registers.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    {
      if (!fixed_regs[regno] && df_regs_ever_live_p (regno)
	  && !call_used_regs[regno])
	{
	  insn = emit_insn (gen_movsi_push (gen_rtx_REG (Pmode, regno)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }

  if (cfun->machine->size_for_adjusting_sp > 0)
    {
      int i = cfun->machine->size_for_adjusting_sp;

      /* Peel 255-byte immediate subtracts while the remainder lies in
	 [255, 510], so adjustments up to 510 bytes need no scratch
	 register.  NOTE(review): for sizes > 510 this loop never runs
	 and the R12 path below is taken directly — confirm that split
	 is intended.  */
      while ((i >= 255) && (i <= 510))
	{
	  insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (255)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  i -= 255;
	}

      if (i <= 255)
	{
	  insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (i)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	{
	  /* Too large for an immediate: stage the size in R12.  */
	  rtx reg = gen_rtx_REG (SImode, MOXIE_R12);

	  insn = emit_move_insn (reg, GEN_INT (i));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
					stack_pointer_rtx,
					reg));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
}
/* Emit DEST := SRC0 + SRC1 (SImode add) and return the emitted insn.  */
static rtx
emit_add (rtx dest, rtx src0, rtx src1)
{
  return emit_insn (gen_addsi3 (dest, src0, src1));
}
/* Create and emit instructions for a function's epilogue: restore the
   callee-saved registers, deallocate the stack frame, and return.
   Fixes: the argument to expand_save_restore was the mojibake
   character U+00A4 fused into `current_frame_info' instead of
   `&current_frame_info'; the duplicated return-jump in both branches
   is hoisted out of the conditional (behavior unchanged).  */
void
lm32_expand_epilogue (void)
{
  rtx ra_rtx = gen_rtx_REG (Pmode, RA_REGNUM);

  lm32_compute_frame_size (get_frame_size ());

  if (current_frame_info.total_size > 0)
    {
      /* Prevent stack code from being reordered.  */
      emit_insn (gen_blockage ());

      /* Restore callee save registers.  */
      if (current_frame_info.reg_save_mask != 0)
	expand_save_restore (&current_frame_info, 1);

      /* Deallocate stack.  */
      stack_adjust (current_frame_info.total_size);
    }

  /* Return to calling function.  */
  emit_jump_insn (gen_return_internal (ra_rtx));
}
/* Emit the c54x epilogue: tear down the frame, pop every saved
   register in descending order, then return.  */
void c54x_expand_epilogue()
{
  int regno;

  /* Undo the frame allocation performed by the prologue.  */
  emit_insn (gen_frame (gen_rtx_REG (QImode, STACK_POINTER_REGNUM),
			gen_rtx_CONST_INT (VOIDmode, -get_frame_size ())));

  /* Pop saved registers, highest-numbered first.  Register 0 is never
     popped here: the loop stops at 1, mirroring the prologue pairing.  */
  for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 1; regno--)
    {
      if (c54x_save_register_p (regno))
	emit_insn (gen_popqi (gen_rtx_REG (QImode, regno)));
    }

  emit_insn (gen_return ());
}
/* Expand a call to one of the AArch64 CRC32 builtins.  EXP is the
   CALL_EXPR, TARGET a suggested result location.  FCODE indexes
   aarch64_crc_builtin_data relative to the CRC32 builtin base.
   Loads both arguments, coerces them to operands the insn pattern
   accepts, emits the pattern and returns the result register, or
   NULL_RTX if pattern generation fails.  */
rtx
aarch64_crc32_expand_builtin (int fcode, tree exp, rtx target)
{
  rtx pat;
  aarch64_crc_builtin_datum *d
    = &aarch64_crc_builtin_data[fcode - (AARCH64_CRC32_BUILTIN_BASE + 1)];
  enum insn_code icode = d->icode;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;

  /* Reuse TARGET only if it has the right mode and satisfies the
     destination predicate; otherwise take a fresh pseudo.  */
  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
	      && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));

  /* Force operands into registers when the predicates reject them.  */
  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (!pat)
    return NULL_RTX;

  emit_insn (pat);
  return target;
}
/* Expand RTL for the crx epilogue: restore SP (from the frame pointer
   if one was used), deallocate locals, then return via whichever
   specialised return pattern applies.  */
void
crx_expand_epilogue (void)
{
  rtx return_reg;

  /* Nonzero if we need to return and pop only RA. This will generate a
   * different insn. This differentiate is for the peepholes for call as last
   * statement in function. */
  int only_popret_RA = (save_regs[RETURN_ADDRESS_REGNUM]
			&& (sum_regs == UNITS_PER_WORD));

  /* Return register.
     NOTE(review): return_reg is computed but never used below —
     apparently dead code; confirm before removing.  */
  return_reg = gen_rtx_REG (Pmode, RETURN_ADDRESS_REGNUM);

  if (frame_pointer_needed)
    /* Restore the stack pointer with the frame pointers value */
    emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);

  /* Pop locals and arguments off the stack.  */
  if (size_for_adjusting_sp > 0)
    emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			   GEN_INT (size_for_adjusting_sp)));

  if (crx_interrupt_function_p ())
    emit_jump_insn (gen_interrupt_return ());
  else if (last_reg_to_save == -1)
    /* Nothing to pop */
    /* Don't output jump for interrupt routine, only retx.  */
    emit_jump_insn (gen_indirect_jump_return ());
  else if (only_popret_RA)
    emit_jump_insn (gen_popret_RA_return ());
  else
    emit_jump_insn (gen_pop_and_popret_return (GEN_INT (sum_regs)));
}
/* Build and return a detached insn sequence that prefetches
   ADDRESS + DELTA, i.e. DELTA bytes ahead of the profiled access.
   WRITE nonzero requests a write prefetch.  The caller inserts the
   returned sequence at the appropriate point.  */
static rtx
gen_speculative_prefetch (rtx address, gcov_type delta, int write)
{
  rtx tmp;
  rtx sequence;

  /* TODO: we do the prefetching for just one iteration ahead, which
     often is not enough.  */
  start_sequence ();
  if (offsettable_address_p (0, VOIDmode, address))
    tmp = plus_constant (copy_rtx (address), delta);
  else
    {
      /* Address + DELTA is not directly addressable: compute it into
	 an operand explicitly.  */
      tmp = simplify_gen_binary (PLUS, Pmode,
				 copy_rtx (address), GEN_INT (delta));
      tmp = force_operand (tmp, NULL);
    }
  /* The target's prefetch pattern may demand a register operand.  */
  if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
      (tmp, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
    tmp = force_reg (Pmode, tmp);
  /* Third operand 3 is the degree of temporal locality (maximal).  */
  emit_insn (gen_prefetch (tmp, GEN_INT (write), GEN_INT (3)));
  sequence = get_insns ();
  end_sequence ();

  return sequence;
}
/* Function to move block memory content by using load_multiple and
   store_multiple.  This is auxiliary extern function to help create
   rtx template.  Check nds32-multiple.md file for the patterns.
   Returns 1 when the move was expanded here, 0 to fall back to the
   generic expander.  */
int
nds32_expand_movmemqi (rtx dstmem, rtx srcmem, rtx total_bytes,
		       rtx alignment)
{
  HOST_WIDE_INT in_words, out_words;
  rtx dst_base_reg, src_base_reg;
  int maximum_bytes;

  /* Because reduced-set registers has few registers
     (r0~r5, r6~10, r15, r28~r31, where 'r15' and 'r28~r31' cannot
     be used for register allocation),
     using 8 registers (32 bytes) for moving memory block
     may easily consume all of them.
     It makes register allocation/spilling hard to work.
     So we only allow maximum=4 registers (16 bytes) for
     moving memory block under reduced-set registers.  */
  if (TARGET_REDUCED_REGS)
    maximum_bytes = 16;
  else
    maximum_bytes = 32;

  /* 1. Total_bytes is integer for sure.
     2. Alignment is integer for sure.
     3. Maximum 4 or 8 registers, 4 * 4 = 16 bytes, 8 * 4 = 32 bytes.
     4. Requires (n * 4) block size.
     5. Requires 4-byte alignment.  */
  if (GET_CODE (total_bytes) != CONST_INT
      || GET_CODE (alignment) != CONST_INT
      || INTVAL (total_bytes) > maximum_bytes
      || INTVAL (total_bytes) & 3
      || INTVAL (alignment) & 3)
    return 0;

  dst_base_reg = copy_to_mode_reg (SImode, XEXP (dstmem, 0));
  src_base_reg = copy_to_mode_reg (SImode, XEXP (srcmem, 0));

  out_words = in_words = INTVAL (total_bytes) / UNITS_PER_WORD;

  /* Load the whole block into registers, then store it out.  */
  emit_insn (nds32_expand_load_multiple (0, in_words,
					 src_base_reg, srcmem));
  emit_insn (nds32_expand_store_multiple (0, out_words,
					  dst_base_reg, dstmem));

  /* Successfully create patterns, return 1.  */
  return 1;
}
void c54x_expand_prologue() { int r; for(r = 0; r < FIRST_PSEUDO_REGISTER; r++) { if(c54x_save_register_p(r)) { emit_insn(gen_pushqi(gen_rtx_REG(QImode, r))); } } if(frame_pointer_needed) { emit_move_insn(gen_rtx_REG (QImode, FRAME_POINTER_REGNUM), gen_rtx_REG (QImode, STACK_POINTER_REGNUM)); } emit_insn(gen_frame(gen_rtx_REG (QImode, STACK_POINTER_REGNUM), gen_rtx_CONST_INT(VOIDmode, get_frame_size()))); }
/* Emit a load-locked instruction loading MEM into REG.  MODE selects
   the variant: 9 emits the SImode form, 10 the DImode form.
   NOTE(review): confirm the magic mode numbers 9/10 against the
   machine-mode enumeration in use.
   Fix: the original called through a null function pointer (undefined
   behaviour) for any other mode; trap explicitly instead.  */
void emit_load_locked (int mode, void *reg, void *mem)
{
  void * (*fn) (void *, void *) = ((void *)0);

  if (mode == 9)
    fn = gen_load_locked_si;
  else if (mode == 10)
    fn = gen_load_locked_di;

  /* Unsupported mode: fail loudly rather than jump to address 0.  */
  if (fn == ((void *)0))
    __builtin_trap ();

  emit_insn (fn (reg, mem));
}
/* Main entry to expand conditional compare statement G.
   Return NULL_RTX if G is not a legal candidate or expand fail.
   Otherwise return the target.  */
rtx
expand_ccmp_expr (gimple *g)
{
  rtx_insn *last;
  rtx tmp;
  rtx prep_seq, gen_seq;

  prep_seq = gen_seq = NULL_RTX;

  if (!ccmp_candidate_p (g))
    return NULL_RTX;

  /* Remember the last insn so anything emitted on a failed attempt
     can be deleted below.  */
  last = get_last_insn ();
  tmp = expand_ccmp_expr_1 (g, &prep_seq, &gen_seq);

  if (tmp)
    {
      enum insn_code icode;
      enum machine_mode cc_mode = CCmode;
      tree lhs = gimple_assign_lhs (g);

      /* Let the target pick the CC mode for an NE-against-zero test
	 of the combined compare result.  */
#ifdef SELECT_CC_MODE
      cc_mode = SELECT_CC_MODE (NE, tmp, const0_rtx);
#endif
      icode = optab_handler (cstore_optab, cc_mode);
      if (icode != CODE_FOR_nothing)
	{
	  enum machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
	  rtx target = gen_reg_rtx (mode);

	  /* Emit the preparation sequence, then the conditional
	     compares themselves, then materialize the flag into
	     TARGET via cstore.  */
	  emit_insn (prep_seq);
	  emit_insn (gen_seq);
	  tmp = emit_cstore (target, icode, NE, cc_mode, cc_mode,
			     0, tmp, const0_rtx, 1, mode);
	  if (tmp)
	    return tmp;
	}
    }

  /* Clean up.  */
  delete_insns_since (last);
  return NULL_RTX;
}
/* Expand RTL for the moxie prologue: push live callee-saved
   registers, then move SP down by size_for_adjusting_sp (small
   adjustments use an immediate subtract; larger ones stage the
   negated size in scratch register R5 and add).  */
void
moxie_expand_prologue (void)
{
  int regno;
  rtx insn;

  moxie_compute_frame ();

  /* Save callee-saved registers.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    {
      if (!fixed_regs[regno] && df_regs_ever_live_p (regno)
	  && !call_used_regs[regno])
	{
	  insn = emit_insn (gen_movsi_push (gen_rtx_REG (Pmode, regno)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }

  if (cfun->machine->size_for_adjusting_sp > 0)
    {
      if (cfun->machine->size_for_adjusting_sp <= 255)
	{
	  /* Fits the immediate field of a subtract.  */
	  insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
					stack_pointer_rtx,
					GEN_INT (cfun->machine->size_for_adjusting_sp)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	{
	  /* Load the negated adjustment into R5 and add it to SP.  */
	  insn = emit_insn (gen_movsi (gen_rtx_REG (Pmode, MOXIE_R5),
				       GEN_INT (-cfun->machine->size_for_adjusting_sp)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
					stack_pointer_rtx,
					gen_rtx_REG (Pmode, MOXIE_R5)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
}
/* Expand a QImode move for c54x.  Returns 0 when the move was not
   handled here, 1 when an insn was emitted, or 2 for operand shapes
   recognised but deferred — NOTE(review): confirm against the caller
   what return value 2 asks it to do.  The fprintf/print_rtl calls
   are debug tracing deliberately left in the expander.  */
int c54x_expand_movqi(rtx ops[])
{
  int done = 0;
  int i;

  /* Debug dump of both operands.  */
  fprintf(stderr, "--->>>");
  for(i=0; i < 2; i++) {
    print_rtl(stderr, ops[i]);
  }
  fprintf(stderr, "<<<---\n");

  if(ACC_REG_P(ops[0])) {
    /* Accumulator destination: retype the destination to PSImode,
       then choose the load variant by the source operand's kind.  */
    ops[0] = copy_rtx(ops[0]);
    PUT_MODE(ops[0], PSImode);
    fprintf(stderr, "+++");
    print_rtl(stderr, ops[0]);
    fprintf(stderr, "+++\n");
    done = 1;
    if(MEM_P(ops[1])) {
      emit_insn(gen_ldm(ops[0], ops[1]));
    } else if(REG_P(ops[1])) {
      emit_insn(gen_ldu(ops[0], ops[1]));
    } else if(CONSTANT_P(ops[1])) {
      /* Constant load needs a scratch QImode register.  */
      emit_insn(gen_ld_const(ops[0], ops[1], gen_reg_rtx(QImode)));
    } else {
      done = 2;
    }
  } else if( (REG_P(ops[0])
	      && (GET_CODE(ops[1]) == MEM && REG_P(XEXP(ops[1],0))))
	     || (T_REG_P(ops[0]) && ARSP_REG_P(ops[1])) ) {
    /* Register <- register-indirect MEM, or T <- AR/SP moves.  */
    done = 2;
  }

  return done;
}
/* Called after register allocation to add any instructions needed for
   the epilogue.  Using an epilogue insn is favored compared to putting
   all of the instructions in output_function_epilogue(), since it
   allows the scheduler to intermix instructions with the restores of
   the caller saved registers.  In some cases, it might be necessary
   to emit a barrier instruction as the first insn to prevent such
   scheduling.  */
void
fr30_expand_epilogue (void)
{
  int regno;

  /* Perform the inversion operations of the prologue.  */
  gcc_assert (current_frame_info.initialised);

  /* Pop local variables and arguments off the stack.
     If frame_pointer_needed is TRUE then the frame pointer register
     has actually been used as a frame pointer, and we can recover
     the stack pointer from it, otherwise we must unwind the stack
     manually.  */
  if (current_frame_info.frame_size > 0)
    {
      if (current_frame_info.save_fp && frame_pointer_needed)
	{
	  /* leave_func restores both SP and FP in one insn; clear
	     save_fp so the separate FP pop below is skipped.  */
	  emit_insn (gen_leave_func ());
	  current_frame_info.save_fp = 0;
	}
      else if (current_frame_info.frame_size <= 508)
	/* Small frames fit the add-to-stack immediate.  */
	emit_insn (gen_add_to_stack
		   (GEN_INT (current_frame_info.frame_size)));
      else
	{
	  /* Large frame: stage the size in the prologue scratch reg.  */
	  rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);
	  emit_insn (gen_movsi (tmp, GEN_INT (current_frame_info.frame_size)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	}
    }

  if (current_frame_info.save_fp)
    emit_insn (gen_movsi_pop (frame_pointer_rtx));

  /* Pop all the registers that were pushed.  */
  if (current_frame_info.save_rp)
    emit_insn (gen_movsi_pop (gen_rtx_REG (Pmode, RETURN_POINTER_REGNUM)));

  for (regno = 0; regno < STACK_POINTER_REGNUM; regno ++)
    if (current_frame_info.gmask & (1 << regno))
      emit_insn (gen_movsi_pop (gen_rtx_REG (Pmode, regno)));

  if (current_frame_info.pretend_size)
    emit_insn (gen_add_to_stack (GEN_INT (current_frame_info.pretend_size)));

  /* Reset state info for each function.  */
  current_frame_info = zero_frame_info;

  emit_jump_insn (gen_return_from_func ());
}
void crx_expand_prologue (void) { crx_compute_frame (); crx_compute_save_regs (); /* If there is no need in push and adjustment to sp, return. */ if (size_for_adjusting_sp + sum_regs == 0) return; if (last_reg_to_save != -1) /* If there are registers to push. */ emit_insn (gen_push_for_prologue (GEN_INT (sum_regs))); if (size_for_adjusting_sp > 0) emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-size_for_adjusting_sp))); if (frame_pointer_needed) /* Initialize the frame pointer with the value of the stack pointer * pointing now to the locals. */ emit_move_insn (frame_pointer_rtx, stack_pointer_rtx); }
/* Pad the output with zero bytes until code_address mod 16 is 0, 5 or
   10, then emit opcode byte CD.  If CD would start too close to the
   end of a 4 KiB page (within the last 16 bytes for 0xFD, the last 22
   for 0x61), emit 0xEA filler instruction words to push it past the
   boundary first.
   NOTE(review): the second alignment loop seeds `ad' with
   code_address & 0xfff but re-computes it with & 15 inside the loop —
   confirm the mixed masks are intentional.
   NOTE(review): 0xEAEAEAEAEA is a 40-bit constant; emit_insn's
   parameter must be at least 64 bits wide for this to emit the
   intended filler.  */
static void emitAlignedCode(int cd)
{
	int64_t ad;

	/* Align to the next legal slot offset (0, 5 or 10 mod 16).  */
	ad = code_address & 15;
	while (ad != 0 && ad != 5 && ad != 10) {
		emitByte(0x00);
		ad = code_address & 15;
	}

	/* Keep CD clear of the 4 KiB page boundary.  */
	ad = code_address & 0xfff;
	if ((ad > 0xFF0 && cd == 0xFD) || (ad > 0xFEA && cd == 0x61)) {
		emit_insn(0xEAEAEAEAEA);
		emit_insn(0xEAEAEAEAEA);
		if (cd==0x61)
			emit_insn(0xEAEAEAEAEA);
		ProcessEOL(0);
		/* Re-align on the far side of the boundary.  */
		ad = code_address & 0xfff;
		while (ad != 0 && ad != 5 && ad != 10) {
			emitByte(0x00);
			ad = code_address & 15;
		}
	}
	emitByte(cd);
}
/* Emit code for an assignment node: evaluate the right-hand side,
   then pop the result into the variable named by the left-hand side.
   TREE must have exactly two children.  If the left operand is not an
   identifier, an error is reported and no store is emitted.
   NOTE(review): the eprintf format begins with "%:" — presumably a
   conversion understood by the custom eprintf; verify it is not a
   mangled "%s:".  */
void emit_assign (astree tree)
{
   astree left = NULL;
   assert (tree != NULL);
   left = tree->first;
   assert (left != NULL);
   /* Exactly two operands: left and right of `='.  */
   assert (left->next != NULL);
   assert (left->next->next == NULL);
   /* Evaluate the RHS first so its value is on the operand stack.  */
   emit (left->next);
   if (left->symbol != IDENT) {
      eprintf ("%:%s: %d: left operand of `=' is not an identifier\n",
               scanner_filename (left->filenr), left->linenr);
   }else{
      emit_insn ("popvar", left->lexinfo, left);
   }
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient.  Handles the
   FPCR/FPSR accessors inline and forwards SIMD and CRC32 builtins to
   their dedicated expanders.  */
rtx
aarch64_expand_builtin (tree exp,
			rtx target,
			rtx subtarget ATTRIBUTE_UNUSED,
			machine_mode mode ATTRIBUTE_UNUSED,
			int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  int fcode = DECL_FUNCTION_CODE (fndecl);
  int icode;
  rtx pat, op0;
  tree arg0;

  switch (fcode)
    {
    case AARCH64_BUILTIN_GET_FPCR:
    case AARCH64_BUILTIN_SET_FPCR:
    case AARCH64_BUILTIN_GET_FPSR:
    case AARCH64_BUILTIN_SET_FPSR:
      if ((fcode == AARCH64_BUILTIN_GET_FPCR)
	  || (fcode == AARCH64_BUILTIN_GET_FPSR))
	{
	  /* Getters: read the status/control register into a fresh
	     SImode pseudo.  */
	  icode = (fcode == AARCH64_BUILTIN_GET_FPSR)
		  ? CODE_FOR_get_fpsr : CODE_FOR_get_fpcr;
	  target = gen_reg_rtx (SImode);
	  pat = GEN_FCN (icode) (target);
	}
      else
	{
	  /* Setters: no result; expand the single argument and write
	     it to the register.  */
	  target = NULL_RTX;
	  icode = (fcode == AARCH64_BUILTIN_SET_FPSR)
		  ? CODE_FOR_set_fpsr : CODE_FOR_set_fpcr;
	  arg0 = CALL_EXPR_ARG (exp, 0);
	  op0 = expand_normal (arg0);
	  pat = GEN_FCN (icode) (op0);
	}
      emit_insn (pat);
      return target;
    }

  /* Dispatch the remaining builtins by function-code range.  */
  if (fcode >= AARCH64_SIMD_BUILTIN_BASE && fcode <= AARCH64_SIMD_BUILTIN_MAX)
    return aarch64_simd_expand_builtin (fcode, exp, target);
  else if (fcode >= AARCH64_CRC32_BUILTIN_BASE
	   && fcode <= AARCH64_CRC32_BUILTIN_MAX)
    return aarch64_crc32_expand_builtin (fcode, exp, target);

  gcc_unreachable ();
}
/* Expand a QImode add.  Dumps the three operands to stderr for
   debugging, then emits the frame-adjust pattern for the special
   case SP = SP + constant; all other shapes emit nothing here.  */
void c54x_expand_addqi (rtx ops[])
{
  int opno;

  /* Debug trace of all three operands.  */
  fprintf (stderr, "---<<<");
  for (opno = 0; opno < 3; opno++)
    print_rtl (stderr, ops[opno]);
  fprintf (stderr, ">>>---\n");

  if (SP_REG_P (ops[0])
      && SP_REG_P (ops[1])
      && GET_CODE (ops[2]) == CONST_INT)
    emit_insn (gen_frame (ops[0], ops[2]));
}
/* Emit an aligned double-word memset loop: fill (size & ~7) bytes
   starting at ITR with VALUE (a fill pattern replicated across a
   double word), advancing ITR as it goes.  Returns a register holding
   the number of trailing bytes (size & 7) for the caller's byte loop.
   Fix: size_for_word, word_mode_end and byte_mode_size_tmp were each
   initialized with gen_reg_rtx() and then immediately overwritten by
   expand_binop() results — the dead initializers wasted pseudo
   registers and are removed.  */
static rtx
emit_setmem_doubleword_loop (rtx itr, rtx size, rtx value)
{
  rtx word_mode_label = gen_label_rtx ();
  rtx word_mode_end_label = gen_label_rtx ();
  rtx byte_mode_size = gen_reg_rtx (SImode);
  rtx byte_mode_size_tmp;
  rtx word_mode_end;
  rtx size_for_word;

  /* and $size_for_word, $size, #~0x7 */
  size_for_word = expand_binop (SImode, and_optab, size,
				gen_int_mode (~0x7, SImode),
				NULL_RTX, 0, OPTAB_WIDEN);

  /* Default: everything is left for the byte loop if we skip the
     double-word loop entirely.  */
  emit_move_insn (byte_mode_size, size);

  /* beqz $size_for_word, .Lbyte_mode_entry */
  emit_cmp_and_jump_insns (size_for_word, const0_rtx, EQ, NULL,
			   SImode, 1, word_mode_end_label);

  /* add $word_mode_end, $dst, $size_for_word */
  word_mode_end = expand_binop (Pmode, add_optab, itr, size_for_word,
				NULL_RTX, 0, OPTAB_WIDEN);

  /* andi $byte_mode_size, $size, 0x7 */
  byte_mode_size_tmp = expand_binop (SImode, and_optab, size,
				     GEN_INT (0x7),
				     NULL_RTX, 0, OPTAB_WIDEN);
  emit_move_insn (byte_mode_size, byte_mode_size_tmp);

  /* .Lword_mode: */
  emit_label (word_mode_label);

  /* ! word-mode set loop
     smw.bim $value4word, [$dst_itr], $value4word, 0
     bne $word_mode_end, $dst_itr, .Lword_mode */
  emit_insn (gen_unaligned_store_update_base_dw (itr, itr, value));
  emit_cmp_and_jump_insns (word_mode_end, itr, NE, NULL,
			   Pmode, 1, word_mode_label);

  emit_label (word_mode_end_label);

  return byte_mode_size;
}
/* Assemble a conditional-branch instruction targeting LABEL.  INVERT
   and ZERO select the branch sense; both are normalised to 0/1 before
   being packed into the instruction word alongside the label's
   address.  Returns the result of emit_insn(), or 0 with a diagnostic
   if LABEL is unknown.  */
static int cbranch_op(struct code_rom *rom, uint8_t invert, uint8_t zero,
		      const char *label)
{
	struct label *target;
	uint16_t word;

	target = label_lookup(rom, label);
	if ( target == NULL ) {
		fprintf(stderr, "%s:%u: label not found: %s\n",
			rom->fn, rom->line, label);
		return 0;
	}

	/* Bit 8 marks a conditional branch; bits 7/6 carry the
	   normalised sense flags; low bits hold the target address.  */
	word = (1 << 8)
	     | ((uint8_t)!!invert << 7)
	     | ((uint8_t)!!zero << 6)
	     | target->addr;

	return emit_insn(rom, word);
}
/* Assemble a two-source ALU instruction: <op> <x>, <y>, <dest>.
   Encodes the X and Y source selectors and the destination register
   into a single instruction word.  Returns 1 on success, 0 on any
   parse or emit failure (diagnostic goes to stderr).
   NOTE(review): OP is used only for the diagnostic name, not encoded
   in the instruction word — confirm that is intended.
   Fix: the third argument to reg_from_name was the mojibake character
   U+00AE instead of `&reg'.  */
static int alu_op(struct code_rom *rom, uint8_t op, char *operands)
{
	char *tok[3] = {NULL, NULL, NULL};
	uint8_t reg, x, y;
	int n;

	n = easy_explode(operands, ',', tok, 3);
	if ( n != 3 ) {
		fprintf(stderr, "%s:%u: %s: wrong number of arguments\n",
			rom->fn, rom->line, cpu_alu_name(op));
		return 0;
	}

	if ( !xreg_from_name(rom, tok[0], &x) )
		return 0;
	if ( !yreg_from_name(rom, tok[1], &y) )
		return 0;
	if ( !reg_from_name(rom, tok[2], &reg) )
		return 0;

	/* Bit 7 marks an ALU op; bits 6/5 select the sources.  */
	return emit_insn(rom, (1 << 7) | (x << 6) | (y << 5) | reg);
}
/* Create and emit instructions for a functions prologue. */ void lm32_expand_prologue (void) { rtx insn; lm32_compute_frame_size (get_frame_size ()); if (current_frame_info.total_size > 0) { /* Add space on stack new frame. */ stack_adjust (-current_frame_info.total_size); /* Save callee save registers. */ if (current_frame_info.reg_save_mask != 0) expand_save_restore (¤t_frame_info, 0); /* Setup frame pointer if it's needed. */ if (frame_pointer_needed == 1) { /* Move sp to fp. */ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx); RTX_FRAME_RELATED_P (insn) = 1; /* Add offset - Don't use total_size, as that includes pretend_size, which isn't part of this frame? */ insn = emit_add (frame_pointer_rtx, frame_pointer_rtx, GEN_INT (current_frame_info.args_size + current_frame_info.callee_size + current_frame_info.locals_size)); RTX_FRAME_RELATED_P (insn) = 1; } /* Prevent prologue from being scheduled into function body. */ emit_insn (gen_blockage ()); } }