/*
 * cmpe/cmpei: compare-equal.  Logs the operands, then defers all code
 * generation to gen_compare() with an EQ condition.
 */
static void dec_cmpe(DisasContext *dc)
{
    if (dc->format != OP_FMT_RI) {
        LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    } else {
        LOG_DIS("cmpei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    }

    gen_compare(dc, TCG_COND_EQ);
}
/*
 * cmpgu/cmpgui: compare greater-than unsigned.  Logging only; the
 * actual comparison is generated by gen_compare() with GTU.
 */
static void dec_cmpgu(DisasContext *dc)
{
    if (dc->format != OP_FMT_RI) {
        LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    } else {
        LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    }

    gen_compare(dc, TCG_COND_GTU);
}
/*
 * CRISv10 setf/clearf: set or clear a group of CCS flag bits.
 * The low opcode bit selects the direction (inverted: 0 -> setf);
 * the 8-bit flag mask is split across insn bits 0..3 (low nibble)
 * and 12..15 (high nibble).  Returns the insn length in bytes.
 */
static unsigned int dec10_setclrf(DisasContext *dc)
{
    uint32_t flags;
    unsigned int set = ~dc->opcode & 1;   /* 1 = setf, 0 = clearf */

    flags = EXTRACT_FIELD(dc->ir, 0, 3)
            | (EXTRACT_FIELD(dc->ir, 12, 15) << 4);
    LOG_DIS("%s set=%d flags=%x\n", __func__, set, flags);

    /* The X flag is modelled at translation time; record its new state. */
    if (flags & X_FLAG) {
        dc->flagx_known = 1;
        if (set)
            dc->flags_x = X_FLAG;
        else
            dc->flags_x = 0;
    }

    /* Materialize any lazily-evaluated flags before touching CCS directly. */
    cris_evaluate_flags (dc);
    cris_update_cc_op(dc, CC_OP_FLAGS, 4);
    cris_update_cc_x(dc);
    tcg_gen_movi_tl(cc_op, dc->cc_op);

    if (set) {
        tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
    } else {
        /* clearf additionally clears the v10-specific F and P bits. */
        tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS],
                        ~(flags|F_FLAG_V10|P_FLAG_V10));
    }

    dc->flags_uptodate = 1;
    dc->clear_x = 0;
    /* NOTE(review): presumably holds off IRQ delivery for the next
     * insn so it observes the new flags — confirm in cris_lock_irq(). */
    cris_lock_irq(dc);
    return 2;
}
/*
 * bg: branch when signed greater-than.  The 16-bit immediate is a word
 * offset: sign-extend the 16-bit field, then scale by 4.
 *
 * Fix: the trace previously printed sign_extend(dc->imm16, 16 * 4),
 * i.e. sign-extended at bit 63 — the multiply belongs outside the
 * extension, matching dec_bne/dec_bgeu.  Only the disassembly log was
 * affected; gen_cond_branch() computes the target itself.
 */
static void dec_bg(DisasContext *dc)
{
    LOG_DIS("bg r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GT);
}
/*
 * xor/xori: bitwise exclusive-or.  The immediate form zero-extends the
 * 16-bit constant.
 */
static void dec_xor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        uint32_t imm = zero_extend(dc->imm16, 16);

        LOG_DIS("xori r%d, r%d, %d\n", dc->r0, dc->r1, imm);
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0], imm);
    } else {
        LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
/*
 * bne: branch when not equal.  The logged offset is the sign-extended
 * 16-bit word displacement scaled to bytes.
 */
static void dec_bne(DisasContext *dc)
{
    LOG_DIS("bne r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_NE);
}
/*
 * Top-level decoder: crack the 32-bit instruction word into the fields
 * the individual handlers use, classify its format, and dispatch
 * through the decinfo[] table indexed by the 6-bit opcode.
 *
 * Fix: the format test used (1 << 31), which left-shifts a signed int
 * into its sign bit — undefined behavior in C.  Use an unsigned
 * constant instead; the result is unchanged.
 */
static inline void decode(DisasContext *dc, uint32_t ir)
{
    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);

    /* Immediate variants of different widths. */
    dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
    dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
    dc->imm26 = EXTRACT_FIELD(ir, 0, 25);

    /* csr overlaps r0 — which one is meaningful depends on the insn. */
    dc->csr = EXTRACT_FIELD(ir, 21, 25);
    dc->r0 = EXTRACT_FIELD(ir, 21, 25);
    dc->r1 = EXTRACT_FIELD(ir, 16, 20);
    dc->r2 = EXTRACT_FIELD(ir, 11, 15);

    /* bit 31 seems to indicate insn type.  */
    dc->format = (ir & (1U << 31)) ? OP_FMT_RR : OP_FMT_RI;

    assert(ARRAY_SIZE(decinfo) == 64);
    assert(dc->opcode < 64);

    decinfo[dc->opcode](dc);
}
/*
 * Reserved user-defined instruction: not implemented by this model,
 * so it is logged and raised as an illegal instruction.
 */
static void dec_user(DisasContext *dc)
{
    LOG_DIS("user");
    qemu_log_mask(LOG_GUEST_ERROR, "user instruction undefined\n");
    t_gen_illegal_insn(dc);
}
/*
 * CRISv10 bdap with a memory operand (base + displacement addressing
 * prefix).  Loads the displacement (sign-extended: s_ext=1 below) into
 * PREFIX, adds $rd, and marks the prefix live for the next insn.
 * Returns the instruction length in bytes.
 */
static int dec10_bdap_m(DisasContext *dc, int size)
{
    int insn_len = 2;
    int rd = dc->dst;

    LOG_DIS("bdap_m pc=%x opcode=%d r%d r%d sz=%d\n",
            dc->pc, dc->opcode, dc->src, dc->dst, size);

    assert(dc->dst != 15);
#if 0
    /* 8bit embedded offset? */
    if (!dc->postinc && (dc->ir & (1 << 11))) {
        int simm = dc->ir & 0xff;

        /* cpu_abort(dc->env, "Unhandled opcode"); */
        /* sign extended.  */
        simm = (int8_t)simm;
        tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm);
        cris_set_prefix(dc);
        return insn_len;
    }
#endif
    /* Now the rest of the modes are truly indirect.  */
    insn_len += dec10_prep_move_m(dc, 1, size, cpu_PR[PR_PREFIX]);
    tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_PR[PR_PREFIX], cpu_R[rd]);
    cris_set_prefix(dc);
    return insn_len;
}
/*
 * CRISv10 ALU op with an extended memory operand (addx family).
 * Loads a byte/word source (optionally sign-extended per dc->size),
 * runs it through cris_alu(), and turns a write to r15 (PC) into an
 * indirect branch.  Returns the instruction length in bytes.
 *
 * Fix: the original returned early on the dc->dst == 15 path without
 * freeing the TCG temporary, leaking it.
 */
static int dec10_alux_m(DisasContext *dc, int op)
{
    unsigned int size = (dc->size & 1) ? 2 : 1;   /* word or byte operand */
    unsigned int sx = !!(dc->size & 2);           /* sign-extend flag */
    int insn_len = 2;
    int rd = dc->dst;
    TCGv t;

    LOG_DIS("addx size=%d sx=%d op=%d %d\n", size, sx, dc->src, dc->dst);

    t = tcg_temp_new();

    cris_cc_mask(dc, CC_MASK_NZVC);
    insn_len += dec10_prep_move_m(dc, sx, size, t);
    cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t, 4);
    tcg_temp_free(t);

    if (dc->dst == 15) {
        /* Result landed in PC: emit an indirect jump. */
        tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
        cris_prepare_jmp(dc, JMP_INDIRECT);
        dc->delayed_branch = 1;
    }

    return insn_len;
}
/*
 * CRISv10 movem [rs], rd: load registers rd down to r0 from
 * consecutive 32-bit words at the effective address.  In autoinc
 * mode, rs is written back: past the loaded block normally, or to
 * the block's start address when an addressing prefix is pending.
 */
static void dec10_movem_m_r(DisasContext *dc)
{
    int i, pfix = dc->tb_flags & PFIX_FLAG;
    TCGv addr, t0;

    LOG_DIS("%s [r%d], r%d pi=%d ir=%x\n", __func__,
            dc->src, dc->dst, dc->postinc, dc->ir);

    addr = tcg_temp_new();
    t0 = tcg_temp_new();
    crisv10_prepare_memaddr(dc, addr, 4);
    tcg_gen_mov_tl(t0, addr);   /* remember the start address */
    /* Highest register first: rd, rd-1, ..., r0. */
    for (i = dc->dst; i >= 0; i--) {
        gen_load(dc, cpu_R[i], addr, 4, 0);
        tcg_gen_addi_tl(addr, addr, 4);
    }
    /* Prefixed autoinc: restore rs to the start address. */
    if (pfix && dc->mode == CRISV10_MODE_AUTOINC) {
        tcg_gen_mov_tl(cpu_R[dc->src], t0);
    }
    /* Plain autoinc: advance rs past the last loaded word. */
    if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) {
        tcg_gen_mov_tl(cpu_R[dc->src], addr);
    }
    tcg_temp_free(addr);
    tcg_temp_free(t0);
}
/*
 * bgeu: branch when greater-or-equal, unsigned.  The logged offset is
 * the sign-extended 16-bit word displacement scaled to bytes.
 */
static void dec_bgeu(DisasContext *dc)
{
    LOG_DIS("bgeu r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GEU);
}
/*
 * bi: unconditional PC-relative branch with a 26-bit word offset.
 *
 * NOTE(review): the offset is computed as sign_extend(imm26 << 2, 26),
 * i.e. the value is shifted to a byte offset *before* being
 * sign-extended at bit 25, which discards the top two bits of imm26.
 * Presumably the intent was sign_extend(imm26, 26) * 4 — confirm
 * against the LM32 ISA before relying on large backward branches.
 */
static void dec_bi(DisasContext *dc)
{
    LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26));

    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}
/*
 * Fetch the memory (or inline-immediate) source operand for a
 * two-operand CRISv10 instruction into dst.
 *
 * s_ext   != 0 to sign-extend sub-word operands, else zero-extend.
 * memsize operand size in bytes (1, 2 or 4).
 *
 * Returns the number of extra instruction-stream bytes consumed.
 * May redirect dc->dst to dc->src for prefixed indirect modes.
 */
static int dec10_prep_move_m(DisasContext *dc, int s_ext, int memsize,
                             TCGv dst)
{
    unsigned int rs;
    uint32_t imm;
    int is_imm;
    int insn_len = 0;

    rs = dc->src;
    /* [pc+] with no pending prefix is an immediate in the insn stream. */
    is_imm = rs == 15 && !(dc->tb_flags & PFIX_FLAG);
    LOG_DIS("rs=%d rd=%d is_imm=%d mode=%d pfix=%d\n",
            rs, dc->dst, is_imm, dc->mode, dc->tb_flags & PFIX_FLAG);

    /* Load [$rs] onto T1.  */
    if (is_imm) {
        if (memsize != 4) {
            if (s_ext) {
                if (memsize == 1)
                    imm = ldsb_code(dc->pc + 2);
                else
                    imm = ldsw_code(dc->pc + 2);
            } else {
                if (memsize == 1)
                    imm = ldub_code(dc->pc + 2);
                else
                    imm = lduw_code(dc->pc + 2);
            }
        } else
            imm = ldl_code(dc->pc + 2);

        tcg_gen_movi_tl(dst, imm);

        if (dc->mode == CRISV10_MODE_AUTOINC) {
            insn_len += memsize;
            /* Byte immediates still consume two stream bytes here —
             * presumably padded to 16 bits; confirm against the ISA. */
            if (memsize == 1)
                insn_len++;
            /* Step pc past the inline immediate data. */
            tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len);
        }
    } else {
        TCGv addr;

        addr = tcg_temp_new();
        cris_flush_cc_state(dc);
        crisv10_prepare_memaddr(dc, addr, memsize);
        gen_load(dc, dst, addr, memsize, 0);
        /* Widen the loaded value to full register width. */
        if (s_ext)
            t_gen_sext(dst, dst, memsize);
        else
            t_gen_zext(dst, dst, memsize);
        insn_len += crisv10_post_memaddr(dc, memsize);
        tcg_temp_free(addr);
    }

    /* A prefixed indirect mode redirects the destination to rs. */
    if (dc->mode == CRISV10_MODE_INDIRECT && (dc->tb_flags & PFIX_FLAG)) {
        dc->dst = dc->src;
    }

    return insn_len;
}
/*
 * call: link-and-jump through a register.  ra receives the address of
 * the following instruction, pc the register target.
 */
static void dec_call(DisasContext *dc)
{
    LOG_DIS("call r%d\n", dc->r0);

    /* Link register <- return address. */
    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}
/*
 * calli: PC-relative call with a 26-bit word offset; ra gets the
 * return address.
 *
 * NOTE(review): the trace prints sign_extend(imm26, 26) * 4 while the
 * generated branch target uses sign_extend(imm26 << 2, 26).  The two
 * disagree when the top bits of imm26 matter — confirm which form
 * matches the LM32 ISA (dec_bi uses the shifted form).
 */
static void dec_calli(DisasContext *dc)
{
    LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}
/*
 * nor/nori: bitwise not-or.  TCG has no nor-with-immediate, so the
 * immediate form materializes the constant in a temporary first.
 */
static void dec_nor(DisasContext *dc)
{
    if (dc->format != OP_FMT_RI) {
        LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
        return;
    }

    LOG_DIS("nori r%d, r%d, %d\n", dc->r0, dc->r1,
            zero_extend(dc->imm16, 16));

    {
        TCGv imm = tcg_temp_new();

        tcg_gen_movi_tl(imm, zero_extend(dc->imm16, 16));
        tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], imm);
        tcg_temp_free(imm);
    }
}
/*
 * break/scall group, selected by the 5-bit immediate: 2 raises a
 * breakpoint exception, 7 a system call; anything else is illegal.
 *
 * Fix: the error log for an unknown imm5 lacked the terminating
 * newline every other LOG_GUEST_ERROR message in this file has.
 */
static void dec_scall(DisasContext *dc)
{
    switch (dc->imm5) {
    case 2:
        LOG_DIS("break\n");
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_BREAKPOINT);
        break;
    case 7:
        LOG_DIS("scall\n");
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode @0x%x\n", dc->pc);
        t_gen_illegal_insn(dc);
        break;
    }
}
/*
 * sw: store a 32-bit word at r0 plus a sign-extended 16-bit offset.
 */
static void dec_sw(DisasContext *dc)
{
    TCGv addr;
    int ofs = sign_extend(dc->imm16, 16);

    LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, ofs, dc->r1);

    addr = tcg_temp_new();
    tcg_gen_addi_tl(addr, cpu_R[dc->r0], ofs);
    tcg_gen_qemu_st32(cpu_R[dc->r1], addr, MEM_INDEX);
    tcg_temp_free(addr);
}
/*
 * lw: load a 32-bit word from r0 plus a sign-extended 16-bit offset.
 */
static void dec_lw(DisasContext *dc)
{
    TCGv addr;
    int ofs = sign_extend(dc->imm16, 16);

    LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, ofs);

    addr = tcg_temp_new();
    tcg_gen_addi_tl(addr, cpu_R[dc->r0], ofs);
    tcg_gen_qemu_ld32s(cpu_R[dc->r1], addr, MEM_INDEX);
    tcg_temp_free(addr);
}
/*
 * and/andi: bitwise and.  The register form with all three operands
 * equal to r0 is treated as a halt hint (gen_helper_hlt).
 */
static void dec_and(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        uint32_t imm = zero_extend(dc->imm16, 16);

        LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0, imm);
        tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], imm);
        return;
    }

    LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (dc->r0 || dc->r1 || dc->r2) {
        tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    } else {
        /* and r0, r0, r0: halt the CPU. */
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        gen_helper_hlt(cpu_env);
    }
}
/*
 * rcsr: read a control/status register into a general register.
 * Plain CSRs are copied directly; IM/IP/JTX/JRX go through helpers.
 * Non-readable CSRs log a guest error and leave the destination
 * register untouched.
 */
static void dec_rcsr(DisasContext *dc)
{
    LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
        break;
    case CSR_IM:
        gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_IP:
        gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_CC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
        break;
    case CSR_CFG:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
        break;
    case CSR_JTX:
        gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_JRX:
        gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env);
        break;
    /* Cache-control and breakpoint/watchpoint CSRs cannot be read. */
    case CSR_ICC:
    case CSR_DCC:
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid read access csr=%x\n",
                dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "read_csr: unknown csr=%x\n",
                dc->csr);
        break;
    }
}
/*
 * sexth: sign-extend the low 16 bits of r0 into r2.  Requires the
 * optional hardware sign extender; otherwise raises an illegal insn.
 */
static void dec_sexth(DisasContext *dc)
{
    LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                "hardware sign extender is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}
/*
 * CRISv10 move register -> memory via the indirect addressing modes.
 * Returns the instruction length in bytes.
 *
 * Fix: the TCG temporary holding the effective address was never
 * freed, leaking one temp per translated store.
 */
static unsigned int dec10_ind_move_r_m(DisasContext *dc, unsigned int size)
{
    unsigned int insn_len = 2;
    TCGv addr;

    LOG_DIS("move.%d $r%d, [$r%d]\n", dc->size, dc->src, dc->dst);

    addr = tcg_temp_new();
    crisv10_prepare_memaddr(dc, addr, size);
    gen_store_v10(dc, addr, cpu_R[dc->dst], size);
    insn_len += crisv10_post_memaddr(dc, size);
    tcg_temp_free(addr);

    return insn_len;
}
/*
 * sl/sli: shift left.  Requires the optional barrel shifter; the
 * register form masks the count to 5 bits first.
 */
static void dec_sl(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_SHIFT)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware shifter is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv amount = tcg_temp_new();

        tcg_gen_andi_tl(amount, cpu_R[dc->r1], 0x1f);
        tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], amount);
        tcg_temp_free(amount);
    }
}
/*
 * mul/muli: multiply.  Requires the optional hardware multiplier;
 * without it the insn raises illegal.
 */
static void dec_mul(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("muli r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_MULTIPLY)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                "hardware multiplier is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
/*
 * sr/sri: arithmetic shift right.  Without the optional barrel
 * shifter only a shift by exactly one is legal: the immediate form
 * can check that at translation time, but the register form's count
 * is only known at runtime, so a conditional illegal-insn sequence
 * is generated around the shift.
 */
static void dec_sr(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    /* The real CPU (w/o hardware shifter) only supports right shift by
     * exactly one bit */
    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGLabel *l1 = gen_new_label();
        TCGLabel *l2 = gen_new_label();
        /* Local temp: must survive the brcond below. */
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            /* count == 1 branches to the shift at l1; any other count
             * raises illegal and jumps past it to l2. */
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}
/*
 * CRISv10 abs: absolute value via the branchless sign-mask trick.
 * t0 = src >> 31 (arithmetic) is 0 or ~0; (src ^ t0) - t0 is then
 * |src|.  The result goes through cris_alu(CC_OP_MOVE) so the
 * condition codes track it like a move.
 */
static void dec10_reg_abs(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("abs $r%u, $r%u\n", dc->src, dc->dst);

    assert(dc->dst != 15);
    t0 = tcg_temp_new();
    tcg_gen_sari_tl(t0, cpu_R[dc->src], 31);   /* 0 or all-ones mask */
    tcg_gen_xor_tl(cpu_R[dc->dst], cpu_R[dc->src], t0);
    tcg_gen_sub_tl(t0, cpu_R[dc->dst], t0);

    cris_alu(dc, CC_OP_MOVE,
             cpu_R[dc->dst], cpu_R[dc->dst], t0, 4);
    tcg_temp_free(t0);
}
/*
 * add/addi: addition.  The log distinguishes the assembler aliases:
 * addi with r0 as source is mvi, and mvi of 0 into r0 is nop.
 */
static void dec_add(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        int imm = sign_extend(dc->imm16, 16);

        if (dc->r0 != R_R0) {
            LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0, imm);
        } else if (dc->r1 == R_R0 && dc->imm16 == 0) {
            LOG_DIS("nop\n");
        } else {
            LOG_DIS("mvi r%d, %d\n", dc->r1, imm);
        }

        tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0], imm);
    } else {
        LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
static void dec_b(DisasContext *dc) { if (dc->r0 == R_RA) { LOG_DIS("ret\n"); } else if (dc->r0 == R_EA) { LOG_DIS("eret\n"); } else if (dc->r0 == R_BA) { LOG_DIS("bret\n"); } else { LOG_DIS("b r%d\n", dc->r0); } /* restore IE.IE in case of an eret */ if (dc->r0 == R_EA) { TCGv t0 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); tcg_gen_andi_tl(t0, cpu_ie, IE_EIE); tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1); tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE); gen_set_label(l1); tcg_temp_free(t0); } else if (dc->r0 == R_BA) { TCGv t0 = tcg_temp_new(); TCGLabel *l1 = gen_new_label(); tcg_gen_andi_tl(t0, cpu_ie, IE_BIE); tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1); tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE); gen_set_label(l1); tcg_temp_free(t0); } tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]); dc->is_jmp = DISAS_JUMP; }