/* Handle an access to unassigned memory: raise a bus exception if MSR[EE]
   is set and the PVR advertises bus exceptions for the access type. */
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;

    if (!cpu_single_env) {
        /* XXX: ??? */
        return;
    }
    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    qemu_log_mask(CPU_LOG_INT, "Unassigned " TARGET_FMT_plx " wr=%d exe=%d\n",
                  addr, is_write, is_exec);
    if (!(env->sregs[SR_MSR] & MSR_EE)) {
        env = saved_env;
        return;
    }

    env->sregs[SR_EAR] = addr;
    if (is_exec) {
        if ((env->pvr.regs[2] & PVR2_IOPB_BUS_EXC_MASK)) {
            env->sregs[SR_ESR] = ESR_EC_INSN_BUS;
            helper_raise_exception(EXCP_HW_EXCP);
        }
    } else {
        if ((env->pvr.regs[2] & PVR2_DOPB_BUS_EXC_MASK)) {
            env->sregs[SR_ESR] = ESR_EC_DATA_BUS;
            helper_raise_exception(EXCP_HW_EXCP);
        }
    }
    env = saved_env;
}

/* Handle an access to unassigned memory with the CPU state passed in
   explicitly; raises a bus exception if enabled. */
void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
                           int is_write, int is_exec, int is_asi, int size)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    qemu_log_mask(CPU_LOG_INT, "Unassigned " TARGET_FMT_plx " wr=%d exe=%d\n",
                  addr, is_write, is_exec);
    if (!(env->sregs[SR_MSR] & MSR_EE)) {
        env = saved_env;
        return;
    }

    env->sregs[SR_EAR] = addr;
    if (is_exec) {
        if ((env->pvr.regs[2] & PVR2_IOPB_BUS_EXC_MASK)) {
            env->sregs[SR_ESR] = ESR_EC_INSN_BUS;
            helper_raise_exception(EXCP_HW_EXCP);
        }
    } else {
        if ((env->pvr.regs[2] & PVR2_DOPB_BUS_EXC_MASK)) {
            env->sregs[SR_ESR] = ESR_EC_DATA_BUS;
            helper_raise_exception(EXCP_HW_EXCP);
        }
    }
    env = saved_env;
}

/* Raise a bus exception for an access to unassigned memory, if MSR[EE] and
   the corresponding PVR bus-exception bit are set. */
void mb_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write,
                              bool is_exec, int is_asi, unsigned size)
{
    MicroBlazeCPU *cpu;
    CPUMBState *env;

    qemu_log_mask(CPU_LOG_INT, "Unassigned " TARGET_FMT_plx " wr=%d exe=%d\n",
                  addr, is_write ? 1 : 0, is_exec ? 1 : 0);
    if (cs == NULL) {
        return;
    }
    cpu = MICROBLAZE_CPU(cs);
    env = &cpu->env;
    if (!(env->sregs[SR_MSR] & MSR_EE)) {
        return;
    }

    env->sregs[SR_EAR] = addr;
    if (is_exec) {
        if ((env->pvr.regs[2] & PVR2_IOPB_BUS_EXC_MASK)) {
            env->sregs[SR_ESR] = ESR_EC_INSN_BUS;
            helper_raise_exception(env, EXCP_HW_EXCP);
        }
    } else {
        if ((env->pvr.regs[2] & PVR2_DOPB_BUS_EXC_MASK)) {
            env->sregs[SR_ESR] = ESR_EC_DATA_BUS;
            helper_raise_exception(env, EXCP_HW_EXCP);
        }
    }
}

/* SPARC RETT: re-enable traps, restore the previous supervisor bit and
   advance the register-window pointer, checking for window underflow. */
void helper_rett(CPUSPARCState *env)
{
    unsigned int cwp;

    if (env->psret == 1) {
        helper_raise_exception(env, TT_ILL_INSN);
    }

    env->psret = 1;
    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        helper_raise_exception(env, TT_WIN_UNF);
    }
    cpu_set_cwp(env, cwp);
    env->psrs = env->psrps;
}

/* UDIV/UDIVcc: divide the 64-bit value Y:a by the low 32 bits of b.  The
   result saturates to 0xffffffff on overflow; the overflow flag is recorded
   in the condition codes when cc is set. */
static target_ulong helper_udiv_common(CPUSPARCState *env, target_ulong a,
                                       target_ulong b, int cc)
{
    int overflow = 0;
    uint64_t x0;
    uint32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = (b & 0xffffffff);

    if (x1 == 0) {
        cpu_restore_state2(env, GETPC());
        helper_raise_exception(env, TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if (x0 > 0xffffffff) {
        x0 = 0xffffffff;
        overflow = 1;
    }

    if (cc) {
        env->cc_dst = x0;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return x0;
}

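/* Illustrative sketch (not QEMU code): the Y:rs1 dividend construction and
   saturating 64-by-32 division performed above, expressed as a standalone
   function.  udiv_y_saturating is a hypothetical name. */
#include <stdint.h>

static uint32_t udiv_y_saturating(uint32_t y, uint32_t rs1, uint32_t rs2,
                                  int *overflow)
{
    /* The Y register supplies the high 32 bits of the dividend. */
    uint64_t dividend = ((uint64_t)y << 32) | rs1;
    uint64_t q;

    *overflow = 0;
    if (rs2 == 0) {
        /* A real implementation raises a divide-by-zero trap here. */
        return 0;
    }
    q = dividend / rs2;
    if (q > 0xffffffffULL) {
        /* Quotient does not fit in 32 bits: saturate and flag overflow. */
        q = 0xffffffffULL;
        *overflow = 1;
    }
    return (uint32_t)q;
}
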
/* Tagged subtract, trapping on tag or arithmetic overflow (TSUBccTV). */
target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
                             target_ulong src2)
{
    target_ulong dst;

    /* Tag overflow occurs if either input has bits 0 or 1 set.  */
    if ((src1 | src2) & 3) {
        goto tag_overflow;
    }

    dst = src1 - src2;

    /* Tag overflow occurs if the subtraction overflows.  */
    if ((src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
        goto tag_overflow;
    }

    /* Only modify the CC after any exceptions have been generated.  */
    env->cc_op = CC_OP_TSUBTV;
    env->cc_src = src1;
    env->cc_src2 = src2;
    env->cc_dst = dst;
    return dst;

 tag_overflow:
    cpu_restore_state2(env, GETPC());
    helper_raise_exception(env, TT_TOVF);
}

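/* Illustrative sketch (not QEMU code): the two tag-overflow conditions used
   above, applied to plain uint32_t operands.  tagged_sub_overflows is a
   hypothetical name. */
#include <stdint.h>

static int tagged_sub_overflows(uint32_t src1, uint32_t src2)
{
    uint32_t dst = src1 - src2;

    /* Either operand carries a non-zero tag in its low two bits... */
    if ((src1 | src2) & 3) {
        return 1;
    }
    /* ...or the signed subtraction overflowed: the operands have different
       signs and the result's sign differs from src1's. */
    if ((src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
        return 1;
    }
    return 0;
}
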
/* RETT, global-env variant: re-enable traps, restore the previous supervisor
   bit and advance the window pointer, checking for window underflow. */
void helper_rett(void)
{
    unsigned int cwp;

    if (env->psret == 1) {
        helper_raise_exception(env, TT_ILL_INSN);
    }

    env->psret = 1;
    cwp = cwp_inc(env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        helper_raise_exception(env, TT_WIN_UNF);
    }
    set_cwp(cwp);
    env->psrs = env->psrps;
}

/* Write the PSR; a CWP field that is out of range for the implemented
   windows raises an illegal instruction trap. */
void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr)
{
    if ((new_psr & PSR_CWP) >= env->nwindows) {
        helper_raise_exception(env, TT_ILL_INSN);
    } else {
        cpu_put_psr(env, new_psr);
    }
}

/* 64-bit unsigned divide; traps on division by zero. */
uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b)
{
    if (b == 0) {
        /* Raise divide by zero trap.  */
        cpu_restore_state2(env, GETPC());
        helper_raise_exception(env, TT_DIV_ZERO);
    }
    return a / b;
}

/* SAVE: decrement the current window pointer, trapping on window overflow.
   XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(CPUSPARCState *env)
{
    uint32_t cwp;

    cwp = cpu_cwp_dec(env, env->cwp - 1);
    if (env->wim & (1 << cwp)) {
        helper_raise_exception(env, TT_WIN_OVF);
    }
    cpu_set_cwp(env, cwp);
}

/* XXX: use another pointer for %iN registers to avoid slow wrapping
   handling ? */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cwp_dec(env->cwp - 1);
    if (env->wim & (1 << cwp)) {
        helper_raise_exception(env, TT_WIN_OVF);
    }
    set_cwp(cwp);
}

/* Alignment check for a data access: record the faulting address and raise
   the unaligned-access exception when any masked low bit is set. */
void helper_memalign(uint32_t addr, uint32_t dr, uint32_t wr, uint32_t mask)
{
    if (addr & mask) {
        qemu_log("unaligned access addr=%x mask=%x, wr=%d dr=r%d\n",
                 addr, mask, wr, dr);
        env->regs[CR_BADADDR] = addr;
        env->regs[CR_EXCEPTION] = EXCP_UNALIGN << 2;
        helper_raise_exception(EXCP_UNALIGN);
    }
}

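/* Illustrative sketch (not QEMU code): the alignment test used above.  A
   halfword access passes mask 1 and a word access mask 3; any set low bit
   means the address is misaligned for that width.  is_misaligned is a
   hypothetical name. */
#include <stdint.h>

static int is_misaligned(uint32_t addr, uint32_t access_size)
{
    uint32_t mask = access_size - 1;   /* access_size must be a power of two */

    return (addr & mask) != 0;
}
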
/* Stack protection check: raise a hardware exception when addr falls
   outside the [SLR, SHR] range. */
void helper_stackprot(uint32_t addr)
{
    if (addr < env->slr || addr > env->shr) {
        qemu_log("Stack protector violation at %x %x %x\n",
                 addr, env->slr, env->shr);
        env->sregs[SR_EAR] = addr;
        env->sregs[SR_ESR] = ESR_EC_STACKPROT;
        helper_raise_exception(EXCP_HW_EXCP);
    }
}

/* RESTORE: advance the current window pointer, trapping on window underflow. */
void helper_restore(CPUSPARCState *env)
{
    uint32_t cwp;

    cwp = cpu_cwp_inc(env, env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        helper_raise_exception(env, TT_WIN_UNF);
    }
    cpu_set_cwp(env, cwp);
}

void helper_restore(void)
{
    uint32_t cwp;

    cwp = cwp_inc(env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        helper_raise_exception(env, TT_WIN_UNF);
    }
    set_cwp(cwp);
}

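/* Illustrative sketch (not QEMU code): the modular window-pointer arithmetic
   and window-invalid-mask test behind the SAVE/RESTORE helpers above.  The
   NWINDOWS value and the function names are hypothetical. */
#include <stdint.h>

#define NWINDOWS 8

/* Wrap a window index into [0, NWINDOWS), handling negative values. */
static uint32_t cwp_wrap(int cwp)
{
    return (uint32_t)(((cwp % NWINDOWS) + NWINDOWS) % NWINDOWS);
}

/* Non-zero when moving to window `cwp` would hit an invalid window, i.e.
   its bit is set in the window-invalid mask.  SAVE would test cwp - 1
   (window overflow), RESTORE cwp + 1 (window underflow). */
static int window_trap(uint32_t wim, int cwp)
{
    return (wim & (1u << cwp_wrap(cwp))) != 0;
}
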
/* Stack protection check against the SLR/SHR bounds registers. */
void helper_stackprot(CPUMBState *env, uint32_t addr)
{
    if (addr < env->slr || addr > env->shr) {
        qemu_log_mask(CPU_LOG_INT, "Stack protector violation at %x %x %x\n",
                      addr, env->slr, env->shr);
        env->sregs[SR_EAR] = addr;
        env->sregs[SR_ESR] = ESR_EC_STACKPROT;
        helper_raise_exception(env, EXCP_HW_EXCP);
    }
}

/* Raise a bus exception for an access to unassigned memory, if enabled. */
void cpu_unassigned_access(CPUMBState *env, hwaddr addr, int is_write,
                           int is_exec, int is_asi, int size)
{
    qemu_log_mask(CPU_LOG_INT, "Unassigned " TARGET_FMT_plx " wr=%d exe=%d\n",
                  addr, is_write, is_exec);
    if (!env || !(env->sregs[SR_MSR] & MSR_EE)) {
        return;
    }

    env->sregs[SR_EAR] = addr;
    if (is_exec) {
        if ((env->pvr.regs[2] & PVR2_IOPB_BUS_EXC_MASK)) {
            env->sregs[SR_ESR] = ESR_EC_INSN_BUS;
            helper_raise_exception(env, EXCP_HW_EXCP);
        }
    } else {
        if ((env->pvr.regs[2] & PVR2_DOPB_BUS_EXC_MASK)) {
            env->sregs[SR_ESR] = ESR_EC_DATA_BUS;
            helper_raise_exception(env, EXCP_HW_EXCP);
        }
    }
}

/* 64-bit signed divide; traps on division by zero and special-cases a
   divisor of -1 so the host divide instruction cannot fault on
   INT64_MIN / -1. */
int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b)
{
    if (b == 0) {
        /* Raise divide by zero trap.  */
        cpu_restore_state2(env, GETPC());
        helper_raise_exception(env, TT_DIV_ZERO);
    } else if (b == -1) {
        /* Avoid overflow trap with i386 divide insn.  */
        return -a;
    } else {
        return a / b;
    }
}

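/* Illustrative sketch (not QEMU code): why the -1 divisor is special-cased
   above.  INT64_MIN / -1 does not fit in int64_t (undefined behaviour in C,
   and a #DE trap with the x86 divide instruction), so the quotient is formed
   by negation instead.  sdiv64_checked is a hypothetical name. */
#include <stdint.h>

static int64_t sdiv64_checked(int64_t a, int64_t b, int *div_by_zero)
{
    *div_by_zero = 0;
    if (b == 0) {
        *div_by_zero = 1;          /* the caller decides how to trap */
        return 0;
    }
    if (b == -1) {
        /* Negate via unsigned arithmetic; for INT64_MIN this wraps back to
           INT64_MIN, matching two's-complement behaviour. */
        return (int64_t)(0 - (uint64_t)a);
    }
    return a / b;
}
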
/* Common divide-by-zero check for the divide helpers: sets MSR[DZ] and may
   raise the hardware exception.  Returns 0 when the division must not be
   performed. */
static inline int div_prepare(CPUMBState *env, uint32_t a, uint32_t b)
{
    if (b == 0) {
        env->sregs[SR_MSR] |= MSR_DZ;

        if ((env->sregs[SR_MSR] & MSR_EE)
            && !(env->pvr.regs[2] & PVR2_DIV_ZERO_EXC_MASK)) {
            env->sregs[SR_ESR] = ESR_EC_DIVZERO;
            helper_raise_exception(env, EXCP_HW_EXCP);
        }
        return 0;
    }
    env->sregs[SR_MSR] &= ~MSR_DZ;
    return 1;
}

/* Alignment check for a data access: record the faulting address and an ESR
   describing the access, then raise the hardware exception if exceptions
   are enabled. */
void helper_memalign(uint32_t addr, uint32_t dr, uint32_t wr, uint32_t mask)
{
    if (addr & mask) {
        qemu_log_mask(CPU_LOG_INT,
                      "unaligned access addr=%x mask=%x, wr=%d dr=r%d\n",
                      addr, mask, wr, dr);
        env->sregs[SR_EAR] = addr;
        env->sregs[SR_ESR] = ESR_EC_UNALIGNED_DATA | (wr << 10)
                             | (dr & 31) << 5;
        if (mask == 3) {
            env->sregs[SR_ESR] |= 1 << 11;
        }
        if (!(env->sregs[SR_MSR] & MSR_EE)) {
            return;
        }
        helper_raise_exception(EXCP_HW_EXCP);
    }
}

/* raise FPU exception.  */
static void raise_fpu_exception(CPUMBState *env)
{
    env->sregs[SR_ESR] = ESR_EC_FPU;
    helper_raise_exception(env, EXCP_HW_EXCP);
}

/* raise FPU exception.  */
static void raise_fpu_exception(void)
{
    env->sregs[SR_ESR] = ESR_EC_FPU;
    helper_raise_exception(EXCP_HW_EXCP);
}