/* Recompute pending interrupts for a 64-bit SPARC CPU: fold the SOFTINT
   timer bits into the incoming PIL, apply the processor interrupt level
   threshold, and raise or clear CPU_INTERRUPT_HARD accordingly. */
void cpu_check_irqs(CPUSPARCState *env)
{
    CPUState *cs;
    uint32_t pil = env->pil_in |
                   (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER));

    /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */
    if (env->ivec_status & 0x20) {
        return;
    }
    cs = CPU(sparc_env_get_cpu(env));

    /* check if TM or SM in SOFTINT are set;
       setting these also causes interrupt 14 */
    if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) {
        pil |= 1 << 14;
    }

    /* The bit corresponding to psrpil is (1 << psrpil), the next bit
       is (2 << psrpil). */
    if (pil < (2 << env->psrpil)) {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            CPUIRQ_DPRINTF("Reset CPU IRQ (current interrupt %x)\n",
                           env->interrupt_index);
            env->interrupt_index = 0;
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
        return;
    }

    if (cpu_interrupts_enabled(env)) {
        unsigned int i;

        for (i = 15; i > env->psrpil; i--) {
            if (pil & (1 << i)) {
                int old_interrupt = env->interrupt_index;
                int new_interrupt = TT_EXTINT | i;

                if (unlikely(env->tl > 0 && cpu_tsptr(env)->tt > new_interrupt
                             && ((cpu_tsptr(env)->tt & 0x1f0) == TT_EXTINT))) {
                    CPUIRQ_DPRINTF("Not setting CPU IRQ: TL=%d "
                                   "current %x >= pending %x\n",
                                   env->tl, cpu_tsptr(env)->tt, new_interrupt);
                } else if (old_interrupt != new_interrupt) {
                    env->interrupt_index = new_interrupt;
                    CPUIRQ_DPRINTF("Set CPU IRQ %d old=%x new=%x\n", i,
                                   old_interrupt, new_interrupt);
                    cpu_interrupt(cs, CPU_INTERRUPT_HARD);
                }
                break;
            }
        }
    } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        CPUIRQ_DPRINTF("Interrupts disabled, pil=%08x pil_in=%08x softint=%08x "
                       "current interrupt %x\n",
                       pil, env->pil_in, env->softint, env->interrupt_index);
        env->interrupt_index = 0;
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}
void cpu_check_irqs(CPUState *env)
{
    uint32_t pil = env->pil_in |
                   (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER));

    /* check if TM or SM in SOFTINT are set
       setting these also causes interrupt 14 */
    if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) {
        pil |= 1 << 14;
    }

    if (!pil) {
        if (env->interrupt_request & CPU_INTERRUPT_HARD) {
            CPUIRQ_DPRINTF("Reset CPU IRQ (current interrupt %x)\n",
                           env->interrupt_index);
            env->interrupt_index = 0;
            cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
        }
        return;
    }

    if (cpu_interrupts_enabled(env)) {
        unsigned int i;

        for (i = 15; i > env->psrpil; i--) {
            if (pil & (1 << i)) {
                int old_interrupt = env->interrupt_index;
                int new_interrupt = TT_EXTINT | i;

                if (env->tl > 0 && cpu_tsptr(env)->tt > new_interrupt) {
                    CPUIRQ_DPRINTF("Not setting CPU IRQ: TL=%d "
                                   "current %x >= pending %x\n",
                                   env->tl, cpu_tsptr(env)->tt, new_interrupt);
                } else if (old_interrupt != new_interrupt) {
                    env->interrupt_index = new_interrupt;
                    CPUIRQ_DPRINTF("Set CPU IRQ %d old=%x new=%x\n", i,
                                   old_interrupt, new_interrupt);
                    cpu_interrupt(env, CPU_INTERRUPT_HARD);
                }
                break;
            }
        }
    } else {
        CPUIRQ_DPRINTF("Interrupts disabled, pil=%08x pil_in=%08x softint=%08x "
                       "current interrupt %x\n",
                       pil, env->pil_in, env->softint, env->interrupt_index);
    }
}
/* CPUClass::reset() */
static void sparc_cpu_reset(CPUState *s)
{
    SPARCCPU *cpu = SPARC_CPU(s);
    SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(cpu);
    CPUSPARCState *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", s->cpu_index);
        log_cpu_state(env, 0);
    }

    scc->parent_reset(s);

    memset(env, 0, offsetof(CPUSPARCState, breakpoints));
    tlb_flush(env, 1);
    env->cwp = 0;
#ifndef TARGET_SPARC64
    env->wim = 1;
#endif
    env->regwptr = env->regbase + (env->cwp * 16);
    CC_OP = CC_OP_FLAGS;
#if defined(CONFIG_USER_ONLY)
#ifdef TARGET_SPARC64
    env->cleanwin = env->nwindows - 2;
    env->cansave = env->nwindows - 2;
    env->pstate = PS_RMO | PS_PEF | PS_IE;
    env->asi = 0x82; /* Primary no-fault */
#endif
#else
#if !defined(TARGET_SPARC64)
    env->psret = 0;
    env->psrs = 1;
    env->psrps = 1;
#endif
#ifdef TARGET_SPARC64
    env->pstate = PS_PRIV | PS_RED | PS_PEF | PS_AG;
    env->hpstate = cpu_has_hypervisor(env) ? HS_PRIV : 0;
    env->tl = env->maxtl;
    cpu_tsptr(env)->tt = TT_POWER_ON_RESET;
    env->lsu = 0;
#else
    env->mmuregs[0] &= ~(MMU_E | MMU_NF);
    env->mmuregs[0] |= env->def->mmu_bm;
#endif
    env->pc = 0;
    env->npc = env->pc + 4;
#endif
    env->cache_control = 0;
}
/* Reset the CPU to its architectural power-on state. */
void cpu_reset(CPUSPARCState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    tlb_flush(env, 1);
    env->cwp = 0;
#ifndef TARGET_SPARC64
    env->wim = 1;
#endif
    env->regwptr = env->regbase + (env->cwp * 16);
    CC_OP = CC_OP_FLAGS;
#if defined(CONFIG_USER_ONLY)
#ifdef TARGET_SPARC64
    env->cleanwin = env->nwindows - 2;
    env->cansave = env->nwindows - 2;
    env->pstate = PS_RMO | PS_PEF | PS_IE;
    env->asi = 0x82; /* Primary no-fault */
#endif
#else
#if !defined(TARGET_SPARC64)
    env->psret = 0;
    env->psrs = 1;
    env->psrps = 1;
#endif
#ifdef TARGET_SPARC64
    env->pstate = PS_PRIV | PS_RED | PS_PEF | PS_AG;
    env->hpstate = cpu_has_hypervisor(env) ? HS_PRIV : 0;
    env->tl = env->maxtl;
    cpu_tsptr(env)->tt = TT_POWER_ON_RESET;
    env->lsu = 0;
#else
    env->mmuregs[0] &= ~(MMU_E | MMU_NF);
    env->mmuregs[0] |= env->def->mmu_bm;
#endif
    env->pc = 0;
    env->npc = env->pc + 4;
#endif
    env->cache_control = 0;
}
/* Deliver the pending trap or interrupt on a 64-bit SPARC CPU: save the
   interrupted state into the current trap-stack entry, select the matching
   set of global registers, adjust the register window for clean-window,
   spill and fill traps, and vector execution through TBR. */
void sparc_cpu_do_interrupt(CPUState *cs)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    int intno = cs->exception_index;
    trap_state *tsptr;

    /* Compute PSR before exposing state. */
    if (env->cc_op != CC_OP_FLAGS) {
        cpu_get_psr(env);
    }

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x180) {
            name = "Unknown";
        } else if (intno >= 0x100) {
            name = "Trap Instruction";
        } else if (intno >= 0xc0) {
            name = "Window Fill";
        } else if (intno >= 0x80) {
            name = "Window Spill";
        } else {
            name = excp_names[intno];
            if (!name) {
                name = "Unknown";
            }
        }

        qemu_log("%6d: %s (v=%04x)\n", count, name, intno);
        log_cpu_state(cs, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log(" code=");
            ptr = (uint8_t *)env->pc;
            for (i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->tl >= env->maxtl) {
        cpu_abort(cs, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", cs->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl) {
            env->tl++;
        }
    }
    tsptr = cpu_tsptr(env);

    tsptr->tstate = (cpu_get_ccr(env) << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        cpu_get_cwp64(env);
    tsptr->tpc = env->pc;
    tsptr->tnpc = env->npc;
    tsptr->tt = intno;

    switch (intno) {
    case TT_IVEC:
        cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG);
        break;
    case TT_TFAULT:
    case TT_DFAULT:
    case TT_TMISS ... TT_TMISS + 3:
    case TT_DMISS ... TT_DMISS + 3:
    case TT_DPROT ... TT_DPROT + 3:
        cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG);
        break;
    default:
        cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_AG);
        break;
    }

    if (intno == TT_CLRWIN) {
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
    } else if ((intno & 0x1c0) == TT_SPILL) {
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
    } else if ((intno & 0x1c0) == TT_FILL) {
        cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
    }
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    cs->exception_index = -1;
}
void cpu_check_irqs(CPUSPARCState *env)
{
    CPUState *cs;
    uint32_t pil = env->pil_in |
                   (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER));

    /* We should be holding the BQL before we mess with IRQs */
    g_assert(qemu_mutex_iothread_locked());

    /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */
    if (env->ivec_status & 0x20) {
        return;
    }
    cs = CPU(sparc_env_get_cpu(env));

    /* check if TM or SM in SOFTINT are set
       setting these also causes interrupt 14 */
    if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) {
        pil |= 1 << 14;
    }

    /* The bit corresponding to psrpil is (1 << psrpil), the next bit
       is (2 << psrpil). */
    if (pil < (2 << env->psrpil)) {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            trace_sparc64_cpu_check_irqs_reset_irq(env->interrupt_index);
            env->interrupt_index = 0;
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
        return;
    }

    if (cpu_interrupts_enabled(env)) {
        unsigned int i;

        for (i = 15; i > env->psrpil; i--) {
            if (pil & (1 << i)) {
                int old_interrupt = env->interrupt_index;
                int new_interrupt = TT_EXTINT | i;

                if (unlikely(env->tl > 0 && cpu_tsptr(env)->tt > new_interrupt
                             && ((cpu_tsptr(env)->tt & 0x1f0) == TT_EXTINT))) {
                    trace_sparc64_cpu_check_irqs_noset_irq(env->tl,
                                                           cpu_tsptr(env)->tt,
                                                           new_interrupt);
                } else if (old_interrupt != new_interrupt) {
                    env->interrupt_index = new_interrupt;
                    trace_sparc64_cpu_check_irqs_set_irq(i, old_interrupt,
                                                         new_interrupt);
                    cpu_interrupt(cs, CPU_INTERRUPT_HARD);
                }
                break;
            }
        }
    } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        trace_sparc64_cpu_check_irqs_disabled(pil, env->pil_in, env->softint,
                                              env->interrupt_index);
        env->interrupt_index = 0;
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}
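/*
 * Illustrative sketch, not part of the functions above: one plausible way a
 * board-level interrupt line could feed cpu_check_irqs(). The helper name
 * example_set_irq_level() and its wiring are assumptions made for
 * illustration; only env->pil_in, cpu_check_irqs() and the BQL expectation
 * come from the code shown earlier.
 */
static void example_set_irq_level(CPUSPARCState *env, unsigned int level,
                                  bool raised)
{
    /* Caller is assumed to hold the iothread lock (BQL), as asserted in the
       cpu_check_irqs() variant above. */
    if (raised) {
        env->pil_in |= 1U << level;     /* mark this level as pending */
    } else {
        env->pil_in &= ~(1U << level);  /* clear the pending level */
    }
    cpu_check_irqs(env);                /* re-evaluate CPU_INTERRUPT_HARD */
}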