/* Update interrupts.
 *
 * Recomputes the PL190's output lines from its current state and either
 * forwards the raw status to a parent PIC (cascaded configuration) or
 * drives the VCPU's IRQ/FIQ lines directly.
 */
static void pl190_update(struct pl190_emulator_state *s)
{
	struct vmm_vcpu *vcpu;
	u32 status;

	vcpu = vmm_manager_guest_vcpu(s->guest, 0);
	if (!vcpu) {
		return;
	}

	status = pl190_irq_status(s);

	if (s->is_child_pic) {
		/* Cascaded VIC: report pending status to the parent PIC. */
		vmm_devemu_emulate_irq(s->guest, s->parent_irq, status);
		return;
	}

	/* IRQ line: pending source enabled at the current priority level. */
	if (status & s->prio_mask[s->priority]) {
		vmm_vcpu_irq_assert(vcpu, s->parent_irq, 0x0);
	} else {
		vmm_vcpu_irq_deassert(vcpu, s->parent_irq);
	}

	/* FIQ line: any active (hard or soft) source routed to FIQ. */
	if ((s->level | s->soft_level) & s->fiq_select) {
		vmm_vcpu_irq_assert(vcpu, s->parent_irq + 1, 0x0);
	} else {
		vmm_vcpu_irq_deassert(vcpu, s->parent_irq + 1);
	}
}
/* Update interrupts. */ static void pl190_emulator_update(struct pl190_emulator_state *s) { u32 irqset, fiqset, status; struct vmm_vcpu *vcpu = vmm_manager_guest_vcpu(s->guest, 0); if (!vcpu) { return; } status = pl190_emulator_irq_status(s); if (s->is_child_pic) { vmm_devemu_emulate_irq(s->guest, s->parent_irq, status); } else { irqset = ((status & s->prio_mask[s->priority]) != 0); if (irqset) { vmm_vcpu_irq_assert(vcpu, CPU_EXTERNAL_IRQ, 0x0); } else { vmm_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_IRQ); } fiqset = (((s->level | s->soft_level) & s->fiq_select) != 0); if (fiqset) { vmm_vcpu_irq_assert(vcpu, CPU_EXTERNAL_FIQ, 0x0); } else { vmm_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_FIQ); } } }
/* Software interrupt (hypercall) exception handler.
 *
 * Must be entered with the host in user mode saved context; anything else
 * indicates a host-level fault and triggers a panic. If the guest VCPU was
 * itself running in user mode, the exception is reflected into the guest;
 * otherwise the trapped instruction at uregs->pc is emulated as a
 * hypercall (ARM or Thumb encoding, selected by the saved CPSR).
 */
void do_soft_irq(arch_regs_t * uregs)
{
	struct vmm_vcpu *vcpu;
	int rc = VMM_OK;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		vmm_panic("%s: unexpected exception\n", __func__);
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	if ((arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
		/* Guest privilege is user: inject the exception into the
		 * guest instead of emulating the instruction. */
		vmm_vcpu_irq_assert(vcpu, CPU_SOFT_IRQ, 0x0);
	} else if (uregs->cpsr & CPSR_THUMB_ENABLED) {
		rc = cpu_vcpu_hypercall_thumb(vcpu, uregs,
					      *((u32 *)uregs->pc));
	} else {
		rc = cpu_vcpu_hypercall_arm(vcpu, uregs,
					    *((u32 *)uregs->pc));
	}

	if (rc) {
		vmm_printf("%s: error %d\n", __func__, rc);
	}

	vmm_scheduler_irq_exit(uregs);
}
/* Undefined instruction exception handler.
 *
 * Must be entered with the host in user mode saved context; anything else
 * indicates a host-level fault and triggers a panic. If the guest VCPU was
 * itself running in user mode, the exception is reflected into the guest;
 * otherwise the faulting instruction is emulated (ARM or Thumb encoding,
 * selected by the saved CPSR).
 */
void do_undef_inst(vmm_user_regs_t * uregs)
{
	vmm_vcpu_t *vcpu;
	int rc = VMM_OK;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		vmm_panic("%s: unexpected exception\n", __func__);
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	if ((vcpu->sregs->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
		/* Guest privilege is user: inject the exception into the
		 * guest instead of emulating the instruction. */
		vmm_vcpu_irq_assert(vcpu, CPU_UNDEF_INST_IRQ, 0x0);
	} else if (uregs->cpsr & CPSR_THUMB_ENABLED) {
		rc = cpu_vcpu_emulate_thumb_inst(vcpu, uregs, FALSE);
	} else {
		rc = cpu_vcpu_emulate_arm_inst(vcpu, uregs, FALSE);
	}

	if (rc) {
		vmm_printf("%s: error %d\n", __func__, rc);
	}

	vmm_scheduler_irq_exit(uregs);
}