Example #1
void vmm_scheduler_irq_exit(arch_regs_t *regs)
{
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);
	struct vmm_vcpu *vcpu = NULL;

	/* Determine current vcpu */
	vcpu = schedp->current_vcpu;
	if (!vcpu) {
		return;
	}

	/* If the current VCPU is not RUNNING, or yield-on-exit is set,
	 * then do a context switch
	 */
	if ((vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_RUNNING) ||
	    schedp->yield_on_irq_exit) {
		vmm_scheduler_next(schedp, &schedp->ev, schedp->irq_regs);
		schedp->yield_on_irq_exit = FALSE;
	}

	/* VCPU irq processing */
	vmm_vcpu_irq_process(vcpu, regs);

	/* Indicate that we have exited IRQ */
	schedp->irq_context = FALSE;

	/* Clear pointer to IRQ registers */
	schedp->irq_regs = NULL;
}
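
The function above defers any real context switch to this single exit point: interrupt handlers only change the VCPU state or set yield_on_irq_exit, and the switch itself happens once IRQ processing is done. Below is a minimal standalone model of that pattern; the model_* names and the printed message are illustrative only and are not part of the Xvisor API.

#include <stdbool.h>
#include <stdio.h>

enum model_state { MODEL_RUNNING, MODEL_PAUSED };

struct model_vcpu {
	enum model_state state;
	const char *name;
};

struct model_sched {
	struct model_vcpu *current_vcpu;
	bool yield_on_irq_exit;
	bool irq_context;
};

/* Stand-in for the real context switch: just report it */
static void model_switch(struct model_sched *s)
{
	printf("context switch away from %s\n", s->current_vcpu->name);
}

static void model_irq_exit(struct model_sched *s)
{
	if (!s->current_vcpu)
		return;

	/* Switch if the VCPU stopped running or a yield was deferred */
	if (s->current_vcpu->state != MODEL_RUNNING || s->yield_on_irq_exit) {
		model_switch(s);
		s->yield_on_irq_exit = false;
	}

	/* Leave IRQ context */
	s->irq_context = false;
}

int main(void)
{
	struct model_vcpu v = { MODEL_RUNNING, "vcpu0" };
	struct model_sched s = { &v, true, true }; /* a yield was requested during the IRQ */

	model_irq_exit(&s); /* prints: context switch away from vcpu0 */
	return 0;
}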
Example #2
void vmm_scheduler_yield(void)
{
	irq_flags_t flags;
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	arch_cpu_irq_save(flags);

	if (schedp->irq_context) {
		vmm_panic("%s: Cannot yield in IRQ context\n", __func__);
	}

	if (!schedp->current_vcpu) {
		vmm_panic("%s: NULL VCPU pointer\n", __func__);
	}

	if (schedp->current_vcpu->is_normal) {
		/* For a normal VCPU just enable yield-on-exit;
		 * the rest will be taken care of by vmm_scheduler_irq_exit()
		 */
		if (vmm_manager_vcpu_get_state(schedp->current_vcpu) == 
						VMM_VCPU_STATE_RUNNING) {
			schedp->yield_on_irq_exit = TRUE;
		}
	} else {
		/* For an orphan VCPU forcefully expire the yield
		 */
		arch_vcpu_preempt_orphan();
	}

	arch_cpu_irq_restore(flags);
}
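
The orphan branch is what a cooperative hypervisor thread typically hits when it has nothing to do. A hedged usage sketch follows; only vmm_scheduler_yield() comes from the code above, while work_pending() and process_work() are hypothetical placeholders, not Xvisor functions.

/* Hypothetical orphan-VCPU worker loop: work_pending() and process_work()
 * are illustrative placeholders, not part of the Xvisor API. */
static void worker_main(void)
{
	while (1) {
		if (!work_pending()) {
			/* Nothing to do: give the CPU back to the scheduler */
			vmm_scheduler_yield();
			continue;
		}
		process_work();
	}
}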
Example #3
void vmm_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	/* For a non-normal VCPU don't do anything */
	if (!vcpu || !vcpu->is_normal) {
		return;
	}

	/* If the VCPU is not in an interruptible state then don't do anything */
	if (!(vmm_manager_vcpu_get_state(vcpu) & VMM_VCPU_STATE_INTERRUPTIBLE)) {
		return;
	}

	/* Check irq number */
	if (irq_no >= vcpu->irqs.irq_count) {
		return;
	}

	/* Assert the irq */
	if (arch_atomic_cmpxchg(&vcpu->irqs.irq[irq_no].assert, 
				DEASSERTED, ASSERTED) == DEASSERTED) {
		if (arch_vcpu_irq_assert(vcpu, irq_no, reason) == VMM_OK) {
			vcpu->irqs.irq[irq_no].reason = reason;
			arch_atomic_inc(&vcpu->irqs.execute_pending);
			arch_atomic64_inc(&vcpu->irqs.assert_count);
		} else {
			arch_atomic_write(&vcpu->irqs.irq[irq_no].assert,
					  DEASSERTED);
		}
	}

	/* Resume VCPU from wfi */
	vcpu_irq_wfi_resume(vcpu, FALSE);
}
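
The cmpxchg from DEASSERTED to ASSERTED makes the assert edge-triggered: only the first caller per edge bumps the pending count, and the write-back path undoes the edge if the arch hook rejects it. Below is a standalone model of that state machine using C11 atomics; all names and the model_arch_hook_ok switch are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { MODEL_DEASSERTED = 0, MODEL_ASSERTED = 1 };

static atomic_int model_assert_flag = MODEL_DEASSERTED;
static atomic_int model_execute_pending = 0;
static bool model_arch_hook_ok = true;	/* flip to false to exercise the rollback */

static void model_vcpu_irq_assert(void)
{
	int expected = MODEL_DEASSERTED;

	/* Edge trigger: only the first DEASSERTED -> ASSERTED transition wins */
	if (atomic_compare_exchange_strong(&model_assert_flag, &expected,
					   MODEL_ASSERTED)) {
		if (model_arch_hook_ok) {
			atomic_fetch_add(&model_execute_pending, 1);
		} else {
			/* Arch hook rejected the irq: undo the edge */
			atomic_store(&model_assert_flag, MODEL_DEASSERTED);
		}
	}
}

int main(void)
{
	model_vcpu_irq_assert();
	model_vcpu_irq_assert();	/* second call is a no-op: already asserted */
	printf("execute_pending = %d\n",
	       atomic_load(&model_execute_pending));	/* prints 1 */
	return 0;
}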
Example #4
void vmm_vcpu_irq_process(struct vmm_vcpu *vcpu, arch_regs_t *regs)
{
	/* For a non-normal VCPU don't do anything */
	if (!vcpu || !vcpu->is_normal) {
		return;
	}

	/* If the VCPU is not in an interruptible state then don't do anything */
	if (!(vmm_manager_vcpu_get_state(vcpu) & 
					VMM_VCPU_STATE_INTERRUPTIBLE)) {
		return;
	}

	/* Proceed only if we have pending execute */
	if (arch_atomic_dec_if_positive(&vcpu->irqs.execute_pending) >= 0) {
		int irq_no = -1;
		u32 i, tmp_prio, irq_count = vcpu->irqs.irq_count;
		u32 irq_prio = 0;

		/* Find the irq number to process */
		for (i = 0; i < irq_count; i++) {
			if (arch_atomic_read(&vcpu->irqs.irq[i].assert) ==
			    ASSERTED) {
				tmp_prio = arch_vcpu_irq_priority(vcpu, i);
				if (tmp_prio > irq_prio) {
					irq_no = i;
					irq_prio = tmp_prio;
				}
			}
		}
		if (irq_no == -1) {
			return;
		}

		/* If irq number found then execute it */
		if (arch_atomic_cmpxchg(&vcpu->irqs.irq[irq_no].assert,
					ASSERTED, PENDING) == ASSERTED) {
			if (arch_vcpu_irq_execute(vcpu, regs, irq_no,
				vcpu->irqs.irq[irq_no].reason) == VMM_OK) {
				arch_atomic_write(&vcpu->irqs.irq[irq_no].assert,
						  DEASSERTED);
				arch_atomic64_inc(&vcpu->irqs.execute_count);
			} else {
				/* arch_vcpu_irq_execute() may have failed
				 * because the VCPU was already processing
				 * a VCPU irq, so increment the execute
				 * pending count to retry next time.
				 */
				arch_atomic_inc(&vcpu->irqs.execute_pending);
				arch_atomic_write(&vcpu->irqs.irq[irq_no].assert,
						  ASSERTED);
			}
		}
	}
}
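
The selection loop above is a simple "highest priority among asserted" scan that falls back to -1 when nothing is asserted. The standalone sketch below reproduces just that scan with made-up arrays; all names and values are illustrative.

#include <stdio.h>

static int pick_irq(const int *asserted, const unsigned int *prio,
		    unsigned int count)
{
	int irq_no = -1;
	unsigned int best = 0;
	unsigned int i;

	/* Like the original '>' comparison, an entry with priority 0 is never picked */
	for (i = 0; i < count; i++) {
		if (asserted[i] && prio[i] > best) {
			irq_no = (int)i;
			best = prio[i];
		}
	}

	return irq_no;	/* -1 when nothing is asserted */
}

int main(void)
{
	int asserted[4]          = { 0, 1, 1, 0 };
	unsigned int prio[4]     = { 5, 2, 7, 9 };

	printf("selected irq = %d\n", pick_irq(asserted, prio, 4)); /* prints 2 */
	return 0;
}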
Example #5
void cpu_vcpu_halt(struct vmm_vcpu *vcpu, arch_regs_t *regs)
{
	if (vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_HALTED) {
		vmm_printf("\n");
		cpu_vcpu_dump_user_reg(regs);
		vmm_manager_vcpu_halt(vcpu);
	}
}
Example #6
void do_sync(arch_regs_t *regs, unsigned long mode)
{
	int rc = VMM_OK;
	u32 ec, il, iss;
	u64 esr, far, elr;
	physical_addr_t fipa = 0;
	struct vmm_vcpu *vcpu;

	esr = mrs(esr_el2);
	far = mrs(far_el2);
	elr = mrs(elr_el2);

	ec = (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
	il = (esr & ESR_IL_MASK) >> ESR_IL_SHIFT;
	iss = (esr & ESR_ISS_MASK) >> ESR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	/* We don't expect any faults from the hypervisor code itself,
	 * so any trap taken from hypervisor mode means something
	 * unexpected has occurred.
	 */
	if ((regs->pstate & PSR_EL_MASK) == PSR_EL_2) {
		if ((ec == EC_TRAP_HVC_A64) && (iss == 0)) {
			vmm_scheduler_preempt_orphan(regs);
			return;
		}
		vmm_printf("%s: CPU%d VCPU=%s unexpected exception\n",
			   __func__, vmm_smp_processor_id(),
			   (vcpu) ? vcpu->name : "(NULL)");
		vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
			   __func__, esr, ec, il, iss);
		vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
			   __func__, elr, far, mrs(hpfar_el2));
		cpu_vcpu_dump_user_reg(regs);
		vmm_panic("%s: please reboot ...\n", __func__);
	}

	vmm_scheduler_irq_enter(regs, TRUE);

	switch (ec) {
	case EC_UNKNOWN:
		/* We don't expect to get this trap so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_WFI_WFE:
		/* WFI emulation */
		rc = cpu_vcpu_emulate_wfi_wfe(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP15_A32:
		/* MCR/MRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP15_A32:
		/* MCRR/MRRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP14_A32:
		/* MCR/MRC CP14 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LDC_STC_CP14_A32:
		/* LDC/STC CP14 emulation */
		rc = cpu_vcpu_emulate_ldc_stc_cp14(vcpu, regs, il, iss);
		break;
	case EC_SIMD_FPU:
		/* Advanced SIMD and FPU emulation */
		rc = cpu_vcpu_emulate_simd_fp_regs(vcpu, regs, il, iss);
		break;
	case EC_FPEXC_A32:
	case EC_FPEXC_A64:
		/* We don't expect any FP execution faults */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_MRC_VMRS_CP10_A32:
		/* MRC (or VMRS) to CP10 for MVFR0, MVFR1 or FPSID */
		rc = cpu_vcpu_emulate_vmrs(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP14_A32:
		/* MCRR/MRRC CP14 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SVC_A32:
	case EC_TRAP_SVC_A64:
		/* We don't expect to get these traps so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_SMC_A32:
		/* SMC emulation for A32 guest */
		rc = cpu_vcpu_emulate_smc32(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SMC_A64:
		/* SMC emulation for A64 guest */
		rc = cpu_vcpu_emulate_smc64(vcpu, regs, il, iss);
		break;
	case EC_TRAP_HVC_A32:
		/* HVC emulation for A32 guest */
		rc = cpu_vcpu_emulate_hvc32(vcpu, regs, il, iss);
		break;
	case EC_TRAP_HVC_A64:
		/* HVC emulation for A64 guest */
		rc = cpu_vcpu_emulate_hvc64(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MSR_MRS_SYSTEM:
		/* MSR/MRS/SystemRegs emulation */
		rc = cpu_vcpu_emulate_msr_mrs_system(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LWREL_INST_ABORT:
		/* Stage2 instruction abort */
		fipa = (mrs(hpfar_el2) & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (mrs(far_el2) & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_inst_abort(vcpu, regs, il, iss, fipa);
		break;
	case EC_TRAP_LWREL_DATA_ABORT:
		/* Stage2 data abort */
		fipa = (mrs(hpfar_el2) & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (mrs(far_el2) & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_data_abort(vcpu, regs, il, iss, fipa);
		break;
	case EC_CUREL_INST_ABORT:
	case EC_CUREL_DATA_ABORT:
	case EC_SERROR:
		/* We don't expect to get aborts from EL2 so error */
		rc = VMM_EFAIL;
		break;
	case EC_PC_UNALIGNED:
	case EC_SP_UNALIGNED:
		/* We don't expect to get alignment faults from EL2 */
		rc = VMM_EFAIL;
		break;
	default:
		/* Unhandled or unknown EC value so error */
		rc = VMM_EFAIL;
		break;
	}

	if (rc) {
		vmm_printf("%s: CPU%d VCPU=%s sync failed (error %d)\n", 
			   __func__, vmm_smp_processor_id(), 
			   (vcpu) ? vcpu->name : "(NULL)", rc);
		vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
			   __func__, esr, ec, il, iss);
		vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
			   __func__, elr, far, mrs(hpfar_el2));
		if (vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_HALTED) {
			cpu_vcpu_halt(vcpu, regs);
		}
	}

	vmm_scheduler_irq_exit(regs);
}
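
Two mechanical pieces of this handler are easy to get wrong: splitting ESR_EL2 into EC/IL/ISS and rebuilding the faulting IPA from HPFAR_EL2 plus the page offset taken from FAR_EL2. The standalone sketch below shows both; the bit positions follow the ARMv8 register layouts (EC in bits [31:26], IL in bit [25], ISS in bits [24:0], HPFAR_EL2.FIPA in bits [39:4]), but the SK_* macros, the helper, and the sample values are local to the sketch, not Xvisor definitions.

#include <stdint.h>
#include <stdio.h>

#define SK_ESR_EC(esr)   (((esr) >> 26) & 0x3fULL)
#define SK_ESR_IL(esr)   (((esr) >> 25) & 0x1ULL)
#define SK_ESR_ISS(esr)  ((esr) & 0x1ffffffULL)

/* Rebuild the faulting IPA: HPFAR gives IPA[47:12], FAR gives the page offset */
static uint64_t sk_fault_ipa(uint64_t hpfar, uint64_t far)
{
	return (((hpfar >> 4) & 0xfffffffffULL) << 12) | (far & 0xfffULL);
}

int main(void)
{
	uint64_t esr = 0x93830047ULL;	/* arbitrary example syndrome value */

	printf("EC=0x%llx IL=%llu ISS=0x%llx\n",
	       (unsigned long long)SK_ESR_EC(esr),
	       (unsigned long long)SK_ESR_IL(esr),
	       (unsigned long long)SK_ESR_ISS(esr));
	printf("IPA=0x%llx\n",
	       (unsigned long long)sk_fault_ipa(0x8070ULL,
						0xffff000000001234ULL));
	return 0;
}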
Example #7
void do_hyp_trap(arch_regs_t *regs)
{
	int rc = VMM_OK;
	u32 hsr, ec, il, iss;
	virtual_addr_t far;
	physical_addr_t fipa = 0;
	struct vmm_vcpu *vcpu;

	hsr = read_hsr();
	ec = (hsr & HSR_EC_MASK) >> HSR_EC_SHIFT;
	il = (hsr & HSR_IL_MASK) >> HSR_IL_SHIFT;
	iss = (hsr & HSR_ISS_MASK) >> HSR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	/* We don't expect any faults from the hypervisor code itself,
	 * so any trap taken from hypervisor mode means something
	 * unexpected has occurred.
	 */
	if ((regs->cpsr & CPSR_MODE_MASK) == CPSR_MODE_HYPERVISOR) {
		vmm_printf("%s: CPU%d unexpected exception\n", 
			   __func__, vmm_smp_processor_id());
		vmm_printf("%s: Current VCPU=%s HSR=0x%08x\n",
			   __func__, (vcpu) ? vcpu->name : "(NULL)", 
			   read_hsr());
		vmm_printf("%s: HPFAR=0x%08x HIFAR=0x%08x HDFAR=0x%08x\n",
			   __func__, read_hpfar(), read_hifar(), read_hdfar());
		cpu_vcpu_dump_user_reg(regs);
		vmm_panic("%s: please reboot ...\n", __func__);
	}

	vmm_scheduler_irq_enter(regs, TRUE);

	switch (ec) {
	case EC_UNKNOWN:
		/* We don't expect to get this trap so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_WFI_WFE:
		/* WFI emulation */
		rc = cpu_vcpu_emulate_wfi_wfe(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP15:
		/* MCR/MRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP15:
		/* MCRR/MRRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP14:
		/* MCR/MRC CP14 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LDC_STC_CP14:
		/* LDC/STC CP14 emulation */
		rc = cpu_vcpu_emulate_ldc_stc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_CP0_TO_CP13:
		/* CP0 to CP13 emulation */
		rc = cpu_vcpu_emulate_cp0_cp13(vcpu, regs, il, iss);
		break;
	case EC_TRAP_VMRS:
		/* MRC (or VMRS) to CP10 for MVFR0, MVFR1 or FPSID */
		rc = cpu_vcpu_emulate_vmrs(vcpu, regs, il, iss);
		break;
	case EC_TRAP_JAZELLE:
		/* Jazelle emulation */
		rc = cpu_vcpu_emulate_jazelle(vcpu, regs, il, iss);
		break;
	case EC_TRAP_BXJ:
		/* BXJ emulation */
		rc = cpu_vcpu_emulate_bxj(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MRRC_CP14:
		/* MRRC to CP14 emulation */
		rc = cpu_vcpu_emulate_mrrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SVC:
		/* We don't expect to get this trap so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_HVC:
		/* Hypercall or HVC emulation */
		rc = cpu_vcpu_emulate_hvc(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SMC:
		/* System Monitor Call or SMC emulation */
		rc = cpu_vcpu_emulate_smc(vcpu, regs, il, iss);
		break;
	case EC_TRAP_STAGE2_INST_ABORT:
		/* Stage2 instruction abort */
		far  = read_hifar();
		fipa = (read_hpfar() & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (far & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_inst_abort(vcpu, regs, il, iss, far, fipa);
		break;
	case EC_TRAP_STAGE1_INST_ABORT:
		/* We don't expect to get this trap so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_STAGE2_DATA_ABORT:
		/* Stage2 data abort */
		far  = read_hdfar();
		fipa = (read_hpfar() & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (far & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_data_abort(vcpu, regs, il, iss, far, fipa);
		break;
	case EC_TRAP_STAGE1_DATA_ABORT:
		/* We don't expect to get this trap so error */
		rc = VMM_EFAIL;
		break;
	default:
		/* Unknown EC value so error */
		rc = VMM_EFAIL;
		break;
	}

	if (rc) {
		vmm_printf("\n%s: ec=0x%x, il=0x%x, iss=0x%x,"
			   " fipa=0x%x, error=%d\n", __func__,
			   ec, il, iss, fipa, rc);
		if (vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_HALTED) {
			cpu_vcpu_halt(vcpu, regs);
		}
	}

	vmm_scheduler_irq_exit(regs);
}
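
Both trap handlers follow the same skeleton: bracket the work with the scheduler's IRQ enter/exit calls, dispatch on the exception class, and on any emulation failure report the problem and halt only the offending guest VCPU, never the host. Below is a compact standalone model of that skeleton; the EC numbers, handler stubs, and messages are illustrative stand-ins, not the Xvisor implementation.

#include <stdbool.h>
#include <stdio.h>

enum { MODEL_OK = 0, MODEL_EFAIL = -1 };

static bool model_vcpu_halted;

static int model_emulate_wfi(void) { return MODEL_OK; }
static int model_emulate_hvc(void) { return MODEL_OK; }

static void model_dispatch_trap(unsigned int ec)
{
	int rc;

	/* ... vmm_scheduler_irq_enter() bookkeeping would go here ... */

	switch (ec) {
	case 0x01: rc = model_emulate_wfi(); break;	/* a class we emulate */
	case 0x12: rc = model_emulate_hvc(); break;	/* another emulated class */
	default:   rc = MODEL_EFAIL;         break;	/* unexpected class */
	}

	if (rc) {
		printf("trap ec=0x%x failed (%d): halting guest VCPU\n", ec, rc);
		model_vcpu_halted = true;
	}

	/* ... vmm_scheduler_irq_exit() bookkeeping would go here ... */
}

int main(void)
{
	model_dispatch_trap(0x01);	/* handled */
	model_dispatch_trap(0x3f);	/* unhandled: halts the (model) VCPU */
	return model_vcpu_halted ? 0 : 1;
}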