Example 1
void do_bad_mode(arch_regs_t *regs, unsigned long mode)
{
	u32 ec, il, iss;
	u64 esr, far, elr;
	struct vmm_vcpu *vcpu;

	esr = mrs(esr_el2);
	far = mrs(far_el2);
	elr = mrs(elr_el2);

	ec = (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
	il = (esr & ESR_IL_MASK) >> ESR_IL_SHIFT;
	iss = (esr & ESR_ISS_MASK) >> ESR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	vmm_printf("%s: CPU%d VCPU=%s unexpected exception\n",
		   __func__, vmm_smp_processor_id(),
		   (vcpu) ? vcpu->name : "(NULL)");
	vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
		   __func__, esr, ec, il, iss);
	vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
		   __func__, elr, far, mrs(hpfar_el2));
	cpu_vcpu_dump_user_reg(regs);
	vmm_panic("%s: please reboot ...\n", __func__);
}
Example 2
int vmm_mutex_unlock(struct vmm_mutex *mut)
{
	int rc = VMM_EINVALID;
	irq_flags_t flags;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);

	if (mut->lock && mut->owner == current_vcpu) {
		mut->lock--;
		if (!mut->lock) {
			mut->owner = NULL;
			vmm_manager_vcpu_resource_remove(current_vcpu,
							 &mut->res);
			rc = __vmm_waitqueue_wakeall(&mut->wq);
			if (rc == VMM_ENOENT) {
				rc = VMM_OK;
			}
		} else {
			rc = VMM_OK;
		}
	}

	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

	return rc;
}
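Since the owning VCPU may acquire the mutex recursively (see the lock paths in Examples 7 and 18 below), every successful lock must be balanced by an unlock, and waiters are only woken once the count drops back to zero. A minimal usage sketch, assuming a vmm_mutex_lock() wrapper around the common lock path and an already-initialized mutex; xfer_mutex and update_xfer_state() are illustrative names, not taken from the examples:

static void update_xfer_state(struct vmm_mutex *xfer_mutex)
{
	/* Hedged sketch: vmm_mutex_lock() is assumed here; the unlock
	 * side is the vmm_mutex_unlock() shown above.
	 */
	vmm_mutex_lock(xfer_mutex);	/* count 0 -> 1, owner = current VCPU */
	vmm_mutex_lock(xfer_mutex);	/* recursive acquire, count 1 -> 2 */
	/* ... modify state protected by xfer_mutex ... */
	vmm_mutex_unlock(xfer_mutex);	/* count 2 -> 1, no wakeup yet */
	vmm_mutex_unlock(xfer_mutex);	/* count 1 -> 0, waiters woken */
}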
Example 3
int arch_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
{
	u32 hcr;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	hcr = arm_priv(vcpu)->hcr;

	switch(irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr &= ~HCR_VI_MASK;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr &= ~HCR_VF_MASK;
		break;
	default:
		return VMM_EFAIL;
		break;
	};

	arm_priv(vcpu)->hcr = hcr;
	if (vmm_scheduler_current_vcpu() == vcpu) {
		write_hcr(hcr);
	}

	return VMM_OK;
}
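This arch hook is normally reached through the generic VCPU IRQ layer rather than called directly. A rough caller-side sketch, assuming a vmm_vcpu_irq_deassert() counterpart to the vmm_vcpu_irq_assert() call used in Examples 4 and 5 below (its exact signature is an assumption, not taken from these examples):

/* Hedged sketch: raise a virtual external IRQ for a guest VCPU and
 * later lower it again. vmm_vcpu_irq_deassert() is assumed to mirror
 * vmm_vcpu_irq_assert() and to eventually reach the
 * arch_vcpu_irq_deassert() shown above, clearing HCR.VI/HCR.VF.
 */
vmm_vcpu_irq_assert(vcpu, CPU_EXTERNAL_IRQ, 0x0);
/* ... guest runs and services the virtual IRQ ... */
vmm_vcpu_irq_deassert(vcpu, CPU_EXTERNAL_IRQ, 0x0);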
Example 4
void do_undef_inst(vmm_user_regs_t * uregs)
{
	int rc = VMM_OK;
	vmm_vcpu_t * vcpu;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		vmm_panic("%s: unexpected exception\n", __func__);
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	/* If the VCPU privilege is user mode then generate an exception
	 * and return without emulating the instruction
	 */
	if ((vcpu->sregs->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
		vmm_vcpu_irq_assert(vcpu, CPU_UNDEF_INST_IRQ, 0x0);
	} else {
		if (uregs->cpsr & CPSR_THUMB_ENABLED) {
			rc = cpu_vcpu_emulate_thumb_inst(vcpu, uregs, FALSE);
		} else {
			rc = cpu_vcpu_emulate_arm_inst(vcpu, uregs, FALSE);
		}
	}

	if (rc) {
		vmm_printf("%s: error %d\n", __func__, rc);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example 5
void do_soft_irq(arch_regs_t * uregs)
{
	int rc = VMM_OK;
	struct vmm_vcpu * vcpu;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		vmm_panic("%s: unexpected exception\n", __func__);
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	/* If the VCPU privilege is user mode then generate an exception
	 * and return without emulating the instruction
	 */
	if ((arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
		vmm_vcpu_irq_assert(vcpu, CPU_SOFT_IRQ, 0x0);
	} else {
		if (uregs->cpsr & CPSR_THUMB_ENABLED) {
			rc = cpu_vcpu_hypercall_thumb(vcpu, uregs, 
							*((u32 *)uregs->pc));
		} else {
			rc = cpu_vcpu_hypercall_arm(vcpu, uregs, 
							*((u32 *)uregs->pc));
		}
	}

	if (rc) {
		vmm_printf("%s: error %d\n", __func__, rc);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example 6
static int mutex_lock_common(struct vmm_mutex *mut, u64 *timeout)
{
    int rc = VMM_OK;

    BUG_ON(!mut);
    BUG_ON(!vmm_scheduler_orphan_context());

    vmm_spin_lock_irq(&mut->wq.lock);

    while (mut->lock) {
        rc = __vmm_waitqueue_sleep(&mut->wq, timeout);
        if (rc) {
            /* Timeout or some other failure */
            break;
        }
    }
    if (rc == VMM_OK) {
        mut->lock = 1;
        mut->owner = vmm_scheduler_current_vcpu();
    }

    vmm_spin_unlock_irq(&mut->wq.lock);

    return rc;
}
Example 7
int vmm_mutex_trylock(struct vmm_mutex *mut)
{
	int ret = 0;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irq(&mut->wq.lock);

	if (!mut->lock) {
		mut->lock++;
		vmm_manager_vcpu_resource_add(current_vcpu, &mut->res);
		mut->owner = current_vcpu;
		ret = 1;
	} else if (mut->owner == current_vcpu) {
		/*
		 * If VCPU owning the lock try to acquire it again then let
		 * it acquire lock multiple times (as-per POSIX standard).
		 */
		mut->lock++;
		ret = 1;
	}

	vmm_spin_unlock_irq(&mut->wq.lock);

	return ret;
}
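Unlike the sleeping lock path, trylock never blocks: it returns 1 when the mutex was acquired (or is already held by the calling VCPU) and 0 when it is busy. A short usage sketch; stats_mutex and the helper functions are illustrative names only:

/* Hedged sketch: opportunistically take the mutex, otherwise defer
 * the work instead of sleeping on the waitqueue.
 */
if (vmm_mutex_trylock(&stats_mutex)) {
	update_stats_locked();		/* illustrative helper */
	vmm_mutex_unlock(&stats_mutex);
} else {
	defer_stats_update();		/* lock busy, don't block here */
}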
Example 8
vmm_guest_t * vmm_scheduler_current_guest(void)
{
	vmm_vcpu_t *vcpu = vmm_scheduler_current_vcpu();
	if (vcpu) {
		return vcpu->guest;
	}
	return NULL;
}
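Orphan (hypervisor-only) VCPUs have no associated guest, so callers must be prepared for a NULL return. A minimal sketch; touch_current_guest() is an illustrative name:

static int touch_current_guest(void)
{
	vmm_guest_t *guest = vmm_scheduler_current_guest();

	if (!guest) {
		/* Orphan context: no guest behind the current VCPU */
		return VMM_EFAIL;
	}

	/* ... operate on guest ... */

	return VMM_OK;
}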
Example 9
u32 do_general_exception(arch_regs_t *uregs)
{
    u32 cp0_cause = read_c0_cause();
    u32 cp0_status = read_c0_status();
    mips32_entryhi_t ehi;
    u32 victim_asid;
    u32 victim_inst;
    struct vmm_vcpu *c_vcpu;
    u8 delay_slot_exception = IS_BD_SET(cp0_cause);

    ehi._entryhi = read_c0_entryhi();
    victim_asid = ehi._s_entryhi.asid >> ASID_SHIFT;
    c_vcpu = vmm_scheduler_current_vcpu();

    /*
     * When the exception happens in the delay slot, we need to emulate
     * the corresponding branch instruction first. If it is one of the
     * "likely" instructions, we don't need to emulate the faulting
     * instruction, since "likely" instructions don't allow the slot to
     * be executed if the branch is not taken.
     */
    if (delay_slot_exception) {
        victim_inst = *((u32 *)(uregs->cp0_epc + 4));

        /*
         * If this function returns zero, the branch instruction was a
         * "likely" instruction and the branch wasn't taken. So don't
         * execute the delay slot, just return. The correct EPC to return
         * to will be programmed under our feet.
         */
        if (!cpu_vcpu_emulate_branch_and_jump_inst(c_vcpu, *((u32 *)uregs->cp0_epc), uregs)) {
            return VMM_OK;
        }
    } else {
        victim_inst = *((u32 *)uregs->cp0_epc);
    }

    switch (EXCEPTION_CAUSE(cp0_cause)) {
    case EXEC_CODE_COPU:
        cpu_vcpu_emulate_cop_inst(c_vcpu, victim_inst, uregs);

        if (!delay_slot_exception)
            uregs->cp0_epc += 4;

        break;

    case EXEC_CODE_TLBL:
        if (CPU_IN_USER_MODE(cp0_status) && is_vmm_asid(ehi._s_entryhi.asid)) {
            ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
            write_c0_entryhi(ehi._entryhi);
            vmm_panic("CPU is in user mode and ASID is pointing to VMM!!\n");
        }
        break;
    }

    return VMM_OK;
}
Example 10
void vmm_scheduler_preempt_enable(void)
{
	irq_flags_t flags;
	vmm_vcpu_t * vcpu = vmm_scheduler_current_vcpu();
	if (vcpu && vcpu->preempt_count) {
		flags = vmm_cpu_irq_save();
		vcpu->preempt_count--;
		vmm_cpu_irq_restore(flags);
	}
}
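This is the balancing half of a preempt-disable/enable pair around the current VCPU's preempt_count. A minimal sketch, assuming vmm_scheduler_preempt_disable() is the symmetric counterpart that increments the count:

/* Hedged sketch: keep the current VCPU from being preempted while
 * touching per-CPU state, then re-enable preemption.
 */
vmm_scheduler_preempt_disable();
/* ... access per-CPU state that must not be rescheduled away ... */
vmm_scheduler_preempt_enable();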
Example 11
void do_data_abort(arch_regs_t *regs)
{
	struct vmm_vcpu *vcpu = vmm_scheduler_current_vcpu();

	vmm_printf("%s: CPU%d unexpected exception\n",
		   __func__, vmm_smp_processor_id());
	vmm_printf("%s: Current VCPU=%s HSR=0x%08x\n",
		   __func__, (vcpu) ? vcpu->name : "(NULL)", 
		   read_hsr());
	vmm_printf("%s: HPFAR=0x%08x HIFAR=0x%08x HDFAR=0x%08x\n",
		   __func__, read_hpfar(), read_hifar(), read_hdfar());

	cpu_vcpu_dump_user_reg(regs);

	vmm_panic("%s: please reboot ...\n", __func__);
}
Example 12
static vmm_irq_return_t generic_virt_timer_handler(int irq, void *dev)
{
	int rc;
	u32 ctl, virq;
	struct vmm_vcpu *vcpu;

	ctl = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL);
	if (!(ctl & GENERIC_TIMER_CTRL_IT_STAT)) {
		/* We got an interrupt without the status bit set.
		 * Looks like we are running on buggy hardware.
		 */
		vmm_printf("%s: spurious interrupt\n", __func__);
		return VMM_IRQ_NONE;
	}

	ctl |= GENERIC_TIMER_CTRL_IT_MASK;
	generic_timer_reg_write(GENERIC_TIMER_REG_VIRT_CTRL, ctl);

	vcpu = vmm_scheduler_current_vcpu();
	if (!vcpu->is_normal) {
		/* We accidentally got an interrupt meant for a normal VCPU
		 * that was previously running on this host CPU.
		 */
		vmm_printf("%s: In orphan context (current VCPU=%s)\n",
			   __func__, vcpu->name);
		return VMM_IRQ_NONE;
	}

	virq = arm_gentimer_context(vcpu)->virt_timer_irq;
	if (virq == 0) {
		return VMM_IRQ_NONE;
	}

	rc = vmm_devemu_emulate_percpu_irq(vcpu->guest, virq, vcpu->subid, 0);
	if (rc) {
		vmm_printf("%s: Emulate VCPU=%s irq=%d level=0 failed\n",
			   __func__, vcpu->name, virq);
	}

	rc = vmm_devemu_emulate_percpu_irq(vcpu->guest, virq, vcpu->subid, 1);
	if (rc) {
		vmm_printf("%s: Emulate VCPU=%s irq=%d level=1 failed\n",
			   __func__, vcpu->name, virq);
	}

	return VMM_IRQ_HANDLED;
}
Example 13
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	u64 hcr;
	bool update_hcr;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);

	hcr = arm_priv(vcpu)->hcr;
	update_hcr = FALSE;

	switch(irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	default:
		break;
	};

	if (update_hcr) {
		arm_priv(vcpu)->hcr = hcr;
		if (vcpu == vmm_scheduler_current_vcpu()) {
			msr(hcr_el2, hcr);
		}
	}

	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}
Example 14
int arch_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	u32 hcr;
	bool update_hcr;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	vmm_spin_lock_irqsave_lite(&arm_priv(vcpu)->hcr_lock, flags);

	hcr = arm_priv(vcpu)->hcr;
	update_hcr = FALSE;

	switch(irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr &= ~HCR_VI_MASK;
		update_hcr = TRUE;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr &= ~HCR_VF_MASK;
		update_hcr = TRUE;
		break;
	default:
		break;
	};

	if (update_hcr) {
		arm_priv(vcpu)->hcr = hcr;
		if (vmm_scheduler_current_vcpu() == vcpu) {
			write_hcr(hcr);
		}
	}

	vmm_spin_unlock_irqrestore_lite(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}
Example 15
int vmm_mutex_unlock(struct vmm_mutex *mut)
{
    int rc = VMM_OK;
    irq_flags_t flags;

    BUG_ON(!mut);
    BUG_ON(!vmm_scheduler_orphan_context());

    vmm_spin_lock_irqsave(&mut->wq.lock, flags);

    if (mut->lock && mut->owner == vmm_scheduler_current_vcpu()) {
        mut->lock = 0;
        mut->owner = NULL;
        rc = __vmm_waitqueue_wakeall(&mut->wq);
    }

    vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

    return rc;
}
Example 16
void do_handle_irq(arch_regs_t *regs, unsigned long cause)
{
	int rc = VMM_OK;

	vmm_scheduler_irq_enter(regs, FALSE);

	/* NOTE: Only cause values <= 0xFFFFFFFFUL will be handled */
	if (cause <= 0xFFFFFFFFUL) {
		rc = vmm_host_active_irq_exec(cause);
	} else {
		rc = VMM_EINVALID;
	}

	if (rc) {
		do_error(vmm_scheduler_current_vcpu(), regs,
			 cause | SCAUSE_INTERRUPT_MASK,
			 "interrupt handling failed", rc, TRUE);
	}

	vmm_scheduler_irq_exit(regs);
}
Example 17
static vmm_irq_return_t generic_virt_timer_handler(int irq, void *dev)
{
	u32 ctl;
	struct vmm_vcpu *vcpu;
	struct generic_timer_context *cntx;

	ctl = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL);
	if (!(ctl & GENERIC_TIMER_CTRL_IT_STAT)) {
		/* We got an interrupt without the status bit set.
		 * Looks like we are running on buggy hardware.
		 */
		DPRINTF("%s: spurious interrupt\n", __func__);
		return VMM_IRQ_NONE;
	}

	ctl |= GENERIC_TIMER_CTRL_IT_MASK;
	generic_timer_reg_write(GENERIC_TIMER_REG_VIRT_CTRL, ctl);

	vcpu = vmm_scheduler_current_vcpu();
	if (!vcpu->is_normal) {
		/* We accidentally got an interrupt meant for a normal VCPU
		 * that was previously running on this host CPU.
		 */
		DPRINTF("%s: In orphan context (current VCPU=%s)\n",
			__func__, vcpu->name);
		return VMM_IRQ_NONE;
	}

	cntx = arm_gentimer_context(vcpu);
	if (!cntx) {
		/* We accidentally got an interrupt meant for another normal VCPU */
		DPRINTF("%s: Invalid normal context (current VCPU=%s)\n",
			__func__, vcpu->name);
		return VMM_IRQ_NONE;
	}

	generic_virt_irq_inject(vcpu, cntx);

	return VMM_IRQ_HANDLED;
}
Example 18
static int mutex_lock_common(struct vmm_mutex *mut, u64 *timeout)
{
	int rc = VMM_OK;
	irq_flags_t flags;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);

	while (mut->lock) {
		/*
		 * If VCPU owning the lock try to acquire it again then let
		 * it acquire lock multiple times (as-per POSIX standard).
		 */
		if (mut->owner == current_vcpu) {
			break;
		}
		rc = __vmm_waitqueue_sleep(&mut->wq, timeout);
		if (rc) {
			/* Timeout or some other failure */
			break;
		}
	}
	if (rc == VMM_OK) {
		if (!mut->lock) {
			mut->lock = 1;
			vmm_manager_vcpu_resource_add(current_vcpu,
						      &mut->res);
			mut->owner = current_vcpu;
		} else {
			mut->lock++;
		}
	}

	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

	return rc;
}
Example 19
int do_vcpu_tlbmiss(arch_regs_t *uregs)
{
	u32 badvaddr = read_c0_badvaddr();
	struct vmm_vcpu *current_vcpu;
	int counter = 0;
	mips32_tlb_entry_t *c_tlbe;

	current_vcpu = vmm_scheduler_current_vcpu();
	for (counter = 0; counter < 2 * CPU_TLB_COUNT; counter++) {
		c_tlbe = &mips_sregs(current_vcpu)->shadow_tlb_entries[counter];
		if (TBE_PGMSKD_VPN2(c_tlbe) ==
		    (badvaddr & ~c_tlbe->page_mask)) {
			mips_fill_tlb_entry(c_tlbe, -1);
			return 0;
		} else {
			vmm_panic("No TLB entry in shadow."
				  " Send fault to guest.\n");
		}
	}

	return VMM_EFAIL;
}
Example 20
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
{
	u32 hcr;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	hcr = arm_priv(vcpu)->hcr;

	switch(irq_no) {
	case CPU_DATA_ABORT_IRQ:
		hcr |= HCR_VA_MASK;
		/* VA bit is auto-cleared */
		break;
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		break;
	default:
		return VMM_EFAIL;
		break;
	};

	arm_priv(vcpu)->hcr = hcr;
	if (vmm_scheduler_current_vcpu() == vcpu) {
		write_hcr(hcr);
	}

	return VMM_OK;
}
Example 21
void do_soft_irq(arch_regs_t *regs)
{
	struct vmm_vcpu *vcpu;

	if ((regs->cpsr & CPSR_MODE_MASK) == CPSR_MODE_HYPERVISOR) {
		vmm_scheduler_preempt_orphan(regs);
		return;
	} else {
		vcpu = vmm_scheduler_current_vcpu();

		vmm_printf("%s: CPU%d unexpected exception\n",
			   __func__, vmm_smp_processor_id());
		vmm_printf("%s: Current VCPU=%s HSR=0x%08x\n",
			   __func__, (vcpu) ? vcpu->name : "(NULL)", 
			   read_hsr());
		vmm_printf("%s: HPFAR=0x%08x HIFAR=0x%08x HDFAR=0x%08x\n",
			   __func__, read_hpfar(), read_hifar(), read_hdfar());

		cpu_vcpu_dump_user_reg(regs);

		vmm_panic("%s: please reboot ...\n", __func__);
	}
}
Example 22
u32 cpu_vcpu_regmode_read(struct vmm_vcpu * vcpu, 
			  arch_regs_t * regs, 
			  u32 mode,
			  u32 reg_num)
{
	u32 hwreg;
	if (vcpu != vmm_scheduler_current_vcpu()) {
		/* This function should only be called for current VCPU */
		while (1); /* Hang !!! */
	}
	switch (reg_num) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
	case 6:
	case 7:
		return regs->gpr[reg_num];
	case 8:
		if (mode == CPSR_MODE_FIQ) {
			asm volatile (" mrs     %0, r8_fiq\n\t" 
				      :"=r" (hwreg)::"memory", "cc");
			arm_priv(vcpu)->gpr_fiq[reg_num - 8] = hwreg;
			return arm_priv(vcpu)->gpr_fiq[reg_num - 8];
		} else {
			return regs->gpr[reg_num];
		}
	case 9:
		if (mode == CPSR_MODE_FIQ) {
			asm volatile (" mrs     %0, r9_fiq\n\t" 
				      :"=r" (hwreg)::"memory", "cc");
			arm_priv(vcpu)->gpr_fiq[reg_num - 8] = hwreg;
			return arm_priv(vcpu)->gpr_fiq[reg_num - 8];
		} else {
			return regs->gpr[reg_num];
Example 23
void do_sync(arch_regs_t *regs, unsigned long mode)
{
	int rc = VMM_OK;
	u32 ec, il, iss;
	u64 esr, far, elr;
	physical_addr_t fipa = 0;
	struct vmm_vcpu *vcpu;

	esr = mrs(esr_el2);
	far = mrs(far_el2);
	elr = mrs(elr_el2);

	ec = (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
	il = (esr & ESR_IL_MASK) >> ESR_IL_SHIFT;
	iss = (esr & ESR_ISS_MASK) >> ESR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	/* We don't expect any faults from hypervisor code itself,
	 * so any trap we get from hypervisor mode means something
	 * unexpected has occurred.
	 */
	if ((regs->pstate & PSR_EL_MASK) == PSR_EL_2) {
		if ((ec == EC_TRAP_HVC_A64) && (iss == 0)) {
			vmm_scheduler_preempt_orphan(regs);
			return;
		}
		vmm_printf("%s: CPU%d VCPU=%s unexpected exception\n",
			   __func__, vmm_smp_processor_id(),
			   (vcpu) ? vcpu->name : "(NULL)");
		vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
			   __func__, esr, ec, il, iss);
		vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
			   __func__, elr, far, mrs(hpfar_el2));
		cpu_vcpu_dump_user_reg(regs);
		vmm_panic("%s: please reboot ...\n", __func__);
	}

	vmm_scheduler_irq_enter(regs, TRUE);

	switch (ec) {
	case EC_UNKNOWN:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_WFI_WFE:
		/* WFI emulation */
		rc = cpu_vcpu_emulate_wfi_wfe(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP15_A32:
		/* MCR/MRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP15_A32:
		/* MCRR/MRRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP14_A32:
		/* MCR/MRC CP14 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LDC_STC_CP14_A32:
		/* LDC/STC CP14 emulation */
		rc = cpu_vcpu_emulate_ldc_stc_cp14(vcpu, regs, il, iss);
		break;
	case EC_SIMD_FPU:
		/* Advanced SIMD and FPU emulation */
		rc = cpu_vcpu_emulate_simd_fp_regs(vcpu, regs, il, iss);
		break;
	case EC_FPEXC_A32:
	case EC_FPEXC_A64:
		/* We don't expect any FP execution faults */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_MRC_VMRS_CP10_A32:
		/* MRC (or VMRS) to CP10 for MVFR0, MVFR1 or FPSID */
		rc = cpu_vcpu_emulate_vmrs(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP14_A32:
		/* MRRC to CP14 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SVC_A32:
	case EC_TRAP_SVC_A64:
		/* We don't expect to get these traps, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_SMC_A32:
		/* SMC emulation for A32 guest */
		rc = cpu_vcpu_emulate_smc32(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SMC_A64:
		/* SMC emulation for A64 guest */
		rc = cpu_vcpu_emulate_smc64(vcpu, regs, il, iss);
		break;
	case EC_TRAP_HVC_A32:
		/* HVC emulation for A32 guest */
		rc = cpu_vcpu_emulate_hvc32(vcpu, regs, il, iss);
		break;
	case EC_TRAP_HVC_A64:
		/* HVC emulation for A64 guest */
		rc = cpu_vcpu_emulate_hvc64(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MSR_MRS_SYSTEM:
		/* MSR/MRS/SystemRegs emulation */
		rc = cpu_vcpu_emulate_msr_mrs_system(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LWREL_INST_ABORT:
		/* Stage2 instruction abort */
		fipa = (mrs(hpfar_el2) & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (mrs(far_el2) & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_inst_abort(vcpu, regs, il, iss, fipa);
		break;
	case EC_TRAP_LWREL_DATA_ABORT:
		/* Stage2 data abort */
		fipa = (mrs(hpfar_el2) & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (mrs(far_el2) & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_data_abort(vcpu, regs, il, iss, fipa);
		break;
	case EC_CUREL_INST_ABORT:
	case EC_CUREL_DATA_ABORT:
	case EC_SERROR:
		/* We don't expect to get aborts from EL2, so error */
		rc = VMM_EFAIL;
		break;
	case EC_PC_UNALIGNED:
	case EC_SP_UNALIGNED:
		/* We don't expect to get alignment faults from EL2 */
		rc = VMM_EFAIL;
		break;
	default:
		/* Unhandled or unknown EC value so error */
		rc = VMM_EFAIL;
		break;
	};

	if (rc) {
		vmm_printf("%s: CPU%d VCPU=%s sync failed (error %d)\n", 
			   __func__, vmm_smp_processor_id(), 
			   (vcpu) ? vcpu->name : "(NULL)", rc);
		vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
			   __func__, esr, ec, il, iss);
		vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
			   __func__, elr, far, mrs(hpfar_el2));
		if (vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_HALTED) {
			cpu_vcpu_halt(vcpu, regs);
		}
	}

	vmm_scheduler_irq_exit(regs);
}
Example 24
void do_prefetch_abort(vmm_user_regs_t * uregs)
{
	int rc = VMM_EFAIL;
	bool crash_dump = FALSE;
	u32 ifsr, ifar, fs;
	vmm_vcpu_t * vcpu;
	cpu_l1tbl_t * l1;
	cpu_page_t pg;

	ifsr = read_ifsr();
	ifar = read_ifar();
	fs = (ifsr & IFSR_FS4_MASK) >> IFSR_FS4_SHIFT;
	fs = (fs << 4) | (ifsr & IFSR_FS_MASK);

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		if (fs != IFSR_FS_TRANS_FAULT_SECTION &&
		    fs != IFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected prefetch abort\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, ifsr, ifar);
		}
		rc = cpu_mmu_get_reserved_page((virtual_addr_t)ifar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, ifsr, ifar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	switch(fs) {
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case IFSR_FS_TRANS_FAULT_SECTION:
	case IFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0, FALSE);
		crash_dump = TRUE;
		break;
	case IFSR_FS_ACCESS_FAULT_SECTION:
	case IFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DOMAIN_FAULT_SECTION:
	case IFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_PERM_FAULT_SECTION:
	case IFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DEBUG_EVENT:
	case IFSR_FS_SYNC_EXT_ABORT:
	case IFSR_FS_IMP_VALID_LOCKDOWN:
	case IFSR_FS_IMP_VALID_COPROC_ABORT:
	case IFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
		break;
	default:
		break; 
	};

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, ifar = 0x%x, ifsr = 0x%x\n", 
				__func__, vcpu->id, ifar, ifsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example 25
void do_data_abort(vmm_user_regs_t * uregs)
{
	int rc = VMM_EFAIL; 
	bool crash_dump = FALSE;
	u32 dfsr, dfar, fs, dom, wnr;
	vmm_vcpu_t * vcpu;
	cpu_l1tbl_t * l1;
	cpu_page_t pg;

	dfsr = read_dfsr();
	dfar = read_dfar();
	fs = (dfsr & DFSR_FS4_MASK) >> DFSR_FS4_SHIFT;
	fs = (fs << 4) | (dfsr & DFSR_FS_MASK);
	wnr = (dfsr & DFSR_WNR_MASK) >> DFSR_WNR_SHIFT;
	dom = (dfsr & DFSR_DOM_MASK) >> DFSR_DOM_SHIFT;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		if (fs != DFSR_FS_TRANS_FAULT_SECTION &&
		    fs != DFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected data abort\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n", 
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_get_reserved_page(dfar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n", 
				  __func__, __func__, dfsr, dfar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	switch(fs) {
	case DFSR_FS_ALIGN_FAULT:
		break;
	case DFSR_FS_ICACHE_MAINT_FAULT:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case DFSR_FS_TRANS_FAULT_SECTION:
	case DFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1, FALSE);
		crash_dump = TRUE;
		break;
	case DFSR_FS_ACCESS_FAULT_SECTION:
	case DFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_DOMAIN_FAULT_SECTION:
	case DFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_PERM_FAULT_SECTION:
	case DFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		if ((dfar & ~(sizeof(vcpu->sregs->cp15.ovect) - 1)) != 
						vcpu->sregs->cp15.ovect_base) {
			crash_dump = FALSE;
		}
		break;
	case DFSR_FS_DEBUG_EVENT:
	case DFSR_FS_SYNC_EXT_ABORT:
	case DFSR_FS_IMP_VALID_LOCKDOWN:
	case DFSR_FS_IMP_VALID_COPROC_ABORT:
	case DFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
	case DFSR_FS_ASYNC_EXT_ABORT:
	case DFSR_FS_MEM_ACCESS_ASYNC_PARITY_ERROR:
		break;
	default:
		break;
	};

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, dfar = 0x%x, dfsr = 0x%x\n", 
				__func__, vcpu->id, dfar, dfsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example 26
void do_handle_trap(arch_regs_t *regs, unsigned long cause)
{
	int rc = VMM_OK;
	bool panic = TRUE;
	const char *msg = "trap handling failed";
	struct vmm_vcpu *vcpu;

	if ((cause == CAUSE_STORE_PAGE_FAULT) &&
	    !(regs->hstatus & HSTATUS_SPV) &&
	    (regs->sepc == preempt_orphan_pc)) {
		regs->sepc += 4;
		vmm_scheduler_preempt_orphan(regs);
		return;
	}

	vmm_scheduler_irq_enter(regs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();
	if (!vcpu || !vcpu->is_normal) {
		rc = VMM_EFAIL;
		msg = "unexpected trap";
		goto done;
	}

	switch (cause) {
	case CAUSE_ILLEGAL_INSTRUCTION:
		msg = "illegal instruction fault failed";
		if (regs->hstatus & HSTATUS_SPV) {
			rc = cpu_vcpu_illegal_insn_fault(vcpu, regs,
							 csr_read(stval));
			panic = FALSE;
		} else {
			rc = VMM_EINVALID;
		}
		break;
	case CAUSE_FETCH_PAGE_FAULT:
	case CAUSE_LOAD_PAGE_FAULT:
	case CAUSE_STORE_PAGE_FAULT:
		msg = "page fault failed";
		if ((regs->hstatus & HSTATUS_SPV) &&
		    (regs->hstatus & HSTATUS_STL)) {
			rc = cpu_vcpu_page_fault(vcpu, regs,
						 cause, csr_read(stval));
			panic = FALSE;
		} else {
			rc = VMM_EINVALID;
		}
		break;
	default:
		rc = VMM_EFAIL;
		break;
	};

	if (rc) {
		vmm_manager_vcpu_halt(vcpu);
	}

done:
	if (rc) {
		do_error(vcpu, regs, cause, msg, rc, panic);
	}

	vmm_scheduler_irq_exit(regs);
}
Example 27
void do_data_abort(arch_regs_t * uregs)
{
	int rc = VMM_EFAIL; 
	bool crash_dump = FALSE;
	u32 dfsr, dfar, fs, dom, wnr;
	struct vmm_vcpu * vcpu;
	struct cpu_l1tbl * l1;
	struct cpu_page pg;

	dfsr = read_dfsr();
	dfar = read_dfar();

	fs = (dfsr & DFSR_FS_MASK);
#if !defined(CONFIG_ARMV5)
	fs |= (dfsr & DFSR_FS4_MASK) >> (DFSR_FS4_SHIFT - 4);
#endif
	wnr = (dfsr & DFSR_WNR_MASK) >> DFSR_WNR_SHIFT;
	dom = (dfsr & DFSR_DOM_MASK) >> DFSR_DOM_SHIFT;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		if (fs != DFSR_FS_TRANS_FAULT_SECTION &&
		    fs != DFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected data abort\n"
				  "%s: pc = 0x%08x, dfsr = 0x%08x, dfar = 0x%08x\n", 
				  __func__, __func__, uregs->pc, dfsr, dfar);
		}
		rc = cpu_mmu_get_reserved_page(dfar, &pg);
		if (rc) {
			/* If we were in normal context then just handle
			 * trans fault for current normal VCPU and exit
			 * else there is nothing we can do so panic.
			 */
			if (vmm_scheduler_normal_context()) {
				vcpu = vmm_scheduler_current_vcpu();
				cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1, FALSE);
				return;
			}
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n", 
				  __func__, __func__, dfsr, dfar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		return;
	}

	vcpu = vmm_scheduler_current_vcpu();

	vmm_scheduler_irq_enter(uregs, TRUE);

	switch(fs) {
	case DFSR_FS_ALIGN_FAULT:
		break;
	case DFSR_FS_ICACHE_MAINT_FAULT:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case DFSR_FS_TRANS_FAULT_SECTION:
	case DFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1, FALSE);
		crash_dump = TRUE;
		break;
	case DFSR_FS_ACCESS_FAULT_SECTION:
	case DFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_DOMAIN_FAULT_SECTION:
	case DFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_PERM_FAULT_SECTION:
	case DFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		if ((dfar & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) != 
						arm_priv(vcpu)->cp15.ovect_base) {
			crash_dump = FALSE;
		}
		break;
	case DFSR_FS_DEBUG_EVENT:
	case DFSR_FS_SYNC_EXT_ABORT:
	case DFSR_FS_IMP_VALID_LOCKDOWN:
	case DFSR_FS_IMP_VALID_COPROC_ABORT:
	case DFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
	case DFSR_FS_ASYNC_EXT_ABORT:
	case DFSR_FS_MEM_ACCESS_ASYNC_PARITY_ERROR:
		break;
	default:
		break;
	};

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, dfar = 0x%x, dfsr = 0x%x\n", 
				__func__, vcpu->id, dfar, dfsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example 28
void do_prefetch_abort(arch_regs_t * uregs)
{
	int rc = VMM_EFAIL;
	bool crash_dump = FALSE;
	u32 ifsr, ifar, fs;
	struct vmm_vcpu * vcpu;

	ifsr = read_ifsr();
	ifar = read_ifar();

	fs = (ifsr & IFSR_FS_MASK);
#if !defined(CONFIG_ARMV5)
	fs |= (ifsr & IFSR_FS4_MASK) >> (IFSR_FS4_SHIFT - 4);
#endif

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		struct cpu_l1tbl * l1;
		struct cpu_page pg;
		if (fs != IFSR_FS_TRANS_FAULT_SECTION &&
		    fs != IFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected prefetch abort\n"
				  "%s: pc = 0x%08x, ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, uregs->pc, ifsr, ifar);
		}
		rc = cpu_mmu_get_reserved_page((virtual_addr_t)ifar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, ifsr, ifar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		return;
	}

	vcpu = vmm_scheduler_current_vcpu();

	if ((uregs->pc & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) == 
	    arm_priv(vcpu)->cp15.ovect_base) {
		uregs->pc = (virtual_addr_t)arm_guest_priv(vcpu->guest)->ovect 
			    + (uregs->pc & (TTBL_L2TBL_SMALL_PAGE_SIZE - 1));
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	switch(fs) {
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case IFSR_FS_TRANS_FAULT_SECTION:
	case IFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0, FALSE);
		crash_dump = TRUE;
		break;
	case IFSR_FS_ACCESS_FAULT_SECTION:
	case IFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DOMAIN_FAULT_SECTION:
	case IFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_PERM_FAULT_SECTION:
	case IFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DEBUG_EVENT:
	case IFSR_FS_SYNC_EXT_ABORT:
	case IFSR_FS_IMP_VALID_LOCKDOWN:
	case IFSR_FS_IMP_VALID_COPROC_ABORT:
	case IFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
		break;
	default:
		break; 
	};

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, ifar = 0x%x, ifsr = 0x%x\n", 
				__func__, vcpu->id, ifar, ifsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example 29
void do_hyp_trap(arch_regs_t *regs)
{
	int rc = VMM_OK;
	u32 hsr, ec, il, iss;
	virtual_addr_t far;
	physical_addr_t fipa = 0;
	struct vmm_vcpu *vcpu;

	hsr = read_hsr();
	ec = (hsr & HSR_EC_MASK) >> HSR_EC_SHIFT;
	il = (hsr & HSR_IL_MASK) >> HSR_IL_SHIFT;
	iss = (hsr & HSR_ISS_MASK) >> HSR_ISS_SHIFT;

	vcpu = vmm_scheduler_current_vcpu();

	/* We don't expect any faults from hypervisor code itself,
	 * so any trap we get from hypervisor mode means something
	 * unexpected has occurred.
	 */
	if ((regs->cpsr & CPSR_MODE_MASK) == CPSR_MODE_HYPERVISOR) {
		vmm_printf("%s: CPU%d unexpected exception\n", 
			   __func__, vmm_smp_processor_id());
		vmm_printf("%s: Current VCPU=%s HSR=0x%08x\n",
			   __func__, (vcpu) ? vcpu->name : "(NULL)", 
			   read_hsr());
		vmm_printf("%s: HPFAR=0x%08x HIFAR=0x%08x HDFAR=0x%08x\n",
			   __func__, read_hpfar(), read_hifar(), read_hdfar());
		cpu_vcpu_dump_user_reg(regs);
		vmm_panic("%s: please reboot ...\n", __func__);
	}

	vmm_scheduler_irq_enter(regs, TRUE);

	switch (ec) {
	case EC_UNKNOWN:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_WFI_WFE:
		/* WFI emulation */
		rc = cpu_vcpu_emulate_wfi_wfe(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP15:
		/* MCR/MRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCRR_MRRC_CP15:
		/* MCRR/MRRC CP15 emulation */
		rc = cpu_vcpu_emulate_mcrr_mrrc_cp15(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MCR_MRC_CP14:
		/* MCR/MRC CP14 emulation */
		rc = cpu_vcpu_emulate_mcr_mrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_LDC_STC_CP14:
		/* LDC/STC CP14 emulation */
		rc = cpu_vcpu_emulate_ldc_stc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_CP0_TO_CP13:
		/* CP0 to CP13 emulation */
		rc = cpu_vcpu_emulate_cp0_cp13(vcpu, regs, il, iss);
		break;
	case EC_TRAP_VMRS:
		/* MRC (or VMRS) to CP10 for MVFR0, MVFR1 or FPSID */
		rc = cpu_vcpu_emulate_vmrs(vcpu, regs, il, iss);
		break;
	case EC_TRAP_JAZELLE:
		/* Jazelle emulation */
		rc = cpu_vcpu_emulate_jazelle(vcpu, regs, il, iss);
		break;
	case EC_TRAP_BXJ:
		/* BXJ emulation */
		rc = cpu_vcpu_emulate_bxj(vcpu, regs, il, iss);
		break;
	case EC_TRAP_MRRC_CP14:
		/* MRRC to CP14 emulation */
		rc = cpu_vcpu_emulate_mrrc_cp14(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SVC:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_HVC:
		/* Hypercall or HVC emulation */
		rc = cpu_vcpu_emulate_hvc(vcpu, regs, il, iss);
		break;
	case EC_TRAP_SMC:
		/* System Monitor Call or SMC emulation */
		rc = cpu_vcpu_emulate_smc(vcpu, regs, il, iss);
		break;
	case EC_TRAP_STAGE2_INST_ABORT:
		/* Stage2 instruction abort */
		far  = read_hifar();
		fipa = (read_hpfar() & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (far & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_inst_abort(vcpu, regs, il, iss, far, fipa);
		break;
	case EC_TRAP_STAGE1_INST_ABORT:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	case EC_TRAP_STAGE2_DATA_ABORT:
		/* Stage2 data abort */
		far  = read_hdfar();
		fipa = (read_hpfar() & HPFAR_FIPA_MASK) >> HPFAR_FIPA_SHIFT;
		fipa = fipa << HPFAR_FIPA_PAGE_SHIFT;
		fipa = fipa | (far & HPFAR_FIPA_PAGE_MASK);
		rc = cpu_vcpu_data_abort(vcpu, regs, il, iss, far, fipa);
		break;
	case EC_TRAP_STAGE1_DATA_ABORT:
		/* We don't expect to get this trap, so error */
		rc = VMM_EFAIL;
		break;
	default:
		/* Unknown EC value so error */
		rc = VMM_EFAIL;
		break;
	};

	if (rc) {
		vmm_printf("\n%s: ec=0x%x, il=0x%x, iss=0x%x,"
			   " fipa=0x%x, error=%d\n", __func__,
			   ec, il, iss, fipa, rc);
		if (vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_HALTED) {
			cpu_vcpu_halt(vcpu, regs);
		}
	}

	vmm_scheduler_irq_exit(regs);
}