Example #1
u32 cpu_vcpu_regmode_read(struct vmm_vcpu *vcpu, 
			  arch_regs_t *regs, 
			  u32 mode,
			  u32 reg_num)
{
	u32 hwreg;
	switch (reg_num) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
	case 6:
	case 7:
		return regs->gpr[reg_num];
	case 8:
		if (mode == CPSR_MODE_FIQ) {
			asm volatile (" mrs     %0, r8_fiq\n\t" 
				      :"=r" (hwreg)::"memory", "cc");
			arm_priv(vcpu)->gpr_fiq[reg_num - 8] = hwreg;
			return arm_priv(vcpu)->gpr_fiq[reg_num - 8];
		} else {
			return regs->gpr[reg_num];
		}
	case 9:
		if (mode == CPSR_MODE_FIQ) {
			asm volatile (" mrs     %0, r9_fiq\n\t" 
				      :"=r" (hwreg)::"memory", "cc");
			arm_priv(vcpu)->gpr_fiq[reg_num - 8] = hwreg;
			return arm_priv(vcpu)->gpr_fiq[reg_num - 8];
		} else {
			return regs->gpr[reg_num];
		}
	/* ... */
	}
}
Example #2
void cpu_vcpu_cpsr_update(struct vmm_vcpu * vcpu, 
			  arch_regs_t * regs,
			  u32 new_cpsr,
			  u32 new_cpsr_mask)
{
	bool mode_change;
	/* Sanity check */
	if (!vcpu || !vcpu->is_normal) {
		return;
	}
	new_cpsr &= new_cpsr_mask;
	/* Determine if mode is changing */
	mode_change = FALSE;
	if ((new_cpsr_mask & CPSR_MODE_MASK) &&
	    ((arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) != 
					(new_cpsr & CPSR_MODE_MASK))) {
		mode_change = TRUE;
		/* Save banked registers for old CPSR */
		cpu_vcpu_banked_regs_save(vcpu, regs);
	}
	/* Set the new privileged bits of CPSR */
	arm_priv(vcpu)->cpsr &= (~CPSR_PRIVBITS_MASK | ~new_cpsr_mask);
	arm_priv(vcpu)->cpsr |= new_cpsr & CPSR_PRIVBITS_MASK & new_cpsr_mask;
	/* Set the new user bits of CPSR */
	regs->cpsr &= (~CPSR_USERBITS_MASK | ~new_cpsr_mask);
	regs->cpsr |= new_cpsr & CPSR_USERBITS_MASK & new_cpsr_mask;
	/* If mode is changing then */
	if (mode_change) {
		/* Restore values of banked registers for new CPSR */
		cpu_vcpu_banked_regs_restore(vcpu, regs);
		/* Synchronize CP15 state to change in mode */
		cpu_vcpu_cp15_sync_cpsr(vcpu);
	}
	return;
}
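
The two masked assignments above update only the bits selected by both the field mask and new_cpsr_mask; the expression (~CPSR_PRIVBITS_MASK | ~new_cpsr_mask) is just ~(CPSR_PRIVBITS_MASK & new_cpsr_mask) written via De Morgan. A minimal standalone sketch of the same update pattern, using plain integers and hypothetical mask values rather than the Xvisor types:

#include <assert.h>
#include <stdint.h>

/* Hypothetical field and update masks, for illustration only. */
#define FIELD_MASK	0x000000FFu
#define UPDATE_MASK	0x0000000Fu

/* Update only the bits selected by both FIELD_MASK and update_mask,
 * using the same ~A | ~B == ~(A & B) form as cpu_vcpu_cpsr_update(). */
static uint32_t masked_update(uint32_t old, uint32_t new_val, uint32_t update_mask)
{
	old &= (~FIELD_MASK | ~update_mask);
	old |= new_val & FIELD_MASK & update_mask;
	return old;
}

int main(void)
{
	uint32_t old = 0xAAAAAAAAu, new_val = 0x55555555u;
	uint32_t direct = (old & ~(FIELD_MASK & UPDATE_MASK)) |
			  (new_val & (FIELD_MASK & UPDATE_MASK));

	assert(masked_update(old, new_val, UPDATE_MASK) == direct);
	return 0;
}
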
Example #3
int arch_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
{
	u32 hcr;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	hcr = arm_priv(vcpu)->hcr;

	switch(irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr &= ~HCR_VI_MASK;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr &= ~HCR_VF_MASK;
		break;
	default:
		return VMM_EFAIL;
		break;
	};

	arm_priv(vcpu)->hcr = hcr;
	if (vmm_scheduler_current_vcpu() == vcpu) {
		write_hcr(hcr);
	}

	return VMM_OK;
}
Example #4
int cpu_vcpu_vfp_trap(struct vmm_vcpu *vcpu,
		      arch_regs_t *regs,
		      u32 il, u32 iss,
		      bool is_asimd)
{
	struct arm_priv *p = arm_priv(vcpu);
	struct arm_priv_vfp *vfp = &p->vfp;

	/* Inject undefined exception if:
	 * 1. VCPU does not have VFPv3 feature
	 */
	if (!arm_feature(vcpu, ARM_FEATURE_VFP3)) {
		/* Inject undefined exception */
		cpu_vcpu_inject_undef(vcpu, regs);
		return VMM_OK;
	}

	/* If VFP/ASIMD traps were enabled then:
	 * 1. Disable VFP/ASIMD traps
	 * 2. Restore VFP/ASIMD regs
	 */
	p->hcptr &= ~(HCPTR_TASE_MASK);
	p->hcptr &= ~(HCPTR_TCP11_MASK|HCPTR_TCP10_MASK);
	write_hcptr(p->hcptr);
	cpu_vcpu_vfp_regs_restore(vfp);

	return VMM_OK;
}
Example #5
void cpu_vcpu_vfp_save(struct vmm_vcpu *vcpu)
{
	struct arm_priv *p = arm_priv(vcpu);
	struct arm_priv_vfp *vfp = &p->vfp;

	/* Do nothing if:
	 * 1. VCPU does not have VFPv3 feature
	 */
	if (!arm_feature(vcpu, ARM_FEATURE_VFP3)) {
		return;
	}

	/* If VFP/ASIMD traps were disabled then:
	 * 1. Save VFP/ASIMD regs
	 * 2. Enable VFP/ASIMD traps
	 */
	if (!(p->hcptr & (HCPTR_TCP11_MASK|HCPTR_TCP10_MASK))) {
		cpu_vcpu_vfp_regs_save(vfp);
	}

	/* Force disable FPU
	 * Note: We don't use VFP in hypervisor so
	 * better disable it.
	 */
	write_fpexc(read_fpexc() & ~FPEXC_EN_MASK);
}
Example #6
void arch_vcpu_regs_dump(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu)
{
	struct arm_priv *p;

	/* For both Normal & Orphan VCPUs */
	__cpu_vcpu_dump_user_reg(cdev, arm_regs(vcpu));

	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return;
	}

	/* Get private context */
	p = arm_priv(vcpu);

	/* Hypervisor context */
	vmm_cprintf(cdev, "Hypervisor EL2 Registers\n");
	vmm_cprintf(cdev, " %11s=0x%016lx %11s=0x%016lx\n",
		    "HCR_EL2", p->hcr,
		    "CPTR_EL2", p->cptr);
	vmm_cprintf(cdev, " %11s=0x%016lx %11s=0x%016lx\n",
		    "HSTR_EL2", p->hstr,
		    "TTBR_EL2", arm_guest_priv(vcpu->guest)->ttbl->tbl_pa);

	/* Print VFP context */
	cpu_vcpu_vfp_dump(cdev, vcpu);

	/* Print sysregs context */
	cpu_vcpu_sysregs_dump(cdev, vcpu);
}
Example #7
void cpu_vcpu_spsr32_update(struct vmm_vcpu *vcpu, u32 mode, u32 new_spsr)
{
	struct arm_priv_sysregs *s = &arm_priv(vcpu)->sysregs;

	switch (mode) {
	case CPSR_MODE_ABORT:
		msr(spsr_abt, new_spsr);
		s->spsr_abt = new_spsr;
		break;
	case CPSR_MODE_UNDEFINED:
		msr(spsr_und, new_spsr);
		s->spsr_und = new_spsr;
		break;
	case CPSR_MODE_SUPERVISOR:
		msr(spsr_el1, new_spsr);
		s->spsr_el1 = new_spsr;
		break;
	case CPSR_MODE_IRQ:
		msr(spsr_irq, new_spsr);
		s->spsr_irq = new_spsr;
		break;
	case CPSR_MODE_FIQ:
		msr(spsr_fiq, new_spsr);
		s->spsr_fiq = new_spsr;
		break;
	case CPSR_MODE_HYPERVISOR:
		msr(spsr_el2, new_spsr);
		break;
	default:
		break;
	};
}
Example #8
void do_soft_irq(arch_regs_t * uregs)
{
	int rc = VMM_OK;
	struct vmm_vcpu * vcpu;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		vmm_panic("%s: unexpected exception\n", __func__);
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	/* If the VCPU privilege is user then generate an exception
	 * and return without emulating the instruction
	 */
	if ((arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
		vmm_vcpu_irq_assert(vcpu, CPU_SOFT_IRQ, 0x0);
	} else {
		if (uregs->cpsr & CPSR_THUMB_ENABLED) {
			rc = cpu_vcpu_hypercall_thumb(vcpu, uregs, 
							*((u32 *)uregs->pc));
		} else {
			rc = cpu_vcpu_hypercall_arm(vcpu, uregs, 
							*((u32 *)uregs->pc));
		}
	}

	if (rc) {
		vmm_printf("%s: error %d\n", __func__, rc);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example #9
u32 cpu_vcpu_spsr_retrieve(struct vmm_vcpu * vcpu)
{
	/* Find out correct SPSR */
	switch (arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) {
	case CPSR_MODE_ABORT:
		return arm_priv(vcpu)->spsr_abt;
		break;
	case CPSR_MODE_UNDEFINED:
		return arm_priv(vcpu)->spsr_und;
		break;
	case CPSR_MODE_MONITOR:
		return arm_priv(vcpu)->spsr_mon;
		break;
	case CPSR_MODE_SUPERVISOR:
		return arm_priv(vcpu)->spsr_svc;
		break;
	case CPSR_MODE_IRQ:
		return arm_priv(vcpu)->spsr_irq;
		break;
	case CPSR_MODE_FIQ:
		return arm_priv(vcpu)->spsr_fiq;
		break;
	default:
		break;
	};
	return 0;
}
Example #10
int cpu_vcpu_mem_read(struct vmm_vcpu *vcpu, 
			arch_regs_t *regs,
			virtual_addr_t addr, 
			void *dst, u32 dst_len, 
			bool force_unpriv)
{
	struct cpu_page pg;
	register int rc = VMM_OK;
	register u32 vind, ecode;
	register struct cpu_page *pgp = &pg;
	if ((addr & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) ==
	    arm_priv(vcpu)->cp15.ovect_base) {
		if ((arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
			force_unpriv = TRUE;
		}
		if ((ecode = cpu_vcpu_cp15_find_page(vcpu, addr,
						     CP15_ACCESS_READ,
						     force_unpriv, &pg))) {
			cpu_vcpu_cp15_assert_fault(vcpu, regs, addr,
						   (ecode >> 4), (ecode & 0xF),
						   0, 1);
			return VMM_EFAIL;
		}
		vind = addr & (TTBL_L2TBL_SMALL_PAGE_SIZE - 1);
		switch (dst_len) {
		case 4:
			vind &= ~(0x4 - 1);
			vind /= 0x4;
			*((u32 *) dst) = arm_guest_priv(vcpu->guest)->ovect[vind];
			break;
		case 2:
			vind &= ~(0x2 - 1);
			vind /= 0x2;
			*((u16 *) dst) =
			    ((u16 *)arm_guest_priv(vcpu->guest)->ovect)[vind];
			break;
		case 1:
			*((u8 *) dst) =
			    ((u8 *)arm_guest_priv(vcpu->guest)->ovect)[vind];
			break;
		default:
			return VMM_EFAIL;
			break;
		};
	} else {
		/* ... */
	}
	return rc;
}
Example #11
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	u64 hcr;
	bool update_hcr;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);

	hcr = arm_priv(vcpu)->hcr;
	update_hcr = FALSE;

	switch(irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	default:
		break;
	};

	if (update_hcr) {
		arm_priv(vcpu)->hcr = hcr;
		if (vcpu == vmm_scheduler_current_vcpu()) {
			msr(hcr_el2, hcr);
		}
	}

	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}
Example #12
int arch_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	u32 hcr;
	bool update_hcr;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	vmm_spin_lock_irqsave_lite(&arm_priv(vcpu)->hcr_lock, flags);

	hcr = arm_priv(vcpu)->hcr;
	update_hcr = FALSE;

	switch(irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr &= ~HCR_VI_MASK;
		update_hcr = TRUE;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr &= ~HCR_VF_MASK;
		update_hcr = TRUE;
		break;
	default:
		break;
	};

	if (update_hcr) {
		arm_priv(vcpu)->hcr = hcr;
		if (vmm_scheduler_current_vcpu() == vcpu) {
			write_hcr(hcr);
		}
	}

	vmm_spin_unlock_irqrestore_lite(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}
Example #13
u32 cpu_vcpu_cpsr_retrieve(struct vmm_vcpu * vcpu,
			  arch_regs_t * regs)
{
	if (!vcpu || !regs) {
		return 0;
	}
	if (vcpu->is_normal) {
		return (regs->cpsr & CPSR_USERBITS_MASK) |
			(arm_priv(vcpu)->cpsr & ~CPSR_USERBITS_MASK);
	} else {
		return regs->cpsr;
	}
}
Example #14
int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
			  arch_regs_t *regs, 
			  u32 irq_no, u64 reason)
{
	int rc;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	/* Undefined, Data abort, and Prefetch abort 
	 * can only be emulated in normal context.
	 */
	switch(irq_no) {
	case CPU_UNDEF_INST_IRQ:
		rc = cpu_vcpu_inject_undef(vcpu, regs);
		break;
	case CPU_PREFETCH_ABORT_IRQ:
		rc = cpu_vcpu_inject_pabt(vcpu, regs);
		break;
	case CPU_DATA_ABORT_IRQ:
		rc = cpu_vcpu_inject_dabt(vcpu, regs, (virtual_addr_t)reason);
		break;
	default:
		rc = VMM_OK;
		break;
	};

	/* Update HCR in HW */
	vmm_spin_lock_irqsave_lite(&arm_priv(vcpu)->hcr_lock, flags);
	write_hcr(arm_priv(vcpu)->hcr);
	vmm_spin_unlock_irqrestore_lite(&arm_priv(vcpu)->hcr_lock, flags);

	return rc;
}
Example #15
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u32 reason)
{
	u32 hcr;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	hcr = arm_priv(vcpu)->hcr;

	switch(irq_no) {
	case CPU_DATA_ABORT_IRQ:
		hcr |= HCR_VA_MASK;
		/* VA bit is auto-cleared */
		break;
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		break;
	default:
		return VMM_EFAIL;
		break;
	};

	arm_priv(vcpu)->hcr = hcr;
	if (vmm_scheduler_current_vcpu() == vcpu) {
		write_hcr(hcr);
	}

	return VMM_OK;
}
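
arch_vcpu_irq_assert() and its deassert counterpart above keep a software copy of HCR per VCPU and only write it to the hardware register when that VCPU is currently scheduled; virtual IRQ/FIQ injection amounts to setting or clearing the VI/VF bits in that copy. A small standalone model of this shadow-register pattern, with hypothetical bit positions and a stub in place of the real HCR write:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical bit positions for illustration; the real HCR layout differs. */
#define HCR_VI_MASK (1u << 7)
#define HCR_VF_MASK (1u << 6)

static uint32_t shadow_hcr;	/* software copy kept per VCPU */
static uint32_t hw_hcr;		/* stands in for the actual HCR register */

static void write_hcr_hw(uint32_t val) { hw_hcr = val; }

/* Assert or deassert a virtual interrupt by toggling a bit in the shadow
 * copy and pushing it to "hardware" only if the VCPU is currently running. */
static void virq_set(uint32_t mask, bool assert_it, bool vcpu_is_current)
{
	if (assert_it)
		shadow_hcr |= mask;
	else
		shadow_hcr &= ~mask;
	if (vcpu_is_current)
		write_hcr_hw(shadow_hcr);
}

int main(void)
{
	virq_set(HCR_VI_MASK, true, true);	/* inject virtual IRQ */
	assert(hw_hcr & HCR_VI_MASK);
	virq_set(HCR_VI_MASK, false, false);	/* deassert while descheduled */
	assert(!(shadow_hcr & HCR_VI_MASK) && (hw_hcr & HCR_VI_MASK));
	return 0;
}
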
Example #16
u32 cpu_vcpu_regmode_read(struct vmm_vcpu * vcpu, 
			  arch_regs_t * regs, 
			  u32 mode,
			  u32 reg_num)
{
	u32 hwreg;
	if (vcpu != vmm_scheduler_current_vcpu()) {
		/* This function should only be called for current VCPU */
		while (1); /* Hang !!! */
	}
	switch (reg_num) {
	case 0:
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
	case 6:
	case 7:
		return regs->gpr[reg_num];
	case 8:
		if (mode == CPSR_MODE_FIQ) {
			asm volatile (" mrs     %0, r8_fiq\n\t" 
				      :"=r" (hwreg)::"memory", "cc");
			arm_priv(vcpu)->gpr_fiq[reg_num - 8] = hwreg;
			return arm_priv(vcpu)->gpr_fiq[reg_num - 8];
		} else {
			return regs->gpr[reg_num];
		}
	case 9:
		if (mode == CPSR_MODE_FIQ) {
			asm volatile (" mrs     %0, r9_fiq\n\t" 
				      :"=r" (hwreg)::"memory", "cc");
			arm_priv(vcpu)->gpr_fiq[reg_num - 8] = hwreg;
			return arm_priv(vcpu)->gpr_fiq[reg_num - 8];
		} else {
			return regs->gpr[reg_num];
		}
	/* ... */
	}
}
Example #17
void cpu_vcpu_vfp_restore(struct vmm_vcpu *vcpu)
{
	struct arm_priv *p = arm_priv(vcpu);

	/* Make sure we trap VFP/ASIMD */
	p->hcptr |= (HCPTR_TASE_MASK);
	p->hcptr |= (HCPTR_TCP11_MASK|HCPTR_TCP10_MASK);

	/* Force enable FPU
	 * Note: If FPU is not enabled then we don't get
	 * VFP traps so we force enable FPU and setup
	 * traps using HCPTR register.
	 */
	write_fpexc(read_fpexc() | FPEXC_EN_MASK);
}
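
Taken together, cpu_vcpu_vfp_restore(), cpu_vcpu_vfp_trap() and cpu_vcpu_vfp_save() implement lazy VFP context switching: scheduling a VCPU in only re-arms the HCPTR traps, the guest's VFP registers are loaded on the first trapped access, and they are saved on schedule-out only if the traps were actually taken down. A simplified, self-contained model of that life cycle (illustrative state and names, not the real registers):

#include <assert.h>
#include <stdbool.h>

/* Simplified model of the lazy VFP switching seen in cpu_vcpu_vfp_trap(),
 * cpu_vcpu_vfp_save() and cpu_vcpu_vfp_restore(). */
struct vfp_state {
	bool traps_enabled;	/* stands in for the HCPTR TCP10/TCP11 bits */
	bool guest_regs_loaded;
	int  saves, loads;
};

static void vcpu_restore(struct vfp_state *s)	/* VCPU scheduled in */
{
	s->traps_enabled = true;	/* re-arm traps, defer register load */
	s->guest_regs_loaded = false;
}

static void vcpu_vfp_trap(struct vfp_state *s)	/* first guest VFP access */
{
	s->traps_enabled = false;
	s->guest_regs_loaded = true;
	s->loads++;
}

static void vcpu_save(struct vfp_state *s)	/* VCPU scheduled out */
{
	if (!s->traps_enabled)		/* guest really used VFP */
		s->saves++;
}

int main(void)
{
	struct vfp_state s = { 0 };

	vcpu_restore(&s);
	vcpu_save(&s);			/* guest never touched VFP: no save */
	assert(s.saves == 0);

	vcpu_restore(&s);
	vcpu_vfp_trap(&s);		/* guest used VFP this time slice */
	vcpu_save(&s);
	assert(s.loads == 1 && s.saves == 1);
	return 0;
}
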
Example #18
int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
			  arch_regs_t *regs, 
			  u32 irq_no, u32 reason)
{
	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	write_hcr(arm_priv(vcpu)->hcr);

	return VMM_OK;
}
Example #19
int cpu_vcpu_vfp_init(struct vmm_vcpu *vcpu)
{
	u32 fpu;
	struct arm_priv_vfp *vfp = &arm_priv(vcpu)->vfp;

	/* Clear VCPU VFP context */
	memset(vfp, 0, sizeof(struct arm_priv_vfp));

	/* If host HW does not have VFP (i.e. software VFP) then
	 * clear all VFP feature flags so that VCPU always gets
	 * undefined exception when accessing VFP registers.
	 */
	if (!cpu_supports_fpu()) {
		goto no_vfp_for_vcpu;
	}

	/* If Host HW does not support VFPv3 or higher then
	 * clear all VFP feature flags so that VCPU always gets
	 * undefined exception when accessing VFP registers.
	 */
	fpu = (read_fpsid() & FPSID_ARCH_MASK) >>  FPSID_ARCH_SHIFT;
	if ((fpu <= 1) || !arm_feature(vcpu, ARM_FEATURE_VFP3)) {
		goto no_vfp_for_vcpu;
	}

	/* Current strategy is to show VFP identification registers
	 * same as underlying Host HW so that Guest sees same VFP
	 * capabilities as Host HW.
	 */
	vfp->fpsid = read_fpsid();
	vfp->mvfr0 = read_mvfr0();
	vfp->mvfr1 = read_mvfr1();

	return VMM_OK;

no_vfp_for_vcpu:
	arm_clear_feature(vcpu, ARM_FEATURE_MVFR);
	arm_clear_feature(vcpu, ARM_FEATURE_VFP);
	arm_clear_feature(vcpu, ARM_FEATURE_VFP3);
	arm_clear_feature(vcpu, ARM_FEATURE_VFP4);
	return VMM_OK;
}
Example #20
void cpu_vcpu_vfp_dump(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu)
{
	u32 i;
	struct arm_priv_vfp *vfp = &arm_priv(vcpu)->vfp;

	/* Do nothing if:
	 * 1. VCPU does not have VFPv3 feature
	 */
	if (!arm_feature(vcpu, ARM_FEATURE_VFP3)) {
		return;
	}

	vmm_cprintf(cdev, "VFP Identification Registers\n");
	vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x %7s=0x%08x\n",
		    "FPSID", vfp->fpsid,
		    "MVFR0", vfp->mvfr0,
		    "MVFR1", vfp->mvfr1);
	vmm_cprintf(cdev, "VFP System Registers\n");
	vmm_cprintf(cdev, " %7s=0x%08x %7s=0x%08x %7s=0x%08x\n",
		    "FPEXC", vfp->fpexc,
		    "FPSCR", vfp->fpscr,
		    "FPINST", vfp->fpinst);
	vmm_cprintf(cdev, " %7s=0x%08x\n",
		    "FPINST2", vfp->fpinst2);
	vmm_cprintf(cdev, "VFP Data Registers");
	for (i = 0; i < 32; i++) {
		if (i % 2 == 0) {
			vmm_cprintf(cdev, "\n");
		}
		if (i < 16) {
			vmm_cprintf(cdev, " %5s%02d=0x%016llx",
				   "D", (i), vfp->fpregs1[i]);
		} else {
			vmm_cprintf(cdev, " %5s%02d=0x%016llx",
				   "D", (i), vfp->fpregs2[i-16]);
		}
	}
	vmm_cprintf(cdev, "\n");
}
Example #21
void arch_vcpu_stat_dump(struct vmm_vcpu * vcpu)
{
#ifdef CONFIG_ARM32_FUNCSTATS
	int index;

	if (!vcpu || !arm_priv(vcpu)) {
		return;
	}

	vmm_printf("%-30s %-10s %s\n", "Function Name","Time/Call", "# Calls");

	for (index=0; index < ARM_FUNCSTAT_MAX; index++) {
		if (arm_priv(vcpu)->funcstat[index].exit_count) { 
			vmm_printf("%-30s %-10u %u\n", 
			arm_priv(vcpu)->funcstat[index].function_name, 
			(u32)vmm_udiv64(arm_priv(vcpu)->funcstat[index].time, 
			arm_priv(vcpu)->funcstat[index].exit_count), 
			arm_priv(vcpu)->funcstat[index].exit_count); 
		} 
	} 
#else
	vmm_printf("Not selected in Xvisor config\n");
#endif
}
Example #22
void arch_vcpu_regs_dump(struct vmm_vcpu * vcpu)
{
	u32 ite;
	/* For both Normal & Orphan VCPUs */
	cpu_vcpu_dump_user_reg(vcpu, arm_regs(vcpu));
	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return;
	}
	vmm_printf("  User Mode Registers (Banked)\n");
	vmm_printf("    SP=0x%08x       LR=0x%08x\n",
		   arm_priv(vcpu)->sp_usr, arm_priv(vcpu)->lr_usr);
	vmm_printf("  Supervisor Mode Registers (Banked)\n");
	vmm_printf("    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		   arm_priv(vcpu)->sp_svc, arm_priv(vcpu)->lr_svc,
		   arm_priv(vcpu)->spsr_svc);
	vmm_printf("  Monitor Mode Registers (Banked)\n");
	vmm_printf("    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		   arm_priv(vcpu)->sp_mon, arm_priv(vcpu)->lr_mon,
		   arm_priv(vcpu)->spsr_mon);
	vmm_printf("  Abort Mode Registers (Banked)\n");
	vmm_printf("    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		   arm_priv(vcpu)->sp_abt, arm_priv(vcpu)->lr_abt,
		   arm_priv(vcpu)->spsr_abt);
	vmm_printf("  Undefined Mode Registers (Banked)\n");
	vmm_printf("    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		   arm_priv(vcpu)->sp_und, arm_priv(vcpu)->lr_und,
		   arm_priv(vcpu)->spsr_und);
	vmm_printf("  IRQ Mode Registers (Banked)\n");
	vmm_printf("    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		   arm_priv(vcpu)->sp_irq, arm_priv(vcpu)->lr_irq,
		   arm_priv(vcpu)->spsr_irq);
	vmm_printf("  FIQ Mode Registers (Banked)\n");
	vmm_printf("    SP=0x%08x       LR=0x%08x       SPSR=0x%08x",
		   arm_priv(vcpu)->sp_fiq, arm_priv(vcpu)->lr_fiq,
		   arm_priv(vcpu)->spsr_fiq);
	for (ite = 0; ite < 5; ite++) {
		if (ite % 3 == 0)
			vmm_printf("\n");
		vmm_printf("    R%02d=0x%08x  ", (ite + 8),
			   arm_priv(vcpu)->gpr_fiq[ite]);
	}
	vmm_printf("\n");
}
Example #23
int arch_vcpu_regs_init(struct vmm_vcpu * vcpu)
{
	u32 ite, cpuid = ARM_CPUID_CORTEXA8;
	/* Initialize User Mode Registers */
	/* For both Orphan & Normal VCPUs */
	vmm_memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	if (vcpu->is_normal) {
		arm_regs(vcpu)->cpsr  = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_USER;
	} else {
		arm_regs(vcpu)->cpsr  = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_SUPERVISOR;
		arm_regs(vcpu)->sp = vcpu->start_sp;
	}
	/* Initialize Supervisor Mode Registers */
	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}
	if (!vcpu->reset_count) {
		vcpu->arch_priv = vmm_malloc(sizeof(arm_priv_t));
		vmm_memset(arm_priv(vcpu), 0, sizeof(arm_priv_t));
		arm_priv(vcpu)->cpsr = CPSR_ASYNC_ABORT_DISABLED | 
				   CPSR_IRQ_DISABLED |
				   CPSR_FIQ_DISABLED | 
				   CPSR_MODE_SUPERVISOR;
	} else {
		for (ite = 0; ite < CPU_FIQ_GPR_COUNT; ite++) {
			arm_priv(vcpu)->gpr_usr[ite] = 0x0;
			arm_priv(vcpu)->gpr_fiq[ite] = 0x0;
		}
		arm_priv(vcpu)->sp_usr = 0x0;
		arm_priv(vcpu)->lr_usr = 0x0;
		arm_priv(vcpu)->sp_svc = 0x0;
		arm_priv(vcpu)->lr_svc = 0x0;
		arm_priv(vcpu)->spsr_svc = 0x0;
		arm_priv(vcpu)->sp_mon = 0x0;
		arm_priv(vcpu)->lr_mon = 0x0;
		arm_priv(vcpu)->spsr_mon = 0x0;
		arm_priv(vcpu)->sp_abt = 0x0;
		arm_priv(vcpu)->lr_abt = 0x0;
		arm_priv(vcpu)->spsr_abt = 0x0;
		arm_priv(vcpu)->sp_und = 0x0;
		arm_priv(vcpu)->lr_und = 0x0;
		arm_priv(vcpu)->spsr_und = 0x0;
		arm_priv(vcpu)->sp_irq = 0x0;
		arm_priv(vcpu)->lr_irq = 0x0;
		arm_priv(vcpu)->spsr_irq = 0x0;
		arm_priv(vcpu)->sp_fiq = 0x0;
		arm_priv(vcpu)->lr_fiq = 0x0;
		arm_priv(vcpu)->spsr_fiq = 0x0;
		cpu_vcpu_cpsr_update(vcpu, 
				     arm_regs(vcpu), 
				     (CPSR_ZERO_MASK |
					CPSR_ASYNC_ABORT_DISABLED | 
					CPSR_IRQ_DISABLED |
					CPSR_FIQ_DISABLED | 
					CPSR_MODE_SUPERVISOR),
				     CPSR_ALLBITS_MASK);
	}
	if (!vcpu->reset_count) {
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			break;
		default:
			break;
		};
	}
#ifdef CONFIG_ARM32_FUNCSTATS
	for (ite=0; ite < ARM_FUNCSTAT_MAX; ite++) {
		arm_priv(vcpu)->funcstat[ite].function_name = NULL;
		arm_priv(vcpu)->funcstat[ite].entry_count = 0;
		arm_priv(vcpu)->funcstat[ite].exit_count = 0;
		arm_priv(vcpu)->funcstat[ite].time = 0;
	}
#endif
	return cpu_vcpu_cp15_init(vcpu, cpuid);
}
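
arch_vcpu_regs_init() advertises a fixed feature set for the emulated CPU model via arm_set_feature(), and cpu_vcpu_vfp_init() above clears the VFP-related flags again when the host has no usable FPU. A minimal sketch of keeping such feature flags as bits in a word, with hypothetical flag numbers and helpers rather than the actual Xvisor definitions:

#include <assert.h>
#include <stdint.h>

/* Hypothetical feature bit numbers, for illustration only. */
enum { FEAT_V7 = 0, FEAT_THUMB2 = 1, FEAT_VFP3 = 2, FEAT_NEON = 3 };

static uint64_t features;

static void set_feature(int f)   { features |= (1ull << f); }
static void clear_feature(int f) { features &= ~(1ull << f); }
static int  has_feature(int f)   { return (features >> f) & 1ull; }

int main(void)
{
	/* A Cortex-A8-like model would set a fixed group of flags ... */
	set_feature(FEAT_V7);
	set_feature(FEAT_THUMB2);
	set_feature(FEAT_VFP3);
	set_feature(FEAT_NEON);

	/* ... and clear the VFP/NEON ones if the host has no usable FPU. */
	clear_feature(FEAT_VFP3);
	clear_feature(FEAT_NEON);

	assert(has_feature(FEAT_V7) && !has_feature(FEAT_NEON));
	return 0;
}
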
Example #24
void cpu_vcpu_banked_regs_save(struct vmm_vcpu * vcpu, arch_regs_t * src)
{
	if (!vcpu || !vcpu->is_normal || !src) {
		return;
	}
	switch (arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) {
	case CPSR_MODE_USER:
		arm_priv(vcpu)->gpr_usr[0] = src->gpr[8];
		arm_priv(vcpu)->gpr_usr[1] = src->gpr[9];
		arm_priv(vcpu)->gpr_usr[2] = src->gpr[10];
		arm_priv(vcpu)->gpr_usr[3] = src->gpr[11];
		arm_priv(vcpu)->gpr_usr[4] = src->gpr[12];
		arm_priv(vcpu)->sp_usr = src->sp;
		arm_priv(vcpu)->lr_usr = src->lr;
		break;
	case CPSR_MODE_SYSTEM:
		arm_priv(vcpu)->gpr_usr[0] = src->gpr[8];
		arm_priv(vcpu)->gpr_usr[1] = src->gpr[9];
		arm_priv(vcpu)->gpr_usr[2] = src->gpr[10];
		arm_priv(vcpu)->gpr_usr[3] = src->gpr[11];
		arm_priv(vcpu)->gpr_usr[4] = src->gpr[12];
		arm_priv(vcpu)->sp_usr = src->sp;
		arm_priv(vcpu)->lr_usr = src->lr;
		break;
	case CPSR_MODE_ABORT:
		arm_priv(vcpu)->gpr_usr[0] = src->gpr[8];
		arm_priv(vcpu)->gpr_usr[1] = src->gpr[9];
		arm_priv(vcpu)->gpr_usr[2] = src->gpr[10];
		arm_priv(vcpu)->gpr_usr[3] = src->gpr[11];
		arm_priv(vcpu)->gpr_usr[4] = src->gpr[12];
		arm_priv(vcpu)->sp_abt = src->sp;
		arm_priv(vcpu)->lr_abt = src->lr;
		break;
	case CPSR_MODE_UNDEFINED:
		arm_priv(vcpu)->gpr_usr[0] = src->gpr[8];
		arm_priv(vcpu)->gpr_usr[1] = src->gpr[9];
		arm_priv(vcpu)->gpr_usr[2] = src->gpr[10];
		arm_priv(vcpu)->gpr_usr[3] = src->gpr[11];
		arm_priv(vcpu)->gpr_usr[4] = src->gpr[12];
		arm_priv(vcpu)->sp_und = src->sp;
		arm_priv(vcpu)->lr_und = src->lr;
		break;
	case CPSR_MODE_MONITOR:
		arm_priv(vcpu)->gpr_usr[0] = src->gpr[8];
		arm_priv(vcpu)->gpr_usr[1] = src->gpr[9];
		arm_priv(vcpu)->gpr_usr[2] = src->gpr[10];
		arm_priv(vcpu)->gpr_usr[3] = src->gpr[11];
		arm_priv(vcpu)->gpr_usr[4] = src->gpr[12];
		arm_priv(vcpu)->sp_mon = src->sp;
		arm_priv(vcpu)->lr_mon = src->lr;
		break;
	case CPSR_MODE_SUPERVISOR:
		arm_priv(vcpu)->gpr_usr[0] = src->gpr[8];
		arm_priv(vcpu)->gpr_usr[1] = src->gpr[9];
		arm_priv(vcpu)->gpr_usr[2] = src->gpr[10];
		arm_priv(vcpu)->gpr_usr[3] = src->gpr[11];
		arm_priv(vcpu)->gpr_usr[4] = src->gpr[12];
		arm_priv(vcpu)->sp_svc = src->sp;
		arm_priv(vcpu)->lr_svc = src->lr;
		break;
	case CPSR_MODE_IRQ:
		arm_priv(vcpu)->gpr_usr[0] = src->gpr[8];
		arm_priv(vcpu)->gpr_usr[1] = src->gpr[9];
		arm_priv(vcpu)->gpr_usr[2] = src->gpr[10];
		arm_priv(vcpu)->gpr_usr[3] = src->gpr[11];
		arm_priv(vcpu)->gpr_usr[4] = src->gpr[12];
		arm_priv(vcpu)->sp_irq = src->sp;
		arm_priv(vcpu)->lr_irq = src->lr;
		break;
	case CPSR_MODE_FIQ:
		arm_priv(vcpu)->gpr_fiq[0] = src->gpr[8];
		arm_priv(vcpu)->gpr_fiq[1] = src->gpr[9];
		arm_priv(vcpu)->gpr_fiq[2] = src->gpr[10];
		arm_priv(vcpu)->gpr_fiq[3] = src->gpr[11];
		arm_priv(vcpu)->gpr_fiq[4] = src->gpr[12];
		arm_priv(vcpu)->sp_fiq = src->sp;
		arm_priv(vcpu)->lr_fiq = src->lr;
		break;
	default:
		break;
	};
}
Example #25
void cpu_vcpu_regmode_write(struct vmm_vcpu * vcpu, 
			    arch_regs_t * regs, 
			    u32 mode,
			    u32 reg_num,
			    u32 reg_val)
{
	u32 curmode = arm_priv(vcpu)->cpsr & CPSR_MODE_MASK;
	if (mode == curmode) {
		cpu_vcpu_reg_write(vcpu, regs, reg_num, reg_val);
	} else {
		switch (reg_num) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
			regs->gpr[reg_num] = reg_val;
			break;
		case 8:
		case 9:
		case 10:
		case 11:
		case 12:
			if (curmode == CPSR_MODE_FIQ) {
				arm_priv(vcpu)->gpr_usr[reg_num - 8] = reg_val;
			} else {
				if (mode == CPSR_MODE_FIQ) {
					arm_priv(vcpu)->gpr_fiq[reg_num - 8] = 
								reg_val;
				} else {
					regs->gpr[reg_num] = reg_val;
				}
			}
			break;
		case 13:
			switch (mode) {
			case CPSR_MODE_USER:
			case CPSR_MODE_SYSTEM:
				arm_priv(vcpu)->sp_usr = reg_val;
				break;
			case CPSR_MODE_FIQ:
				arm_priv(vcpu)->sp_fiq = reg_val;
				break;
			case CPSR_MODE_IRQ:
				arm_priv(vcpu)->sp_irq = reg_val;
				break;
			case CPSR_MODE_SUPERVISOR:
				arm_priv(vcpu)->sp_svc = reg_val;
				break;
			case CPSR_MODE_ABORT:
				arm_priv(vcpu)->sp_abt = reg_val;
				break;
			case CPSR_MODE_UNDEFINED:
				arm_priv(vcpu)->sp_und = reg_val;
				break;
			case CPSR_MODE_MONITOR:
				arm_priv(vcpu)->sp_mon = reg_val;
				break;
			default:
				break;
			};
			break;
		case 14:
			switch (mode) {
			case CPSR_MODE_USER:
			case CPSR_MODE_SYSTEM:
				arm_priv(vcpu)->lr_usr = reg_val;
				break;
			case CPSR_MODE_FIQ:
				arm_priv(vcpu)->lr_fiq = reg_val;
				break;
			case CPSR_MODE_IRQ:
				arm_priv(vcpu)->lr_irq = reg_val;
				break;
			case CPSR_MODE_SUPERVISOR:
				arm_priv(vcpu)->lr_svc = reg_val;
				break;
			case CPSR_MODE_ABORT:
				arm_priv(vcpu)->lr_abt = reg_val;
				break;
			case CPSR_MODE_UNDEFINED:
				arm_priv(vcpu)->lr_und = reg_val;
				break;
			case CPSR_MODE_MONITOR:
				arm_priv(vcpu)->lr_mon = reg_val;
				break;
			default:
				break;
			};
			break;
		case 15:
			regs->pc = reg_val;
			break;
		default:
			break;
		};
	}
}
Example #26
u32 cpu_vcpu_regmode_read(struct vmm_vcpu * vcpu, 
			  arch_regs_t * regs, 
			  u32 mode,
			  u32 reg_num)
{
	u32 curmode = arm_priv(vcpu)->cpsr & CPSR_MODE_MASK;
	if (mode == curmode) {
		return cpu_vcpu_reg_read(vcpu, regs, reg_num);
	} else {
		switch (reg_num) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
			return regs->gpr[reg_num];
			break;
		case 8:
		case 9:
		case 10:
		case 11:
		case 12:
			if (curmode == CPSR_MODE_FIQ) {
				return arm_priv(vcpu)->gpr_usr[reg_num - 8];
			} else {
				if (mode == CPSR_MODE_FIQ) {
					return arm_priv(vcpu)->
							gpr_fiq[reg_num - 8];
				} else {
					return regs->gpr[reg_num];
				}
			}
			break;
		case 13:
			switch (mode) {
			case CPSR_MODE_USER:
			case CPSR_MODE_SYSTEM:
				return arm_priv(vcpu)->sp_usr;
				break;
			case CPSR_MODE_FIQ:
				return arm_priv(vcpu)->sp_fiq;
				break;
			case CPSR_MODE_IRQ:
				return arm_priv(vcpu)->sp_irq;
				break;
			case CPSR_MODE_SUPERVISOR:
				return arm_priv(vcpu)->sp_svc;
				break;
			case CPSR_MODE_ABORT:
				return arm_priv(vcpu)->sp_abt;
				break;
			case CPSR_MODE_UNDEFINED:
				return arm_priv(vcpu)->sp_und;
				break;
			case CPSR_MODE_MONITOR:
				return arm_priv(vcpu)->sp_mon;
				break;
			default:
				break;
			};
			break;
		case 14:
			switch (mode) {
			case CPSR_MODE_USER:
			case CPSR_MODE_SYSTEM:
				return arm_priv(vcpu)->lr_usr;
				break;
			case CPSR_MODE_FIQ:
				return arm_priv(vcpu)->lr_fiq;
				break;
			case CPSR_MODE_IRQ:
				return arm_priv(vcpu)->lr_irq;
				break;
			case CPSR_MODE_SUPERVISOR:
				return arm_priv(vcpu)->lr_svc;
				break;
			case CPSR_MODE_ABORT:
				return arm_priv(vcpu)->lr_abt;
				break;
			case CPSR_MODE_UNDEFINED:
				return arm_priv(vcpu)->lr_und;
				break;
			case CPSR_MODE_MONITOR:
				return arm_priv(vcpu)->lr_mon;
				break;
			default:
				break;
			};
			break;
		case 15:
			return regs->pc;
			break;
		default:
			break;
		};
	}
	return 0x0;
}
Example #27
void do_prefetch_abort(arch_regs_t * uregs)
{
	int rc = VMM_EFAIL;
	bool crash_dump = FALSE;
	u32 ifsr, ifar, fs;
	struct vmm_vcpu * vcpu;

	ifsr = read_ifsr();
	ifar = read_ifar();

	fs = (ifsr & IFSR_FS_MASK);
#if !defined(CONFIG_ARMV5)
	fs |= (ifsr & IFSR_FS4_MASK) >> (IFSR_FS4_SHIFT - 4);
#endif

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		struct cpu_l1tbl * l1;
		struct cpu_page pg;
		if (fs != IFSR_FS_TRANS_FAULT_SECTION &&
		    fs != IFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected prefetch abort\n"
				  "%s: pc = 0x%08x, ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, uregs->pc, ifsr, ifar);
		}
		rc = cpu_mmu_get_reserved_page((virtual_addr_t)ifar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, ifsr, ifar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		return;
	}

	vcpu = vmm_scheduler_current_vcpu();

	if ((uregs->pc & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) == 
	    arm_priv(vcpu)->cp15.ovect_base) {
		uregs->pc = (virtual_addr_t)arm_guest_priv(vcpu->guest)->ovect 
			    + (uregs->pc & (TTBL_L2TBL_SMALL_PAGE_SIZE - 1));
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	switch(fs) {
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case IFSR_FS_TRANS_FAULT_SECTION:
	case IFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0, FALSE);
		crash_dump = TRUE;
		break;
	case IFSR_FS_ACCESS_FAULT_SECTION:
	case IFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DOMAIN_FAULT_SECTION:
	case IFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_PERM_FAULT_SECTION:
	case IFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DEBUG_EVENT:
	case IFSR_FS_SYNC_EXT_ABORT:
	case IFSR_FS_IMP_VALID_LOCKDOWN:
	case IFSR_FS_IMP_VALID_COPROC_ABORT:
	case IFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
		break;
	default:
		break; 
	};

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, ifar = 0x%x, ifsr = 0x%x\n", 
				__func__, vcpu->id, ifar, ifsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
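
The fault-status decode in do_prefetch_abort() (and likewise in do_data_abort() below) assembles a five-bit code from two separate places in the fault status register: FS[3:0] plus a detached FS[4] bit shifted down next to them. A tiny standalone illustration of that recombination, using assumed mask and shift values:

#include <assert.h>
#include <stdint.h>

/* Assumed layout for illustration: FS[3:0] at bits 0..3, FS[4] at bit 10. */
#define FS_MASK    0x0000000Fu
#define FS4_MASK   0x00000400u
#define FS4_SHIFT  10

static uint32_t decode_fs(uint32_t fsr)
{
	uint32_t fs = fsr & FS_MASK;
	fs |= (fsr & FS4_MASK) >> (FS4_SHIFT - 4);	/* move FS[4] to bit 4 */
	return fs;
}

int main(void)
{
	assert(decode_fs(0x00000005u) == 0x05u);	/* FS[4] clear */
	assert(decode_fs(0x00000405u) == 0x15u);	/* FS[4] set -> bit 4 */
	return 0;
}
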
Example #28
int cpu_vcpu_spsr_update(struct vmm_vcpu * vcpu, 
			 u32 new_spsr,
			 u32 new_spsr_mask)
{
	/* Sanity check */
	if (!vcpu || !vcpu->is_normal) {
		return VMM_EFAIL;
	}
	if ((arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
		return VMM_EFAIL;
	}
	new_spsr &= new_spsr_mask;
	/* Update appropriate SPSR */
	switch (arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) {
	case CPSR_MODE_ABORT:
		arm_priv(vcpu)->spsr_abt &= ~new_spsr_mask;
		arm_priv(vcpu)->spsr_abt |= new_spsr;
		break;
	case CPSR_MODE_UNDEFINED:
		arm_priv(vcpu)->spsr_und &= ~new_spsr_mask;
		arm_priv(vcpu)->spsr_und |= new_spsr;
		break;
	case CPSR_MODE_MONITOR:
		arm_priv(vcpu)->spsr_mon &= ~new_spsr_mask;
		arm_priv(vcpu)->spsr_mon |= new_spsr;
		break;
	case CPSR_MODE_SUPERVISOR:
		arm_priv(vcpu)->spsr_svc &= ~new_spsr_mask;
		arm_priv(vcpu)->spsr_svc |= new_spsr;
		break;
	case CPSR_MODE_IRQ:
		arm_priv(vcpu)->spsr_irq &= ~new_spsr_mask;
		arm_priv(vcpu)->spsr_irq |= new_spsr;
		break;
	case CPSR_MODE_FIQ:
		arm_priv(vcpu)->spsr_fiq &= ~new_spsr_mask;
		arm_priv(vcpu)->spsr_fiq |= new_spsr;
		break;
	default:
		break;
	};
	/* Return success */
	return VMM_OK;
}
Example #29
void do_data_abort(arch_regs_t * uregs)
{
	int rc = VMM_EFAIL; 
	bool crash_dump = FALSE;
	u32 dfsr, dfar, fs, dom, wnr;
	struct vmm_vcpu * vcpu;
	struct cpu_l1tbl * l1;
	struct cpu_page pg;

	dfsr = read_dfsr();
	dfar = read_dfar();

	fs = (dfsr & DFSR_FS_MASK);
#if !defined(CONFIG_ARMV5)
	fs |= (dfsr & DFSR_FS4_MASK) >> (DFSR_FS4_SHIFT - 4);
#endif
	wnr = (dfsr & DFSR_WNR_MASK) >> DFSR_WNR_SHIFT;
	dom = (dfsr & DFSR_DOM_MASK) >> DFSR_DOM_SHIFT;

	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		if (fs != DFSR_FS_TRANS_FAULT_SECTION &&
		    fs != DFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected data abort\n"
				  "%s: pc = 0x%08x, dfsr = 0x%08x, dfar = 0x%08x\n", 
				  __func__, __func__, uregs->pc, dfsr, dfar);
		}
		rc = cpu_mmu_get_reserved_page(dfar, &pg);
		if (rc) {
			/* If we were in normal context then just handle
			 * trans fault for current normal VCPU and exit
			 * else there is nothing we can do so panic.
			 */
			if (vmm_scheduler_normal_context()) {
				vcpu = vmm_scheduler_current_vcpu();
				cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1, FALSE);
				return;
			}
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n", 
				  __func__, __func__, dfsr, dfar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: dfsr = 0x%08x, dfar = 0x%08x\n",
				  __func__, __func__, dfsr, dfar);
		}
		return;
	}

	vcpu = vmm_scheduler_current_vcpu();

	vmm_scheduler_irq_enter(uregs, TRUE);

	switch(fs) {
	case DFSR_FS_ALIGN_FAULT:
		break;
	case DFSR_FS_ICACHE_MAINT_FAULT:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case DFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case DFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case DFSR_FS_TRANS_FAULT_SECTION:
	case DFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1, FALSE);
		crash_dump = TRUE;
		break;
	case DFSR_FS_ACCESS_FAULT_SECTION:
	case DFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_DOMAIN_FAULT_SECTION:
	case DFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		crash_dump = TRUE;
		break;
	case DFSR_FS_PERM_FAULT_SECTION:
	case DFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						dfar, fs, dom, wnr, 1);
		if ((dfar & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) != 
						arm_priv(vcpu)->cp15.ovect_base) {
			crash_dump = FALSE;
		}
		break;
	case DFSR_FS_DEBUG_EVENT:
	case DFSR_FS_SYNC_EXT_ABORT:
	case DFSR_FS_IMP_VALID_LOCKDOWN:
	case DFSR_FS_IMP_VALID_COPROC_ABORT:
	case DFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
	case DFSR_FS_ASYNC_EXT_ABORT:
	case DFSR_FS_MEM_ACCESS_ASYNC_PARITY_ERROR:
		break;
	default:
		break;
	};

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, dfar = 0x%x, dfsr = 0x%x\n", 
				__func__, vcpu->id, dfar, dfsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example #30
void cpu_vcpu_banked_regs_restore(struct vmm_vcpu * vcpu, arch_regs_t * dst)
{
	if (!vcpu || !vcpu->is_normal || !dst) {
		return;
	}
	switch (arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) {
	case CPSR_MODE_USER:
		dst->gpr[8] = arm_priv(vcpu)->gpr_usr[0];
		dst->gpr[9] = arm_priv(vcpu)->gpr_usr[1];
		dst->gpr[10] = arm_priv(vcpu)->gpr_usr[2];
		dst->gpr[11] = arm_priv(vcpu)->gpr_usr[3];
		dst->gpr[12] = arm_priv(vcpu)->gpr_usr[4];
		dst->sp = arm_priv(vcpu)->sp_usr;
		dst->lr = arm_priv(vcpu)->lr_usr;
		break;
	case CPSR_MODE_SYSTEM:
		dst->gpr[8] = arm_priv(vcpu)->gpr_usr[0];
		dst->gpr[9] = arm_priv(vcpu)->gpr_usr[1];
		dst->gpr[10] = arm_priv(vcpu)->gpr_usr[2];
		dst->gpr[11] = arm_priv(vcpu)->gpr_usr[3];
		dst->gpr[12] = arm_priv(vcpu)->gpr_usr[4];
		dst->sp = arm_priv(vcpu)->sp_usr;
		dst->lr = arm_priv(vcpu)->lr_usr;
		break;
	case CPSR_MODE_ABORT:
		dst->gpr[8] = arm_priv(vcpu)->gpr_usr[0];
		dst->gpr[9] = arm_priv(vcpu)->gpr_usr[1];
		dst->gpr[10] = arm_priv(vcpu)->gpr_usr[2];
		dst->gpr[11] = arm_priv(vcpu)->gpr_usr[3];
		dst->gpr[12] = arm_priv(vcpu)->gpr_usr[4];
		dst->sp = arm_priv(vcpu)->sp_abt;
		dst->lr = arm_priv(vcpu)->lr_abt;
		break;
	case CPSR_MODE_UNDEFINED:
		dst->gpr[8] = arm_priv(vcpu)->gpr_usr[0];
		dst->gpr[9] = arm_priv(vcpu)->gpr_usr[1];
		dst->gpr[10] = arm_priv(vcpu)->gpr_usr[2];
		dst->gpr[11] = arm_priv(vcpu)->gpr_usr[3];
		dst->gpr[12] = arm_priv(vcpu)->gpr_usr[4];
		dst->sp = arm_priv(vcpu)->sp_und;
		dst->lr = arm_priv(vcpu)->lr_und;
		break;
	case CPSR_MODE_MONITOR:
		dst->gpr[8] = arm_priv(vcpu)->gpr_usr[0];
		dst->gpr[9] = arm_priv(vcpu)->gpr_usr[1];
		dst->gpr[10] = arm_priv(vcpu)->gpr_usr[2];
		dst->gpr[11] = arm_priv(vcpu)->gpr_usr[3];
		dst->gpr[12] = arm_priv(vcpu)->gpr_usr[4];
		dst->sp = arm_priv(vcpu)->sp_mon;
		dst->lr = arm_priv(vcpu)->lr_mon;
		break;
	case CPSR_MODE_SUPERVISOR:
		dst->gpr[8] = arm_priv(vcpu)->gpr_usr[0];
		dst->gpr[9] = arm_priv(vcpu)->gpr_usr[1];
		dst->gpr[10] = arm_priv(vcpu)->gpr_usr[2];
		dst->gpr[11] = arm_priv(vcpu)->gpr_usr[3];
		dst->gpr[12] = arm_priv(vcpu)->gpr_usr[4];
		dst->sp = arm_priv(vcpu)->sp_svc;
		dst->lr = arm_priv(vcpu)->lr_svc;
		break;
	case CPSR_MODE_IRQ:
		dst->gpr[8] = arm_priv(vcpu)->gpr_usr[0];
		dst->gpr[9] = arm_priv(vcpu)->gpr_usr[1];
		dst->gpr[10] = arm_priv(vcpu)->gpr_usr[2];
		dst->gpr[11] = arm_priv(vcpu)->gpr_usr[3];
		dst->gpr[12] = arm_priv(vcpu)->gpr_usr[4];
		dst->sp = arm_priv(vcpu)->sp_irq;
		dst->lr = arm_priv(vcpu)->lr_irq;
		break;
	case CPSR_MODE_FIQ:
		dst->gpr[8] = arm_priv(vcpu)->gpr_fiq[0];
		dst->gpr[9] = arm_priv(vcpu)->gpr_fiq[1];
		dst->gpr[10] = arm_priv(vcpu)->gpr_fiq[2];
		dst->gpr[11] = arm_priv(vcpu)->gpr_fiq[3];
		dst->gpr[12] = arm_priv(vcpu)->gpr_fiq[4];
		dst->sp = arm_priv(vcpu)->sp_fiq;
		dst->lr = arm_priv(vcpu)->lr_fiq;
		break;
	default:
		break;
	};
}
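
cpu_vcpu_banked_regs_save() and cpu_vcpu_banked_regs_restore() are called as a pair around a CPSR mode change (see cpu_vcpu_cpsr_update()): the outgoing mode's view of SP/LR is stashed into per-mode slots and the incoming mode's view is loaded back. A simplified standalone model of that switching, with only two illustrative modes and SP:

#include <assert.h>
#include <stdint.h>

/* Simplified model of banked SP switching as done by
 * cpu_vcpu_banked_regs_save()/_restore() around a mode change. */
enum mode { MODE_SVC, MODE_IRQ, MODE_MAX };

struct ctx { uint32_t sp; };		/* stands in for arch_regs_t */
static uint32_t banked_sp[MODE_MAX];	/* per-mode saved copies */

static void banked_save(enum mode m, const struct ctx *c) { banked_sp[m] = c->sp; }
static void banked_restore(enum mode m, struct ctx *c)    { c->sp = banked_sp[m]; }

static void mode_change(enum mode from, enum mode to, struct ctx *c)
{
	banked_save(from, c);	/* stash outgoing mode's view */
	banked_restore(to, c);	/* load incoming mode's view */
}

int main(void)
{
	struct ctx c = { .sp = 0x1000 };	/* running in SVC */

	banked_sp[MODE_IRQ] = 0x2000;		/* IRQ mode's last SP */
	mode_change(MODE_SVC, MODE_IRQ, &c);
	assert(c.sp == 0x2000 && banked_sp[MODE_SVC] == 0x1000);

	mode_change(MODE_IRQ, MODE_SVC, &c);
	assert(c.sp == 0x1000);
	return 0;
}
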