Example #1: arch_guest_init()
int arch_guest_init(struct vmm_guest *guest)
{
	if (!guest->reset_count) {
		guest->arch_priv = vmm_malloc(sizeof(struct arm_guest_priv));
		if (!guest->arch_priv) {
			return VMM_ENOMEM;
		}

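		/* Allocate a stage2 (LPAE) translation table for this guest */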
		arm_guest_priv(guest)->ttbl = mmu_lpae_ttbl_alloc(TTBL_STAGE2);
		if (!arm_guest_priv(guest)->ttbl) {
			vmm_free(guest->arch_priv);
			guest->arch_priv = NULL;
			return VMM_ENOMEM;
		}

		if (vmm_devtree_read_u32(guest->node,
				"psci_version",
				&arm_guest_priv(guest)->psci_version)) {
			/* By default, assume PSCI v0.1 */
			arm_guest_priv(guest)->psci_version = 1;
		}
	}

	return VMM_OK;
}
Example #2: arch_guest_deinit()
int arch_guest_deinit(struct vmm_guest *guest)
{
	int rc;
	if (guest->arch_priv) {
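		/* Release the host page that backs the guest's overlay vectors, if it was allocated */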
		if (arm_guest_priv(guest)->ovect) {
			rc = vmm_host_free_pages(
			     (virtual_addr_t)arm_guest_priv(guest)->ovect, 1);
			if (rc) {
				return rc;
			}
		}
		vmm_free(guest->arch_priv);
	}
	return VMM_OK;
}
Example #3: arch_vcpu_regs_dump()
void arch_vcpu_regs_dump(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu)
{
	struct arm_priv *p;

	/* For both Normal & Orphan VCPUs */
	__cpu_vcpu_dump_user_reg(cdev, arm_regs(vcpu));

	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return;
	}

	/* Get private context */
	p = arm_priv(vcpu);

	/* Hypervisor context */
	vmm_cprintf(cdev, "Hypervisor EL2 Registers\n");
	vmm_cprintf(cdev, " %11s=0x%016lx %11s=0x%016lx\n",
		    "HCR_EL2", p->hcr,
		    "CPTR_EL2", p->cptr);
	vmm_cprintf(cdev, " %11s=0x%016lx %11s=0x%016lx\n",
		    "HSTR_EL2", p->hstr,
		    "TTBR_EL2", arm_guest_priv(vcpu->guest)->ttbl->tbl_pa);

	/* Print VFP context */
	cpu_vcpu_vfp_dump(cdev, vcpu);

	/* Print sysregs context */
	cpu_vcpu_sysregs_dump(cdev, vcpu);
}
Example #4: cpu_vcpu_mem_read()
int cpu_vcpu_mem_read(struct vmm_vcpu *vcpu, 
			arch_regs_t *regs,
			virtual_addr_t addr, 
			void *dst, u32 dst_len, 
			bool force_unpriv)
{
	struct cpu_page pg;
	register int rc = VMM_OK;
	register u32 vind, ecode;
	register struct cpu_page *pgp = &pg;
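	/* Check whether the faulting address lies inside the guest's overlay vector page */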
	if ((addr & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) ==
	    arm_priv(vcpu)->cp15.ovect_base) {
		if ((arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
			force_unpriv = TRUE;
		}
		if ((ecode = cpu_vcpu_cp15_find_page(vcpu, addr,
						     CP15_ACCESS_READ,
						     force_unpriv, &pg))) {
			cpu_vcpu_cp15_assert_fault(vcpu, regs, addr,
						   (ecode >> 4), (ecode & 0xF),
						   0, 1);
			return VMM_EFAIL;
		}
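		/* Read from the host-side copy of the overlay vectors, indexing by the access size */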
		vind = addr & (TTBL_L2TBL_SMALL_PAGE_SIZE - 1);
		switch (dst_len) {
		case 4:
			vind &= ~(0x4 - 1);
			vind /= 0x4;
			*((u32 *) dst) = arm_guest_priv(vcpu->guest)->ovect[vind];
			break;
		case 2:
			vind &= ~(0x2 - 1);
			vind /= 0x2;
			*((u16 *) dst) =
			    ((u16 *)arm_guest_priv(vcpu->guest)->ovect)[vind];
			break;
		case 1:
			*((u8 *) dst) =
			    ((u8 *)arm_guest_priv(vcpu->guest)->ovect)[vind];
			break;
		default:
			return VMM_EFAIL;
		}
	} else {
		/* ... handling of non-overlay addresses continues beyond this excerpt ... */
Example #5: arch_guest_deinit()
int arch_guest_deinit(struct vmm_guest *guest)
{
	int rc;

	if (guest->arch_priv) {
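		/* Release the guest's stage2 translation table before freeing the arch private context */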
		if ((rc = mmu_lpae_ttbl_free(arm_guest_priv(guest)->ttbl))) {
			return rc;
		}
		vmm_free(guest->arch_priv);
	}

	return VMM_OK;
}
Example #6: arch_guest_init()
int arch_guest_init(struct vmm_guest *guest)
{
	int rc;
	u32 ovect_flags;
	virtual_addr_t ovect_va;
	struct cpu_page pg;

	if (!guest->reset_count) {
		guest->arch_priv = vmm_malloc(sizeof(arm_guest_priv_t));
		if (!guest->arch_priv) {
			return VMM_EFAIL;
		}
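		/* Allocate one readable, writeable and executable host page for the guest's overlay vectors */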
		ovect_flags = 0x0;
		ovect_flags |= VMM_MEMORY_READABLE;
		ovect_flags |= VMM_MEMORY_WRITEABLE;
		ovect_flags |= VMM_MEMORY_CACHEABLE;
		ovect_flags |= VMM_MEMORY_EXECUTABLE;
		ovect_va = vmm_host_alloc_pages(1, ovect_flags);
		if (!ovect_va) {
			return VMM_EFAIL;
		}
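		/* Re-map the reserved page with access permissions that also allow user-mode reads */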
		if ((rc = cpu_mmu_get_reserved_page(ovect_va, &pg))) {
			return rc;
		}
		if ((rc = cpu_mmu_unmap_reserved_page(&pg))) {
			return rc;
		}
#if defined(CONFIG_ARMV5)
		pg.ap = TTBL_AP_SRW_UR;
#else
		if (pg.ap == TTBL_AP_SR_U) {
			pg.ap = TTBL_AP_SR_UR;
		} else {
			pg.ap = TTBL_AP_SRW_UR;
		}
#endif
		if ((rc = cpu_mmu_map_reserved_page(&pg))) {
			return rc;
		}
		arm_guest_priv(guest)->ovect = (u32 *)ovect_va;
	}

	return VMM_OK;
}
Example #7: do_prefetch_abort()
void do_prefetch_abort(arch_regs_t * uregs)
{
	int rc = VMM_EFAIL;
	bool crash_dump = FALSE;
	u32 ifsr, ifar, fs;
	struct vmm_vcpu * vcpu;

	ifsr = read_ifsr();
	ifar = read_ifar();

	fs = (ifsr & IFSR_FS_MASK);
#if !defined(CONFIG_ARMV5)
	fs |= (ifsr & IFSR_FS4_MASK) >> (IFSR_FS4_SHIFT - 4);
#endif

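	/* Abort taken while not in user mode: only a translation fault on a
	 * reserved page is expected; map that page into the current L1 table
	 * and resume.
	 */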
	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		struct cpu_l1tbl * l1;
		struct cpu_page pg;
		if (fs != IFSR_FS_TRANS_FAULT_SECTION &&
		    fs != IFSR_FS_TRANS_FAULT_PAGE) {
			vmm_panic("%s: unexpected prefetch abort\n"
				  "%s: pc = 0x%08x, ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, uregs->pc, ifsr, ifar);
		}
		rc = cpu_mmu_get_reserved_page((virtual_addr_t)ifar, &pg);
		if (rc) {
			vmm_panic("%s: cannot find reserved page\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n", 
				  __func__, __func__, ifsr, ifar);
		}
		l1 = cpu_mmu_l1tbl_current();
		if (!l1) {
			vmm_panic("%s: cannot find l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		rc = cpu_mmu_map_page(l1, &pg);
		if (rc) {
			vmm_panic("%s: cannot map page in l1 table\n"
				  "%s: ifsr = 0x%08x, ifar = 0x%08x\n",
				  __func__, __func__, ifsr, ifar);
		}
		return;
	}

	vcpu = vmm_scheduler_current_vcpu();

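	/* Faulting PC lies within the guest's overlay vector page: redirect it
	 * to the host-side copy of the vectors.
	 */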
	if ((uregs->pc & ~(TTBL_L2TBL_SMALL_PAGE_SIZE - 1)) == 
	    arm_priv(vcpu)->cp15.ovect_base) {
		uregs->pc = (virtual_addr_t)arm_guest_priv(vcpu->guest)->ovect 
			    + (uregs->pc & (TTBL_L2TBL_SMALL_PAGE_SIZE - 1));
		return;
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	switch(fs) {
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_1:
	case IFSR_FS_TTBL_WALK_SYNC_EXT_ABORT_2:
		break;
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_1:
	case IFSR_FS_TTBL_WALK_SYNC_PARITY_ERROR_2:
		break;
	case IFSR_FS_TRANS_FAULT_SECTION:
	case IFSR_FS_TRANS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_trans_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0, FALSE);
		crash_dump = TRUE;
		break;
	case IFSR_FS_ACCESS_FAULT_SECTION:
	case IFSR_FS_ACCESS_FAULT_PAGE:
		rc = cpu_vcpu_cp15_access_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DOMAIN_FAULT_SECTION:
	case IFSR_FS_DOMAIN_FAULT_PAGE:
		rc = cpu_vcpu_cp15_domain_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_PERM_FAULT_SECTION:
	case IFSR_FS_PERM_FAULT_PAGE:
		rc = cpu_vcpu_cp15_perm_fault(vcpu, uregs, 
						ifar, fs, 0, 0, 0);
		crash_dump = TRUE;
		break;
	case IFSR_FS_DEBUG_EVENT:
	case IFSR_FS_SYNC_EXT_ABORT:
	case IFSR_FS_IMP_VALID_LOCKDOWN:
	case IFSR_FS_IMP_VALID_COPROC_ABORT:
	case IFSR_FS_MEM_ACCESS_SYNC_PARITY_ERROR:
		break;
	default:
		break; 
	}

	if (rc && crash_dump) {
		vmm_printf("\n");
		vmm_printf("%s: error %d\n", __func__, rc);
		vmm_printf("%s: vcpu_id = %d, ifar = 0x%x, ifsr = 0x%x\n", 
				__func__, vcpu->id, ifar, ifsr);
		cpu_vcpu_dump_user_reg(vcpu, uregs);
	}

	vmm_scheduler_irq_exit(uregs);
}
Example #8: arch_vcpu_switch()
void arch_vcpu_switch(struct vmm_vcpu *tvcpu, 
		      struct vmm_vcpu *vcpu, 
		      arch_regs_t *regs)
{
	u32 ite;
	irq_flags_t flags;

	/* Save user registers & banked registers */
	if (tvcpu) {
		arm_regs(tvcpu)->pc = regs->pc;
		arm_regs(tvcpu)->lr = regs->lr;
		arm_regs(tvcpu)->sp = regs->sp;
		for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
			arm_regs(tvcpu)->gpr[ite] = regs->gpr[ite];
		}
		arm_regs(tvcpu)->pstate = regs->pstate;
		if (tvcpu->is_normal) {
			/* Update last host CPU */
			arm_priv(tvcpu)->last_hcpu = vmm_smp_processor_id();
			/* Save VGIC context */
			arm_vgic_save(tvcpu);
			/* Save sysregs context */
			cpu_vcpu_sysregs_save(tvcpu);
			/* Save VFP and SIMD context */
			cpu_vcpu_vfp_save(tvcpu);
			/* Save generic timer */
			if (arm_feature(tvcpu, ARM_FEATURE_GENERIC_TIMER)) {
				generic_timer_vcpu_context_save(tvcpu,
						arm_gentimer_context(tvcpu));
			}
		}
	}
	/* Restore user registers & special registers */
	regs->pc = arm_regs(vcpu)->pc;
	regs->lr = arm_regs(vcpu)->lr;
	regs->sp = arm_regs(vcpu)->sp;
	for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
		regs->gpr[ite] = arm_regs(vcpu)->gpr[ite];
	}
	regs->pstate = arm_regs(vcpu)->pstate;
	if (vcpu->is_normal) {
		/* Restore hypervisor context */
		vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
		msr(hcr_el2, arm_priv(vcpu)->hcr);
		vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);
		msr(cptr_el2, arm_priv(vcpu)->cptr);
		msr(hstr_el2, arm_priv(vcpu)->hstr);
		/* Restore Stage2 MMU context */
		mmu_lpae_stage2_chttbl(vcpu->guest->id, 
			       arm_guest_priv(vcpu->guest)->ttbl);
		/* Restore generic timer */
		if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
			generic_timer_vcpu_context_restore(vcpu,
						arm_gentimer_context(vcpu));
		}
		/* Restore VFP and SIMD context */
		cpu_vcpu_vfp_restore(vcpu);
		/* Restore sysregs context */
		cpu_vcpu_sysregs_restore(vcpu);
		/* Restore VGIC context */
		arm_vgic_restore(vcpu);
		/* Flush TLB if moved to new host CPU */
		if (arm_priv(vcpu)->last_hcpu != vmm_smp_processor_id()) {
			/* Invalidate all guest TLB entries because
			 * we might have stale guest TLB entries from
			 * a previous run on this host CPU
			 */
			inv_tlb_guest_allis();
			/* Ensure changes are visible */
			dsb();
			isb();
		}
	}
	/* Clear exclusive monitor */
	clrex();
}