Example #1
int arch_vcpu_deinit(struct vmm_vcpu *vcpu)
{
	int rc = VMM_OK;
	u32 saved_cptr_el2, saved_hstr_el2;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));

	/* For Orphan VCPUs do nothing else */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Save CPTR_EL2 and HSTR_EL2 */
	saved_cptr_el2 = mrs(cptr_el2);
	saved_hstr_el2 = mrs(hstr_el2);

	/* We force-disable coprocessor and system traps to be
	 * consistent with the arch_vcpu_init() function.
	 */
	msr(cptr_el2, 0x0);
	msr(hstr_el2, 0x0);

	/* Free Generic Timer Context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
		if ((rc = generic_timer_vcpu_context_deinit(vcpu,
					&arm_gentimer_context(vcpu)))) {
			goto done;
		}
	}

	/* Free VFP context */
	rc = cpu_vcpu_vfp_deinit(vcpu);
	if (rc) {
		goto done;
	}

	/* Free sysregs context */
	rc = cpu_vcpu_sysregs_deinit(vcpu);
	if (rc) {
		goto done;
	}

	/* Free private context */
	vmm_free(vcpu->arch_priv);
	vcpu->arch_priv = NULL;

	rc = VMM_OK;

done:
	msr(cptr_el2, saved_cptr_el2);
	msr(hstr_el2, saved_hstr_el2);
	return rc;
}
int arch_vcpu_deinit(struct vmm_vcpu *vcpu)
{
	int rc;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));

	/* For Orphan VCPUs do nothing else */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Free Generic Timer Context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
		if ((rc = generic_timer_vcpu_context_deinit(vcpu,
					&arm_gentimer_context(vcpu)))) {
			return rc;
		}
	}

	/* Free VFP context */
	rc = cpu_vcpu_vfp_deinit(vcpu);
	if (rc) {
		return rc;
	}

	/* Free sysregs context */
	rc = cpu_vcpu_sysregs_deinit(vcpu);
	if (rc) {
		return rc;
	}

	/* Free private context */
	vmm_free(vcpu->arch_priv);
	vcpu->arch_priv = NULL;

	return VMM_OK;
}
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	int rc;
	u32 cpuid = 0;
	const char *attr;
	irq_flags_t flags;
	u32 phys_timer_irq, virt_timer_irq;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
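	/* Place SP at the top of the stack (the stack grows downwards) */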
	arm_regs(vcpu)->sp = vcpu->stack_va + vcpu->stack_sz - 8;
	if (!vcpu->is_normal) {
		arm_regs(vcpu)->pstate = PSR_MODE64_EL2h;
		arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
		return VMM_OK;
	}

	/* Following initialization for normal VCPUs only */
	rc = vmm_devtree_read_string(vcpu->node, 
			VMM_DEVTREE_COMPATIBLE_ATTR_NAME, &attr);
	if (rc) {
		goto fail;
	}
	if (strcmp(attr, "armv7a,cortex-a8") == 0) {
		cpuid = ARM_CPUID_CORTEXA8;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a9") == 0) {
		cpuid = ARM_CPUID_CORTEXA9;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a15") == 0) {
		cpuid = ARM_CPUID_CORTEXA15;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a7") == 0) {
		cpuid = ARM_CPUID_CORTEXA7;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv8,generic") == 0) {
		cpuid = ARM_CPUID_ARMV8;
	} else {
		rc = VMM_EINVALID;
		goto fail;
	}
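	/* 32-bit guests start in AArch32 Supervisor mode at EL1;
	 * 64-bit guests start in AArch64 EL1h mode.
	 */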
	if (arm_regs(vcpu)->pstate == PSR_MODE32) {
		/* Check if the host supports A32 mode @ EL1 */
		if (!cpu_supports_el1_a32()) {
			vmm_printf("Host does not support AArch32 mode\n");
			rc = VMM_ENOTAVAIL;
			goto fail;
		}
		arm_regs(vcpu)->pstate |= PSR_ZERO_MASK;
		arm_regs(vcpu)->pstate |= PSR_MODE32_SUPERVISOR;
	} else {
		arm_regs(vcpu)->pstate |= PSR_MODE64_DEBUG_DISABLED;
		arm_regs(vcpu)->pstate |= PSR_MODE64_EL1h;
	}
	arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_IRQ_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_FIQ_DISABLED;

	/* First time initialization of private context */
	if (!vcpu->reset_count) {
		/* Alloc private context */
		vcpu->arch_priv = vmm_zalloc(sizeof(struct arm_priv));
		if (!vcpu->arch_priv) {
			rc = VMM_ENOMEM;
			goto fail;
		}
		/* Set up the CPUID value expected by the VCPU in the
		 * MIDR register, as per HW specifications.
		 */
		arm_priv(vcpu)->cpuid = cpuid;
		/* Initialize VCPU features */
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA7:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA15:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_ARMV8:
			arm_set_feature(vcpu, ARM_FEATURE_V8);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			break;
		default:
			break;
		}
		/* Some features automatically imply others: */
		if (arm_feature(vcpu, ARM_FEATURE_V7)) {
			arm_set_feature(vcpu, ARM_FEATURE_VAPA);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_MPIDR);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_V6K);
			} else {
				arm_set_feature(vcpu, ARM_FEATURE_V6);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6K)) {
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_MVFR);
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6)) {
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V5)) {
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
		}
		if (arm_feature(vcpu, ARM_FEATURE_M)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_ARM_DIV)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP4)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP3)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
		}
		if (arm_feature(vcpu, ARM_FEATURE_LPAE)) {
			arm_set_feature(vcpu, ARM_FEATURE_PXN);
		}
		/* Initialize Hypervisor Configuration */
		INIT_SPIN_LOCK(&arm_priv(vcpu)->hcr_lock);
		arm_priv(vcpu)->hcr =  (HCR_TSW_MASK |
					HCR_TACR_MASK |
					HCR_TIDCP_MASK |
					HCR_TSC_MASK |
					HCR_TWE_MASK |
					HCR_TWI_MASK |
					HCR_AMO_MASK |
					HCR_IMO_MASK |
					HCR_FMO_MASK |
					HCR_SWIO_MASK |
					HCR_VM_MASK);
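		/* For a 64-bit guest, set HCR_EL2.RW so that EL1 runs
		 * in AArch64 state.
		 */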
		if (!(arm_regs(vcpu)->pstate & PSR_MODE32)) {
			arm_priv(vcpu)->hcr |= HCR_RW_MASK;
		}
		/* Initialize Coprocessor Trap Register */
		arm_priv(vcpu)->cptr = CPTR_TTA_MASK;
		arm_priv(vcpu)->cptr |= CPTR_TFP_MASK;
		/* Initialize Hypervisor System Trap Register */
		arm_priv(vcpu)->hstr = 0;
		/* Cleanup VGIC context first time */
		arm_vgic_cleanup(vcpu);
	}

	/* Clear virtual exception bits in HCR */
	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
	arm_priv(vcpu)->hcr &= ~(HCR_VSE_MASK | 
				 HCR_VI_MASK | 
				 HCR_VF_MASK);
	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	/* Set last host CPU to invalid value */
	arm_priv(vcpu)->last_hcpu = 0xFFFFFFFF;

	/* Initialize sysregs context */
	rc = cpu_vcpu_sysregs_init(vcpu, cpuid);
	if (rc) {
		goto fail_sysregs_init;
	}

	/* Initialize VFP context */
	rc = cpu_vcpu_vfp_init(vcpu);
	if (rc) {
		goto fail_vfp_init;
	}

	/* Initialize generic timer context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
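		/* Timer IRQ numbers come from the VCPU device tree node;
		 * treat a missing property as IRQ 0.
		 */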
		if (vmm_devtree_read_u32(vcpu->node, 
					 "gentimer_phys_irq",
					 &phys_timer_irq)) {
			phys_timer_irq = 0;
		}
		if (vmm_devtree_read_u32(vcpu->node, 
					 "gentimer_virt_irq",
					 &virt_timer_irq)) {
			virt_timer_irq = 0;
		}
		rc = generic_timer_vcpu_context_init(vcpu,
						&arm_gentimer_context(vcpu),
						phys_timer_irq,
						virt_timer_irq);
		if (rc) {
			goto fail_gentimer_init;
		}
	}

	return VMM_OK;

fail_gentimer_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_vfp_deinit(vcpu);
	}
fail_vfp_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_sysregs_deinit(vcpu);
	}
fail_sysregs_init:
	if (!vcpu->reset_count) {
		vmm_free(vcpu->arch_priv);
		vcpu->arch_priv = NULL;
	}
fail:
	return rc;
}