Example No. 1
void arch_vcpu_regs_switch(struct vmm_vcpu *tvcpu,
			   struct vmm_vcpu *vcpu, arch_regs_t *regs)
{
	u32 ite;
	/* Save user registers & banked registers */
	if (tvcpu) {
		arm_regs(tvcpu)->pc = regs->pc;
		arm_regs(tvcpu)->lr = regs->lr;
		arm_regs(tvcpu)->sp = regs->sp;
		for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
			arm_regs(tvcpu)->gpr[ite] = regs->gpr[ite];
		}
		arm_regs(tvcpu)->cpsr = regs->cpsr;
		if (tvcpu->is_normal) {
			cpu_vcpu_banked_regs_save(tvcpu, regs);
		}
	}
	/* Switch CP15 context */
	cpu_vcpu_cp15_switch_context(tvcpu, vcpu);
	/* Restore user registers & banked registers */
	regs->pc = arm_regs(vcpu)->pc;
	regs->lr = arm_regs(vcpu)->lr;
	regs->sp = arm_regs(vcpu)->sp;
	for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
		regs->gpr[ite] = arm_regs(vcpu)->gpr[ite];
	}
	regs->cpsr = arm_regs(vcpu)->cpsr;
	if (vcpu->is_normal) {
		cpu_vcpu_banked_regs_restore(vcpu, regs);
	}
}
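A note on the calling convention (editorial): arch_vcpu_regs_switch saves the outgoing context only when tvcpu is non-NULL, so the first switch on a host CPU simply restores the incoming VCPU; banked registers are saved/restored only for Normal VCPUs, presumably because Orphan VCPUs run purely as hypervisor threads in a single mode. A minimal caller sketch under those assumptions (the wrapper name and its arguments are hypothetical, not Xvisor API):

void sched_switch_sketch(struct vmm_vcpu *current,
			 struct vmm_vcpu *next, arch_regs_t *regs)
{
	/* current may be NULL on the very first switch: nothing to save */
	arch_vcpu_regs_switch(current, next, regs);
	/* 'regs' now holds next's pc/lr/sp/gpr/cpsr and is loaded into
	 * hardware on the subsequent exception return */
}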
Example No. 2
void arch_vcpu_regs_dump(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu)
{
	struct arm_priv *p;

	/* For both Normal & Orphan VCPUs */
	__cpu_vcpu_dump_user_reg(cdev, arm_regs(vcpu));

	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return;
	}

	/* Get private context */
	p = arm_priv(vcpu);

	/* Hypervisor context */
	vmm_cprintf(cdev, "Hypervisor EL2 Registers\n");
	vmm_cprintf(cdev, " %11s=0x%016lx %11s=0x%016lx\n",
		    "HCR_EL2", p->hcr,
		    "CPTR_EL2", p->cptr);
	vmm_cprintf(cdev, " %11s=0x%016lx %11s=0x%016lx\n",
		    "HSTR_EL2", p->hstr,
		    "TTBR_EL2", arm_guest_priv(vcpu->guest)->ttbl->tbl_pa);

	/* Print VFP context */
	cpu_vcpu_vfp_dump(cdev, vcpu);

	/* Print sysregs context */
	cpu_vcpu_sysregs_dump(cdev, vcpu);
}
Example No. 3
int arch_vcpu_deinit(struct vmm_vcpu *vcpu)
{
	int rc = VMM_OK;
	u32 saved_cptr_el2, saved_hstr_el2;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));

	/* For Orphan VCPUs do nothing else */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Save CPTR_EL2 and HSTR_EL2 */
	saved_cptr_el2 = mrs(cptr_el2);
	saved_hstr_el2 = mrs(hstr_el2);

	/* We forcibly disable coprocessor and system traps to stay
	 * consistent with the arch_vcpu_init() function.
	 */
	msr(cptr_el2, 0x0);
	msr(hstr_el2, 0x0);

	/* Free Generic Timer Context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
		if ((rc = generic_timer_vcpu_context_deinit(vcpu,
					&arm_gentimer_context(vcpu)))) {
			goto done;
		}
	}

	/* Free VFP context */
	rc = cpu_vcpu_vfp_deinit(vcpu);
	if (rc) {
		goto done;
	}

	/* Free sysregs context */
	rc = cpu_vcpu_sysregs_deinit(vcpu);
	if (rc) {
		goto done;
	}

	/* Free private context */
	vmm_free(vcpu->arch_priv);
	vcpu->arch_priv = NULL;

	rc = VMM_OK;

done:
	msr(cptr_el2, saved_cptr_el2);
	msr(hstr_el2, saved_hstr_el2);
	return rc;
}
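The mrs/msr bracket above deserves a remark: CPTR_EL2 and HSTR_EL2 are saved, cleared for the duration of the teardown (per the comment, to stay consistent with arch_vcpu_init(), presumably so VFP and system-register accesses in the helpers do not trap to EL2), and restored on every exit path through the done label. The same bracket could be factored into a helper; a minimal sketch, reusing the mrs/msr accessors from this example (the helper itself is hypothetical, not Xvisor API):

static int with_el2_traps_disabled(int (*fn)(struct vmm_vcpu *),
				   struct vmm_vcpu *vcpu)
{
	u32 saved_cptr = mrs(cptr_el2);
	u32 saved_hstr = mrs(hstr_el2);
	int rc;

	msr(cptr_el2, 0x0);	/* stop trapping coprocessor accesses */
	msr(hstr_el2, 0x0);	/* stop trapping system-register accesses */

	rc = fn(vcpu);		/* run one teardown step trap-free */

	msr(cptr_el2, saved_cptr);
	msr(hstr_el2, saved_hstr);
	return rc;
}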
Example No. 4
void arch_vcpu_regs_dump(struct vmm_chardev *cdev, struct vmm_vcpu *vcpu)
{
	u32 ite;

	/* For both Normal & Orphan VCPUs */
	__cpu_vcpu_dump_user_reg(cdev, vcpu, arm_regs(vcpu));

	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return;
	}

	vmm_cprintf(cdev, "  User Mode Registers (Banked)\n");
	vmm_cprintf(cdev, "    SP=0x%08x       LR=0x%08x\n",
		    arm_priv(vcpu)->sp_usr, arm_priv(vcpu)->lr_usr);
	vmm_cprintf(cdev, "  Supervisor Mode Registers (Banked)\n");
	vmm_cprintf(cdev, "    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		    arm_priv(vcpu)->sp_svc, arm_priv(vcpu)->lr_svc,
		    arm_priv(vcpu)->spsr_svc);
	vmm_cprintf(cdev, "  Monitor Mode Registers (Banked)\n");
	vmm_cprintf(cdev, "    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		    arm_priv(vcpu)->sp_mon, arm_priv(vcpu)->lr_mon,
		    arm_priv(vcpu)->spsr_mon);
	vmm_cprintf(cdev, "  Abort Mode Registers (Banked)\n");
	vmm_cprintf(cdev, "    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		    arm_priv(vcpu)->sp_abt, arm_priv(vcpu)->lr_abt,
		    arm_priv(vcpu)->spsr_abt);
	vmm_cprintf(cdev, "  Undefined Mode Registers (Banked)\n");
	vmm_cprintf(cdev, "    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		    arm_priv(vcpu)->sp_und, arm_priv(vcpu)->lr_und,
		    arm_priv(vcpu)->spsr_und);
	vmm_cprintf(cdev, "  IRQ Mode Registers (Banked)\n");
	vmm_cprintf(cdev, "    SP=0x%08x       LR=0x%08x       SPSR=0x%08x\n",
		    arm_priv(vcpu)->sp_irq, arm_priv(vcpu)->lr_irq,
		    arm_priv(vcpu)->spsr_irq);
	vmm_cprintf(cdev, "  FIQ Mode Registers (Banked)\n");
	vmm_cprintf(cdev, "    SP=0x%08x       LR=0x%08x       SPSR=0x%08x",
		    arm_priv(vcpu)->sp_fiq, arm_priv(vcpu)->lr_fiq,
		    arm_priv(vcpu)->spsr_fiq);
	for (ite = 0; ite < 5; ite++) {
		if (ite % 3 == 0)
			vmm_cprintf(cdev, "\n");
		vmm_cprintf(cdev, "    R%02d=0x%08x  ", (ite + 8),
			    arm_priv(vcpu)->gpr_fiq[ite]);
	}
	vmm_cprintf(cdev, "\n");
}
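The final loop walks the five banked FIQ general-purpose registers R8-R12 (the bound 5 presumably equals CPU_FIQ_GPR_COUNT), breaking the output line before R8 and R11 via the ite % 3 test. A standalone sanity check of that layout logic, with plain printf standing in for vmm_cprintf (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int gpr_fiq[5] = { 0 };
	int ite;

	for (ite = 0; ite < 5; ite++) {
		if (ite % 3 == 0)
			printf("\n");	/* newline before R8 and R11 */
		printf("    R%02d=0x%08x  ", ite + 8, gpr_fiq[ite]);
	}
	printf("\n");
	return 0;
}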
Example No. 5
int arch_vcpu_regs_deinit(struct vmm_vcpu *vcpu)
{
	int rc;

	/* For both Orphan & Normal VCPUs */
	vmm_memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));

	/* For Orphan VCPUs do nothing else */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Cleanup CP15 */
	if ((rc = cpu_vcpu_cp15_deinit(vcpu))) {
		return rc;
	}

	/* Free super regs */
	vmm_free(vcpu->arch_priv);
	vcpu->arch_priv = NULL;

	return VMM_OK;
}
Example No. 6
int arch_vcpu_deinit(struct vmm_vcpu *vcpu)
{
	int rc;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));

	/* For Orphan VCPUs do nothing else */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Free Generic Timer Context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
		if ((rc = generic_timer_vcpu_context_deinit(vcpu,
					&arm_gentimer_context(vcpu)))) {
			return rc;
		}
	}

	/* Free VFP context */
	rc = cpu_vcpu_vfp_deinit(vcpu);
	if (rc) {
		return rc;
	}

	/* Free sysregs context */
	rc = cpu_vcpu_sysregs_deinit(vcpu);
	if (rc) {
		return rc;
	}

	/* Free private context */
	vmm_free(vcpu->arch_priv);
	vcpu->arch_priv = NULL;

	return VMM_OK;
}
Example No. 7
int arch_vcpu_regs_init(struct vmm_vcpu *vcpu)
{
	u32 ite, cpuid = ARM_CPUID_CORTEXA8;
	/* Initialize User Mode Registers */
	/* For both Orphan & Normal VCPUs */
	vmm_memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	if (vcpu->is_normal) {
		arm_regs(vcpu)->cpsr  = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_USER;
	} else {
		arm_regs(vcpu)->cpsr  = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_SUPERVISOR;
		arm_regs(vcpu)->sp = vcpu->start_sp;
	}
	/* Initialize Supervisor Mode Registers */
	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}
	if (!vcpu->reset_count) {
		vcpu->arch_priv = vmm_malloc(sizeof(arm_priv_t));
		if (!vcpu->arch_priv) {
			return VMM_ENOMEM;
		}
		vmm_memset(arm_priv(vcpu), 0, sizeof(arm_priv_t));
		arm_priv(vcpu)->cpsr = CPSR_ASYNC_ABORT_DISABLED | 
				   CPSR_IRQ_DISABLED |
				   CPSR_FIQ_DISABLED | 
				   CPSR_MODE_SUPERVISOR;
	} else {
		for (ite = 0; ite < CPU_FIQ_GPR_COUNT; ite++) {
			arm_priv(vcpu)->gpr_usr[ite] = 0x0;
			arm_priv(vcpu)->gpr_fiq[ite] = 0x0;
		}
		arm_priv(vcpu)->sp_usr = 0x0;
		arm_priv(vcpu)->lr_usr = 0x0;
		arm_priv(vcpu)->sp_svc = 0x0;
		arm_priv(vcpu)->lr_svc = 0x0;
		arm_priv(vcpu)->spsr_svc = 0x0;
		arm_priv(vcpu)->sp_mon = 0x0;
		arm_priv(vcpu)->lr_mon = 0x0;
		arm_priv(vcpu)->spsr_mon = 0x0;
		arm_priv(vcpu)->sp_abt = 0x0;
		arm_priv(vcpu)->lr_abt = 0x0;
		arm_priv(vcpu)->spsr_abt = 0x0;
		arm_priv(vcpu)->sp_und = 0x0;
		arm_priv(vcpu)->lr_und = 0x0;
		arm_priv(vcpu)->spsr_und = 0x0;
		arm_priv(vcpu)->sp_irq = 0x0;
		arm_priv(vcpu)->lr_irq = 0x0;
		arm_priv(vcpu)->spsr_irq = 0x0;
		arm_priv(vcpu)->sp_fiq = 0x0;
		arm_priv(vcpu)->lr_fiq = 0x0;
		arm_priv(vcpu)->spsr_fiq = 0x0;
		cpu_vcpu_cpsr_update(vcpu, 
				     arm_regs(vcpu), 
				     (CPSR_ZERO_MASK |
					CPSR_ASYNC_ABORT_DISABLED | 
					CPSR_IRQ_DISABLED |
					CPSR_FIQ_DISABLED | 
					CPSR_MODE_SUPERVISOR),
				     CPSR_ALLBITS_MASK);
	}
	if (!vcpu->reset_count) {
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			break;
		default:
			break;
		};
	}
#ifdef CONFIG_ARM32_FUNCSTATS
	for (ite = 0; ite < ARM_FUNCSTAT_MAX; ite++) {
		arm_priv(vcpu)->funcstat[ite].function_name = NULL;
		arm_priv(vcpu)->funcstat[ite].entry_count = 0;
		arm_priv(vcpu)->funcstat[ite].exit_count = 0;
		arm_priv(vcpu)->funcstat[ite].time = 0;
	}
#endif
	return cpu_vcpu_cp15_init(vcpu, cpuid);
}
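The arm_feature()/arm_set_feature() helpers used throughout these examples behave like bit operations on the per-VCPU arm_priv(vcpu)->features word. A plausible definition consistent with that usage (an assumption for illustration; the real macros live in Xvisor's headers and may differ in detail):

/* Assumed shape: 'features' as a 64-bit bitmap indexed by ARM_FEATURE_* */
#define arm_set_feature(vcpu, feat) \
	(arm_priv(vcpu)->features |= (0x1ULL << (feat)))
#define arm_feature(vcpu, feat) \
	(arm_priv(vcpu)->features & (0x1ULL << (feat)))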
Example No. 8
void arch_vcpu_switch(struct vmm_vcpu *tvcpu, 
		      struct vmm_vcpu *vcpu, 
		      arch_regs_t *regs)
{
	u32 ite;
	irq_flags_t flags;

	/* Save user registers & banked registers */
	if (tvcpu) {
		arm_regs(tvcpu)->pc = regs->pc;
		arm_regs(tvcpu)->lr = regs->lr;
		arm_regs(tvcpu)->sp = regs->sp;
		for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
			arm_regs(tvcpu)->gpr[ite] = regs->gpr[ite];
		}
		arm_regs(tvcpu)->pstate = regs->pstate;
		if (tvcpu->is_normal) {
			/* Update last host CPU */
			arm_priv(tvcpu)->last_hcpu = vmm_smp_processor_id();
			/* Save VGIC context */
			arm_vgic_save(tvcpu);
			/* Save sysregs context */
			cpu_vcpu_sysregs_save(tvcpu);
			/* Save VFP and SIMD context */
			cpu_vcpu_vfp_save(tvcpu);
			/* Save generic timer */
			if (arm_feature(tvcpu, ARM_FEATURE_GENERIC_TIMER)) {
				generic_timer_vcpu_context_save(tvcpu,
						arm_gentimer_context(tvcpu));
			}
		}
	}
	/* Restore user registers & special registers */
	regs->pc = arm_regs(vcpu)->pc;
	regs->lr = arm_regs(vcpu)->lr;
	regs->sp = arm_regs(vcpu)->sp;
	for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
		regs->gpr[ite] = arm_regs(vcpu)->gpr[ite];
	}
	regs->pstate = arm_regs(vcpu)->pstate;
	if (vcpu->is_normal) {
		/* Restore hypervisor context */
		vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
		msr(hcr_el2, arm_priv(vcpu)->hcr);
		vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);
		msr(cptr_el2, arm_priv(vcpu)->cptr);
		msr(hstr_el2, arm_priv(vcpu)->hstr);
		/* Restore Stage2 MMU context */
		mmu_lpae_stage2_chttbl(vcpu->guest->id, 
			       arm_guest_priv(vcpu->guest)->ttbl);
		/* Restore generic timer */
		if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
			generic_timer_vcpu_context_restore(vcpu,
						arm_gentimer_context(vcpu));
		}
		/* Restore VFP and SIMD context */
		cpu_vcpu_vfp_restore(vcpu);
		/* Restore sysregs context */
		cpu_vcpu_sysregs_restore(vcpu);
		/* Restore VGIC context */
		arm_vgic_restore(vcpu);
		/* Flush TLB if moved to new host CPU */
		if (arm_priv(vcpu)->last_hcpu != vmm_smp_processor_id()) {
			/* Invalidate all guest TLB entries because
			 * we might have stale guest TLB entries from
			 * our previous run on this host CPU
			 */
			inv_tlb_guest_allis();
			/* Ensure changes are visible */
			dsb();
			isb();
		}
	}
	/* Clear exclusive monitor */
	clrex();
}
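Note that hcr is the only EL2 register read under a lock here: other contexts can legitimately update it while a switch is in flight, for example to pend virtual interrupts (the HCR_VSE/VI/VF bits cleared in Example No. 9). A hedged sketch of such an update path; the helper and its running parameter are hypothetical, while the lock, mask, and accessors are the ones used above:

static void vcpu_pend_virq_sketch(struct vmm_vcpu *vcpu, bool running)
{
	irq_flags_t flags;

	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
	arm_priv(vcpu)->hcr |= HCR_VI_MASK;	/* mark virtual IRQ pending */
	if (running) {
		/* the VCPU currently owns hcr_el2 on this CPU: apply now */
		msr(hcr_el2, arm_priv(vcpu)->hcr);
	}
	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);
}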
Example No. 9
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	int rc;
	u32 cpuid = 0;
	const char *attr;
	irq_flags_t flags;
	u32 phys_timer_irq, virt_timer_irq;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	arm_regs(vcpu)->sp = vcpu->stack_va + vcpu->stack_sz - 8;
	if (!vcpu->is_normal) {
		arm_regs(vcpu)->pstate = PSR_MODE64_EL2h;
		arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
		return VMM_OK;
	}

	/* Following initialization for normal VCPUs only */
	rc = vmm_devtree_read_string(vcpu->node, 
			VMM_DEVTREE_COMPATIBLE_ATTR_NAME, &attr);
	if (rc) {
		goto fail;
	}
	if (strcmp(attr, "armv7a,cortex-a8") == 0) {
		cpuid = ARM_CPUID_CORTEXA8;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a9") == 0) {
		cpuid = ARM_CPUID_CORTEXA9;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a15") == 0) {
		cpuid = ARM_CPUID_CORTEXA15;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a7") == 0) {
		cpuid = ARM_CPUID_CORTEXA7;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv8,generic") == 0) {
		cpuid = ARM_CPUID_ARMV8;
	} else {
		rc = VMM_EINVALID;
		goto fail;
	}
	if (arm_regs(vcpu)->pstate == PSR_MODE32) {
		/* Check if the host supports A32 mode @ EL1 */
		if (!cpu_supports_el1_a32()) {
			vmm_printf("Host does not support AArch32 mode\n");
			rc = VMM_ENOTAVAIL;
			goto fail;
		}
		arm_regs(vcpu)->pstate |= PSR_ZERO_MASK;
		arm_regs(vcpu)->pstate |= PSR_MODE32_SUPERVISOR;
	} else {
		arm_regs(vcpu)->pstate |= PSR_MODE64_DEBUG_DISABLED;
		arm_regs(vcpu)->pstate |= PSR_MODE64_EL1h;
	}
	arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_IRQ_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_FIQ_DISABLED;

	/* First time initialization of private context */
	if (!vcpu->reset_count) {
		/* Alloc private context */
		vcpu->arch_priv = vmm_zalloc(sizeof(struct arm_priv));
		if (!vcpu->arch_priv) {
			rc = VMM_ENOMEM;
			goto fail;
		}
		/* Set up the CPUID value the VCPU expects in its MIDR
		 * register, as per HW specifications.
		 */
		arm_priv(vcpu)->cpuid = cpuid;
		/* Initialize VCPU features */
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA7:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA15:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_ARMV8:
			arm_set_feature(vcpu, ARM_FEATURE_V8);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			break;
		default:
			break;
		};
		/* Some features automatically imply others: */
		if (arm_feature(vcpu, ARM_FEATURE_V7)) {
			arm_set_feature(vcpu, ARM_FEATURE_VAPA);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_MPIDR);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_V6K);
			} else {
				arm_set_feature(vcpu, ARM_FEATURE_V6);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6K)) {
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_MVFR);
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6)) {
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V5)) {
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
		}
		if (arm_feature(vcpu, ARM_FEATURE_M)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_ARM_DIV)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP4)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP3)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
		}
		if (arm_feature(vcpu, ARM_FEATURE_LPAE)) {
			arm_set_feature(vcpu, ARM_FEATURE_PXN);
		}
		/* Initialize Hypervisor Configuration */
		INIT_SPIN_LOCK(&arm_priv(vcpu)->hcr_lock);
		arm_priv(vcpu)->hcr =  (HCR_TSW_MASK |
					HCR_TACR_MASK |
					HCR_TIDCP_MASK |
					HCR_TSC_MASK |
					HCR_TWE_MASK |
					HCR_TWI_MASK |
					HCR_AMO_MASK |
					HCR_IMO_MASK |
					HCR_FMO_MASK |
					HCR_SWIO_MASK |
					HCR_VM_MASK);
		if (!(arm_regs(vcpu)->pstate & PSR_MODE32)) {
			arm_priv(vcpu)->hcr |= HCR_RW_MASK;
		}
		/* Initialize Coprocessor Trap Register */
		arm_priv(vcpu)->cptr = CPTR_TTA_MASK;
		arm_priv(vcpu)->cptr |= CPTR_TFP_MASK;
		/* Initialize Hypervisor System Trap Register */
		arm_priv(vcpu)->hstr = 0;
		/* Cleanup VGIC context first time */
		arm_vgic_cleanup(vcpu);
	}

	/* Clear virtual exception bits in HCR */
	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
	arm_priv(vcpu)->hcr &= ~(HCR_VSE_MASK | 
				 HCR_VI_MASK | 
				 HCR_VF_MASK);
	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	/* Set last host CPU to invalid value */
	arm_priv(vcpu)->last_hcpu = 0xFFFFFFFF;

	/* Initialize sysregs context */
	rc = cpu_vcpu_sysregs_init(vcpu, cpuid);
	if (rc) {
		goto fail_sysregs_init;
	}

	/* Initialize VFP context */
	rc = cpu_vcpu_vfp_init(vcpu);
	if (rc) {
		goto fail_vfp_init;
	}

	/* Initialize generic timer context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
		if (vmm_devtree_read_u32(vcpu->node, 
					 "gentimer_phys_irq",
					 &phys_timer_irq)) {
			phys_timer_irq = 0;
		}
		if (vmm_devtree_read_u32(vcpu->node, 
					 "gentimer_virt_irq",
					 &virt_timer_irq)) {
			virt_timer_irq = 0;
		}
		rc = generic_timer_vcpu_context_init(vcpu,
						&arm_gentimer_context(vcpu),
						phys_timer_irq,
						virt_timer_irq);
		if (rc) {
			goto fail_gentimer_init;
		}
	}

	return VMM_OK;

fail_gentimer_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_vfp_deinit(vcpu);
	}
fail_vfp_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_sysregs_deinit(vcpu);
	}
fail_sysregs_init:
	if (!vcpu->reset_count) {
		vmm_free(vcpu->arch_priv);
		vcpu->arch_priv = NULL;
	}
fail:
	return rc;
}
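The fail_* ladder above is the standard C goto-unwind idiom: each successfully initialized context gets a label that releases it, and a failure jumps to the label just past its own resource, so only what was actually set up is torn down (each step is additionally guarded by !vcpu->reset_count, presumably because a re-init after reset must not free contexts that remain live). A self-contained sketch of the bare idiom, with stub resources standing in for sysregs/VFP/timer:

#include <stdio.h>

static int acquire_a(void) { return 0; }	/* stub: succeeds */
static void release_a(void) { printf("released A\n"); }
static int acquire_b(void) { return -1; }	/* stub: fails */

static int init_sketch(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		goto fail;
	rc = acquire_b();
	if (rc)
		goto fail_a;	/* B failed: unwind A only */
	return 0;

fail_a:
	release_a();
fail:
	return rc;
}

int main(void)
{
	printf("init_sketch -> %d\n", init_sketch());
	return 0;
}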
Example No. 10
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	u32 ite, cpuid;
	const char *attr;

	/* Initialize User Mode Registers */
	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	arm_regs(vcpu)->sp_excp = vcpu->stack_va + vcpu->stack_sz - 4;
	if (vcpu->is_normal) {
		arm_regs(vcpu)->cpsr  = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_USER;
		arm_regs(vcpu)->sp = 0;
	} else {
		arm_regs(vcpu)->cpsr  = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_SUPERVISOR;
		arm_regs(vcpu)->sp = arm_regs(vcpu)->sp_excp;
	}
	/* Initialize Supervisor Mode Registers */
	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}
	attr = vmm_devtree_attrval(vcpu->node, 
				   VMM_DEVTREE_COMPATIBLE_ATTR_NAME);
	if (!attr) {
		return VMM_EFAIL;
	}
	if (strcmp(attr, "armv5te,arm926ej") == 0) {
		cpuid = ARM_CPUID_ARM926;
	} else if (strcmp(attr, "armv6,arm11mp") == 0) {
		cpuid = ARM_CPUID_ARM11MPCORE;
	} else if (strcmp(attr, "armv7a,cortex-a8") == 0) {
		cpuid = ARM_CPUID_CORTEXA8;
	} else if (strcmp(attr, "armv7a,cortex-a9") == 0) {
		cpuid = ARM_CPUID_CORTEXA9;
	} else {
		return VMM_EFAIL;
	}
	if (!vcpu->reset_count) {
		vcpu->arch_priv = vmm_zalloc(sizeof(arm_priv_t));
		if (!vcpu->arch_priv) {
			return VMM_ENOMEM;
		}
		arm_priv(vcpu)->cpsr = CPSR_ASYNC_ABORT_DISABLED | 
				   CPSR_IRQ_DISABLED |
				   CPSR_FIQ_DISABLED | 
				   CPSR_MODE_SUPERVISOR;
	} else {
		for (ite = 0; ite < CPU_FIQ_GPR_COUNT; ite++) {
			arm_priv(vcpu)->gpr_usr[ite] = 0x0;
			arm_priv(vcpu)->gpr_fiq[ite] = 0x0;
		}
		arm_priv(vcpu)->sp_usr = 0x0;
		arm_priv(vcpu)->lr_usr = 0x0;
		arm_priv(vcpu)->sp_svc = 0x0;
		arm_priv(vcpu)->lr_svc = 0x0;
		arm_priv(vcpu)->spsr_svc = 0x0;
		arm_priv(vcpu)->sp_mon = 0x0;
		arm_priv(vcpu)->lr_mon = 0x0;
		arm_priv(vcpu)->spsr_mon = 0x0;
		arm_priv(vcpu)->sp_abt = 0x0;
		arm_priv(vcpu)->lr_abt = 0x0;
		arm_priv(vcpu)->spsr_abt = 0x0;
		arm_priv(vcpu)->sp_und = 0x0;
		arm_priv(vcpu)->lr_und = 0x0;
		arm_priv(vcpu)->spsr_und = 0x0;
		arm_priv(vcpu)->sp_irq = 0x0;
		arm_priv(vcpu)->lr_irq = 0x0;
		arm_priv(vcpu)->spsr_irq = 0x0;
		arm_priv(vcpu)->sp_fiq = 0x0;
		arm_priv(vcpu)->lr_fiq = 0x0;
		arm_priv(vcpu)->spsr_fiq = 0x0;
		cpu_vcpu_cpsr_update(vcpu, 
				     arm_regs(vcpu), 
				     (CPSR_ZERO_MASK |
					CPSR_ASYNC_ABORT_DISABLED | 
					CPSR_IRQ_DISABLED |
					CPSR_FIQ_DISABLED | 
					CPSR_MODE_SUPERVISOR),
				     CPSR_ALLBITS_MASK);
	}
	if (!vcpu->reset_count) {
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_ARM926:
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_CACHE_TEST_CLEAN);
			break;
		case ARM_CPUID_ARM11MPCORE:
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VAPA);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			break;
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		default:
			break;
		};

		/* Some features automatically imply others: */
		if (arm_feature(vcpu, ARM_FEATURE_V7)) {
			arm_set_feature(vcpu, ARM_FEATURE_VAPA);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_MPIDR);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_V6K);
			} else {
				arm_set_feature(vcpu, ARM_FEATURE_V6);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6K)) {
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_MVFR);
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6)) {
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V5)) {
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
		}
		if (arm_feature(vcpu, ARM_FEATURE_M)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_ARM_DIV)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP4)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP3)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
		}
		if (arm_feature(vcpu, ARM_FEATURE_LPAE)) {
			arm_set_feature(vcpu, ARM_FEATURE_PXN);
		}
	}

	return cpu_vcpu_cp15_init(vcpu, cpuid);
}