Example #1
void NesCpuTranslator::mRestoreInternalFlags()
{
#if !defined(FRAME_POINTER_FOR_GDB)
	__ msr(CPSR_f, Operand(mInternalFlagsCopy));
#else
	__ ldr(ip, MemOperand(mDataBase, offsetof(NesCpuRecData,internalFlagsCopy)));
	__ msr(CPSR_f, Operand(ip));
#endif
}
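A note on the __ prefix: in assembler generators of this style, __ is conventionally a macro that expands to the assembler object, so each __-prefixed line emits one instruction. A minimal sketch of that convention (the macro body below is an assumption, not taken from this codebase):

#define __ masm->
/* so that */
__ msr(CPSR_f, Operand(ip));
/* expands to */
masm->msr(CPSR_f, Operand(ip));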
Example #2
int arch_vcpu_deinit(struct vmm_vcpu *vcpu)
{
	int rc = VMM_OK;
	u32 saved_cptr_el2, saved_hstr_el2;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));

	/* For Orphan VCPUs do nothing else */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Save CPTR_EL2 and HSTR_EL2 */
	saved_cptr_el2 = mrs(cptr_el2);
	saved_hstr_el2 = mrs(hstr_el2);

	/* We force-disable coprocessor and system traps to be
	 * consistent with the arch_vcpu_init() function.
	 */
	msr(cptr_el2, 0x0);
	msr(hstr_el2, 0x0);

	/* Free Generic Timer Context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
		if ((rc = generic_timer_vcpu_context_deinit(vcpu,
					&arm_gentimer_context(vcpu)))) {
			goto done;
		}
	}

	/* Free VFP context */
	rc = cpu_vcpu_vfp_deinit(vcpu);
	if (rc) {
		goto done;
	}

	/* Free sysregs context */
	rc = cpu_vcpu_sysregs_deinit(vcpu);
	if (rc) {
		goto done;
	}

	/* Free private context */
	vmm_free(vcpu->arch_priv);
	vcpu->arch_priv = NULL;

	rc = VMM_OK;

done:
	msr(cptr_el2, saved_cptr_el2);
	msr(hstr_el2, saved_hstr_el2);
	return rc;
}
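The mrs()/msr() helpers used above read and write AArch64 system registers by name. A hedged sketch of how such accessors are commonly built from GCC-style inline assembly (Xvisor's real definitions may differ):

#define mrs(spr)	({ u64 _v;					\
			   asm volatile("mrs %0, " #spr : "=r"(_v));	\
			   _v; })

#define msr(spr, val)	do {						\
				u64 _v = (u64)(val);			\
				asm volatile("msr " #spr ", %0"		\
					     : : "r"(_v));		\
			} while (0)

With these, msr(cptr_el2, 0x0) expands to a single "msr cptr_el2, x<n>" instruction.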
Example #3
IMPLEMENT
bool Uart::startup(Address _port, int __irq)
{
  port = _port;
  _irq  = __irq;

  Proc::Status o = Proc::cli_save();

  if (!valid())
    {
      Proc::sti_restore(o);
      fail();
      return false;
    }

  ier(Base_ier_bits); /* disable all RS-232 interrupts */
  mcr(0x0b);         /* out2, rts, and dtr enabled */
  fcr(1);            /* enable fifo */
  fcr(0x07);         /* clear rcv xmit fifo */
  fcr(1);            /* enable fifo */
  lcr(0);            /* clear line control register */

  /* clear all interrupts */
  /*read*/ msr(); /* IRQ ID 0 */
  /*read*/ iir(); /* IRQ ID 1 */
  /*read*/ trb(); /* IRQ ID 2 */
  /*read*/ lsr(); /* IRQ ID 3 */

  while(lsr() & 1/*DATA READY*/) /*read*/ trb();
  Proc::sti_restore(o);
  return true;
}
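For context: ier(), fcr(), lcr(), lsr(), trb(), etc. wrap the 16550's registers, and reading msr()/iir()/trb()/lsr() acknowledges each pending interrupt class. A hedged companion sketch of polled output using the same accessors (the write-side trb() overload is an assumption for illustration):

IMPLEMENT
void Uart::out_char(char c)
{
  while (!(lsr() & 0x20))  /* LSR bit 5: transmit holding register empty */
    ;
  trb(c);                  /* hypothetical write overload of trb() */
}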
Example #4
void MacroAssembler::lowLevelDebug(const char *s,
								   Register ra,
								   Register rb,
								   Register rc)
{
	Q_ASSERT(ra.code() < 13 && rb.code() < 13 && rc.code() < 13);
	int preserved = 0x1fff | lr.bit();
	stm(db_w, sp, preserved);
	add(fp, sp, Operand(12*4));
	mrs(r4, CPSR);

	Label omitString;
	b(&omitString);

	int sPtr = intptr_t(buffer_ + pcOffset());
	do {
		db(*s);
	} while (*(s++));
	while (pcOffset() & 3)
		db('\0');

	bind(&omitString);
	ldr(r3, MemOperand(sp, rc.code()*4));
	ldr(r2, MemOperand(sp, rb.code()*4));
	ldr(r1, MemOperand(sp, ra.code()*4));
	mov(r0, Operand(sPtr));

	void (*qDebugPtr)(const char *,...) = &qDebug;
	mov(ip, Operand(intptr_t(qDebugPtr)));
	blx(ip);

	msr(CPSR_f, Operand(r4));
	ldm(ia_w, sp, preserved);
}
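Because lowLevelDebug() preserves r0-r12, lr, and the condition flags, it can be dropped between emitted instructions without disturbing the code being generated. A hedged usage sketch (the call site is illustrative, not from the source):

/* trace up to three registers through qDebug-style formatting */
masm->lowLevelDebug("a=%d x=%d y=%d", r5, r6, r7);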
Example #5
void cpu_vcpu_spsr32_update(struct vmm_vcpu *vcpu, u32 mode, u32 new_spsr)
{
	struct arm_priv_sysregs *s = &arm_priv(vcpu)->sysregs;

	switch (mode) {
	case CPSR_MODE_ABORT:
		msr(spsr_abt, new_spsr);
		s->spsr_abt = new_spsr;
		break;
	case CPSR_MODE_UNDEFINED:
		msr(spsr_und, new_spsr);
		s->spsr_und = new_spsr;
		break;
	case CPSR_MODE_SUPERVISOR:
		msr(spsr_el1, new_spsr);
		s->spsr_el1 = new_spsr;
		break;
	case CPSR_MODE_IRQ:
		msr(spsr_irq, new_spsr);
		s->spsr_irq = new_spsr;
		break;
	case CPSR_MODE_FIQ:
		msr(spsr_fiq, new_spsr);
		s->spsr_fiq = new_spsr;
		break;
	case CPSR_MODE_HYPERVISOR:
		msr(spsr_el2, new_spsr);
		break;
	default:
		break;
	};
}
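The natural counterpart is a read accessor that returns the shadowed SPSR for a given mode. A hedged sketch following the same switch structure (the function name is an assumption):

u32 cpu_vcpu_spsr32_read(struct vmm_vcpu *vcpu, u32 mode)
{
	struct arm_priv_sysregs *s = &arm_priv(vcpu)->sysregs;

	switch (mode) {
	case CPSR_MODE_ABORT:      return s->spsr_abt;
	case CPSR_MODE_UNDEFINED:  return s->spsr_und;
	case CPSR_MODE_SUPERVISOR: return s->spsr_el1;
	case CPSR_MODE_IRQ:        return s->spsr_irq;
	case CPSR_MODE_FIQ:        return s->spsr_fiq;
	case CPSR_MODE_HYPERVISOR: return mrs(spsr_el2);
	default:                   return 0;
	}
}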
Example #6
  // -----------------------------------------
  // private functions
  // -----------------------------------------
  void UnscentedTransform::generateSigmaPoints (std::vector<VectorXd>& sigmaPoints, std::vector<double>& weights)
  {
    int L = x.size();

    // calculate values which are needed more often
    double alphaPow2 = alpha*alpha;
    double lambda = alphaPow2 * (L + kappa) - L;
    double lPlusLambda = L + lambda;

    // calculate matrix square root ----------------
    
    // matrix which should form the sigma points (matrix to take the
    // root of)
    MatrixXd msr(L,L);
    msr = lPlusLambda * Px;

    // calculate the root of the matrix; this is done here by standard
    // Cholesky decomposition (this works for symmetric,
    // positive-definite matrices, which Px is assumed to be)
    msr = msr.llt().matrixL();

    // generate sigma points -----------------------

    // chi_0
    sigmaPoints.push_back(x);

    // chi_i = x + msr.col(i-1), i = 1,...,L
    for (int i = 1; i <= L; i++) 
      sigmaPoints.push_back(x + msr.col(i-1));

    // chi_i = x - msr.col(i-L-1), i = L+1,...,2L
    for (int i = 1; i <= L; i++) 
      sigmaPoints.push_back(x - msr.col(i-1));

    // set weights --------------------------------

    // W_0 for mean
    weights.push_back(lambda / lPlusLambda);

    // W_1, W_2, ..., W_2L for mean AND covariance
    for (int i = 1; i <= 2*L; i++)
      weights.push_back(1 / (2 * lPlusLambda));
    
    // W_0 for covariance
    weights.push_back(lambda/lPlusLambda + 1 - alphaPow2 + beta);
  }
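To make the weight layout concrete: W_0 for the mean sits at index 0, the shared weights at indices 1..2L, and W_0 for the covariance is appended last at index 2L+1. A hedged sketch of recombining the sigma points under that layout (reusing the names from the function above):

    VectorXd mean = weights[0] * sigmaPoints[0];
    for (int i = 1; i <= 2*L; i++)
      mean += weights[i] * sigmaPoints[i];

    MatrixXd cov = MatrixXd::Zero(L, L);
    VectorXd d0 = sigmaPoints[0] - mean;
    cov += weights[2*L + 1] * d0 * d0.transpose(); // covariance W_0 is last
    for (int i = 1; i <= 2*L; i++) {
      VectorXd d = sigmaPoints[i] - mean;
      cov += weights[i] * d * d.transpose();
    }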
Example #7
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	u64 hcr;
	bool update_hcr;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);

	hcr = arm_priv(vcpu)->hcr;
	update_hcr = FALSE;

	switch(irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	default:
		break;
	};

	if (update_hcr) {
		arm_priv(vcpu)->hcr = hcr;
		if (vcpu == vmm_scheduler_current_vcpu()) {
			msr(hcr_el2, hcr);
		}
	}

	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}
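The comments above note that the VI/VF bits are cleared on deassertion; a hedged sketch of that counterpart, mirroring the hcr_lock pattern (not the verbatim Xvisor function):

int arch_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	irq_flags_t flags;

	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);

	switch (irq_no) {
	case CPU_EXTERNAL_IRQ:
		arm_priv(vcpu)->hcr &= ~HCR_VI_MASK;
		break;
	case CPU_EXTERNAL_FIQ:
		arm_priv(vcpu)->hcr &= ~HCR_VF_MASK;
		break;
	default:
		break;
	}

	/* Only a currently running VCPU needs the change applied to hardware */
	if (vcpu == vmm_scheduler_current_vcpu()) {
		msr(hcr_el2, arm_priv(vcpu)->hcr);
	}

	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}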
Example #8
int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
			  arch_regs_t *regs, 
			  u32 irq_no, u64 reason)
{
	int rc;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	/* Undefined, Data abort, and Prefetch abort 
	 * can only be emulated in normal context.
	 */
	switch(irq_no) {
	case CPU_UNDEF_INST_IRQ:
		rc = cpu_vcpu_inject_undef(vcpu, regs);
		break;
	case CPU_PREFETCH_ABORT_IRQ:
		rc = cpu_vcpu_inject_pabt(vcpu, regs);
		break;
	case CPU_DATA_ABORT_IRQ:
		rc = cpu_vcpu_inject_dabt(vcpu, regs, (virtual_addr_t)reason);
		break;
	default:
		rc = VMM_OK;
		break;
	};

	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
	msr(hcr_el2, arm_priv(vcpu)->hcr);
	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	return rc;
}
Example #9
void CSumItApplication::ShowHelp()
{
	try
	{
		BEntry entry;
		gAppDir->FindEntry("Docs", &entry);
		BDirectory docdir(&entry);
		docdir.FindEntry("index.html", &entry);
		
		if (entry.InitCheck() || !entry.Exists())
			THROW((errNoDocumentation));
		
		entry_ref ref;
		entry.GetRef(&ref);
		
		BMessage msg(B_REFS_RECEIVED);
		msg.AddRef("refs", &ref);

		entry_ref browser;
		if (be_roster->FindApp("text/html", &browser) &&
				be_roster->FindApp("application/x-vnd.Haiku-WebPositive", &browser))
			THROW((errNoBrowser));
		
		if (be_roster->IsRunning(&browser))
		{
			BMessenger msr(NULL, be_roster->TeamFor(&browser));
			msr.SendMessage(&msg);
		}
		else if (be_roster->Launch(&browser, &msg))
			THROW((errNoBrowser));
	}
	catch (CErr& e)
	{
		e.DoError();
	}
} /* CSumItApplication::ShowHelp */
Example #10
void arch_vcpu_switch(struct vmm_vcpu *tvcpu, 
		      struct vmm_vcpu *vcpu, 
		      arch_regs_t *regs)
{
	u32 ite;
	irq_flags_t flags;

	/* Save user registers & banked registers */
	if (tvcpu) {
		arm_regs(tvcpu)->pc = regs->pc;
		arm_regs(tvcpu)->lr = regs->lr;
		arm_regs(tvcpu)->sp = regs->sp;
		for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
			arm_regs(tvcpu)->gpr[ite] = regs->gpr[ite];
		}
		arm_regs(tvcpu)->pstate = regs->pstate;
		if (tvcpu->is_normal) {
			/* Update last host CPU */
			arm_priv(tvcpu)->last_hcpu = vmm_smp_processor_id();
			/* Save VGIC context */
			arm_vgic_save(tvcpu);
			/* Save sysregs context */
			cpu_vcpu_sysregs_save(tvcpu);
			/* Save VFP and SIMD context */
			cpu_vcpu_vfp_save(tvcpu);
			/* Save generic timer */
			if (arm_feature(tvcpu, ARM_FEATURE_GENERIC_TIMER)) {
				generic_timer_vcpu_context_save(tvcpu,
						arm_gentimer_context(tvcpu));
			}
		}
	}
	/* Restore user registers & special registers */
	regs->pc = arm_regs(vcpu)->pc;
	regs->lr = arm_regs(vcpu)->lr;
	regs->sp = arm_regs(vcpu)->sp;
	for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
		regs->gpr[ite] = arm_regs(vcpu)->gpr[ite];
	}
	regs->pstate = arm_regs(vcpu)->pstate;
	if (vcpu->is_normal) {
		/* Restore hypervisor context */
		vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
		msr(hcr_el2, arm_priv(vcpu)->hcr);
		vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);
		msr(cptr_el2, arm_priv(vcpu)->cptr);
		msr(hstr_el2, arm_priv(vcpu)->hstr);
		/* Restore Stage2 MMU context */
		mmu_lpae_stage2_chttbl(vcpu->guest->id, 
			       arm_guest_priv(vcpu->guest)->ttbl);
		/* Restore generic timer */
		if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
			generic_timer_vcpu_context_restore(vcpu,
						arm_gentimer_context(vcpu));
		}
		/* Restore VFP and SIMD context */
		cpu_vcpu_vfp_restore(vcpu);
		/* Restore sysregs context */
		cpu_vcpu_sysregs_restore(vcpu);
		/* Restore VGIC context */
		arm_vgic_restore(vcpu);
		/* Flush TLB if moved to new host CPU */
		if (arm_priv(vcpu)->last_hcpu != vmm_smp_processor_id()) {
			/* Invalidate all guest TLB entries because
			 * we might have stale guest TLB entries from
			 * a previous run on this host CPU
			 */
			inv_tlb_guest_allis();
			/* Ensure changes are visible */
			dsb();
			isb();
		}
	}
	/* Clear exclusive monitor */
	clrex();
}
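The dsb()/isb()/clrex() helpers at the end map to single AArch64 instructions; a hedged sketch (real headers may parameterize the barrier domain):

#define dsb()	asm volatile("dsb sy" : : : "memory")
#define isb()	asm volatile("isb" : : : "memory")
#define clrex()	asm volatile("clrex" : : : "memory")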
Example #11
static inline uint64_t pp0Energy(int msrFile) {
    return msr(msrFile, 0x639);  /* MSR_PP0_ENERGY_STATUS */
}
Example #12
static inline uint64_t packageEnergy(int msrFile) {
    return msr(msrFile, 0x611);  /* MSR_PKG_ENERGY_STATUS */
}
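Both examples assume an msr(int, uint32_t) helper that reads a 64-bit model-specific register through an already-open /dev/cpu/<n>/msr file descriptor, with the MSR address used as the file offset. A hedged sketch of such a helper:

#include <stdint.h>
#include <unistd.h>

/* Read one 64-bit MSR; 'reg' is the MSR address, used as the pread offset. */
static uint64_t msr(int msrFile, uint32_t reg) {
    uint64_t value = 0;
    if (pread(msrFile, &value, sizeof(value), (off_t)reg) != (ssize_t)sizeof(value))
        return 0;  /* simplistic error handling for this sketch */
    return value;
}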
Example #13
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	int rc = VMM_OK;
	u32 cpuid = 0;
	const char *attr;
	irq_flags_t flags;
	u32 saved_cptr_el2, saved_hstr_el2;
	u32 phys_timer_irq, virt_timer_irq;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	arm_regs(vcpu)->sp = vcpu->stack_va + vcpu->stack_sz - 8;
	arm_regs(vcpu)->sp = arm_regs(vcpu)->sp & ~0x7;
	if (!vcpu->is_normal) {
		arm_regs(vcpu)->pstate = PSR_MODE64_EL2h;
		arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
		return VMM_OK;
	}

	/* Save CPTR_EL2 and HSTR_EL2 */
	saved_cptr_el2 = mrs(cptr_el2);
	saved_hstr_el2 = mrs(hstr_el2);

	/* A VCPU running on a different host CPU can be reset
	 * using a sync IPI. This means we can reach here while the
	 * VCPU is running and coprocessor/system traps are enabled.
	 *
	 * We force-disable coprocessor and system traps to ensure
	 * that we don't touch coprocessor and system registers
	 * while traps are enabled.
	 */
	msr(cptr_el2, 0x0);
	msr(hstr_el2, 0x0);

	/* Following initialization for normal VCPUs only */
	rc = vmm_devtree_read_string(vcpu->node,
			VMM_DEVTREE_COMPATIBLE_ATTR_NAME, &attr);
	if (rc) {
		goto done;
	}
	if (strcmp(attr, "armv7a,cortex-a8") == 0) {
		cpuid = ARM_CPUID_CORTEXA8;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a9") == 0) {
		cpuid = ARM_CPUID_CORTEXA9;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a15") == 0) {
		cpuid = ARM_CPUID_CORTEXA15;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a7") == 0) {
		cpuid = ARM_CPUID_CORTEXA7;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv8,generic") == 0) {
		cpuid = ARM_CPUID_ARMV8;
	} else {
		rc = VMM_EINVALID;
		goto done;
	}
	if (arm_regs(vcpu)->pstate == PSR_MODE32) {
		/* Check if the host supports A32 mode @ EL1 */
		if (!cpu_supports_el1_a32()) {
			vmm_printf("Host does not support AArch32 mode\n");
			rc = VMM_ENOTAVAIL;
			goto done;
		}
		arm_regs(vcpu)->pstate |= PSR_ZERO_MASK;
		arm_regs(vcpu)->pstate |= PSR_MODE32_SUPERVISOR;
	} else {
		arm_regs(vcpu)->pstate |= PSR_MODE64_DEBUG_DISABLED;
		arm_regs(vcpu)->pstate |= PSR_MODE64_EL1h;
	}
	arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_IRQ_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_FIQ_DISABLED;

	/* First time initialization of private context */
	if (!vcpu->reset_count) {
		/* Alloc private context */
		vcpu->arch_priv = vmm_zalloc(sizeof(struct arm_priv));
		if (!vcpu->arch_priv) {
			rc = VMM_ENOMEM;
			goto done;
		}
		/* Set up the CPUID value the VCPU expects in the
		 * MIDR register, as per the HW specifications.
		 */
		arm_priv(vcpu)->cpuid = cpuid;
		/* Initialize VCPU features */
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA7:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA15:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_ARMV8:
			arm_set_feature(vcpu, ARM_FEATURE_V8);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			break;
		default:
			break;
		};
		/* Some features automatically imply others: */
		if (arm_feature(vcpu, ARM_FEATURE_V7)) {
			arm_set_feature(vcpu, ARM_FEATURE_VAPA);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_MPIDR);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_V6K);
			} else {
				arm_set_feature(vcpu, ARM_FEATURE_V6);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6K)) {
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_MVFR);
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6)) {
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V5)) {
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
		}
		if (arm_feature(vcpu, ARM_FEATURE_M)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_ARM_DIV)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP4)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP3)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
		}
		if (arm_feature(vcpu, ARM_FEATURE_LPAE)) {
			arm_set_feature(vcpu, ARM_FEATURE_PXN);
		}
		/* Initialize Hypervisor Configuration */
		INIT_SPIN_LOCK(&arm_priv(vcpu)->hcr_lock);
		arm_priv(vcpu)->hcr =  (HCR_TSW_MASK |
					HCR_TACR_MASK |
					HCR_TIDCP_MASK |
					HCR_TSC_MASK |
					HCR_TWE_MASK |
					HCR_TWI_MASK |
					HCR_AMO_MASK |
					HCR_IMO_MASK |
					HCR_FMO_MASK |
					HCR_SWIO_MASK |
					HCR_VM_MASK);
		if (!(arm_regs(vcpu)->pstate & PSR_MODE32)) {
			arm_priv(vcpu)->hcr |= HCR_RW_MASK;
		}
		/* Initialize Coprocessor Trap Register */
		arm_priv(vcpu)->cptr = CPTR_TTA_MASK;
		arm_priv(vcpu)->cptr |= CPTR_TFP_MASK;
		/* Initialize Hypervisor System Trap Register */
		arm_priv(vcpu)->hstr = 0;
		/* Cleanup VGIC context first time */
		arm_vgic_cleanup(vcpu);
	}

	/* Clear virtual exception bits in HCR */
	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
	arm_priv(vcpu)->hcr &= ~(HCR_VSE_MASK |
				 HCR_VI_MASK |
				 HCR_VF_MASK);
	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	/* Set last host CPU to invalid value */
	arm_priv(vcpu)->last_hcpu = 0xFFFFFFFF;

	/* Initialize sysregs context */
	rc = cpu_vcpu_sysregs_init(vcpu, cpuid);
	if (rc) {
		goto fail_sysregs_init;
	}

	/* Initialize VFP context */
	rc = cpu_vcpu_vfp_init(vcpu);
	if (rc) {
		goto fail_vfp_init;
	}

	/* Initialize generic timer context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
		if (vmm_devtree_read_u32(vcpu->node,
					 "gentimer_phys_irq",
					 &phys_timer_irq)) {
			phys_timer_irq = 0;
		}
		if (vmm_devtree_read_u32(vcpu->node,
					 "gentimer_virt_irq",
					 &virt_timer_irq)) {
			virt_timer_irq = 0;
		}
		rc = generic_timer_vcpu_context_init(vcpu,
						&arm_gentimer_context(vcpu),
						phys_timer_irq,
						virt_timer_irq);
		if (rc) {
			goto fail_gentimer_init;
		}
	}

	rc = VMM_OK;
	goto done;

fail_gentimer_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_vfp_deinit(vcpu);
	}
fail_vfp_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_sysregs_deinit(vcpu);
	}
fail_sysregs_init:
	if (!vcpu->reset_count) {
		vmm_free(vcpu->arch_priv);
		vcpu->arch_priv = NULL;
	}

done:
	msr(cptr_el2, saved_cptr_el2);
	msr(hstr_el2, saved_hstr_el2);
	return rc;
}
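A hedged sketch of the arm_feature()/arm_set_feature() helpers assumed throughout, treating the private 'features' field as a plain bitmask (Xvisor's real definitions may differ):

#define arm_feature(vcpu, f)		\
	(arm_priv(vcpu)->features & (0x1ULL << (f)))
#define arm_set_feature(vcpu, f)	\
	(arm_priv(vcpu)->features |= (0x1ULL << (f)))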