static void continue_new_vcpu(struct vcpu *prev)
{
    schedule_tail(prev);

    if ( is_idle_vcpu(current) )
        reset_stack_and_jump(idle_loop);
    else if ( is_32bit_domain(current->domain) )
        /* check_wakeup_from_wait(); */
        reset_stack_and_jump(return_to_new_vcpu32);
    else
        /* check_wakeup_from_wait(); */
        reset_stack_and_jump(return_to_new_vcpu64);
}
/*
 * Initialise VCPU state. The context can be supplied by either the
 * toolstack (XEN_DOMCTL_setvcpucontext) or the guest
 * (VCPUOP_initialise) and therefore must be properly validated.
 */
int arch_set_info_guest(
    struct vcpu *v, vcpu_guest_context_u c)
{
    struct vcpu_guest_context *ctxt = c.nat;
    struct vcpu_guest_core_regs *regs = &c.nat->user_regs;

    if ( is_32bit_domain(v->domain) )
    {
        if ( !is_guest_pv32_psr(regs->cpsr) )
            return -EINVAL;

        if ( regs->spsr_svc && !is_guest_pv32_psr(regs->spsr_svc) )
            return -EINVAL;
        if ( regs->spsr_abt && !is_guest_pv32_psr(regs->spsr_abt) )
            return -EINVAL;
        if ( regs->spsr_und && !is_guest_pv32_psr(regs->spsr_und) )
            return -EINVAL;
        if ( regs->spsr_irq && !is_guest_pv32_psr(regs->spsr_irq) )
            return -EINVAL;
        if ( regs->spsr_fiq && !is_guest_pv32_psr(regs->spsr_fiq) )
            return -EINVAL;
    }
#ifdef CONFIG_ARM_64
    else
    {
        if ( !is_guest_pv64_psr(regs->cpsr) )
            return -EINVAL;

        if ( regs->spsr_el1 && !is_guest_pv64_psr(regs->spsr_el1) )
            return -EINVAL;
    }
#endif

    vcpu_regs_user_to_hyp(v, regs);

    v->arch.sctlr = ctxt->sctlr;
    v->arch.ttbr0 = ctxt->ttbr0;
    v->arch.ttbr1 = ctxt->ttbr1;
    v->arch.ttbcr = ctxt->ttbcr;

    v->is_initialised = 1;

    if ( ctxt->flags & VGCF_online )
        clear_bit(_VPF_down, &v->pause_flags);
    else
        set_bit(_VPF_down, &v->pause_flags);

    return 0;
}
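The validation above gates on the mode bits of each saved program status register. As a point of reference, here is a minimal sketch of the kind of predicate is_guest_pv32_psr might implement; the mode values follow the architectural AArch32 M[4:0] encodings, but the helper name and exact shape are assumptions, not the verbatim Xen implementation.

/* Hedged sketch: accept only PSR values whose AArch32 mode field
 * selects a mode a 32-bit guest may legitimately run in.  The
 * constants below are the architectural M[4:0] encodings; the
 * helper itself is illustrative, not the Xen original. */
static bool sketch_is_guest_pv32_psr(uint32_t psr)
{
    switch ( psr & 0x1f )          /* M[4:0]: current mode */
    {
    case 0x10:                     /* USR */
    case 0x11:                     /* FIQ */
    case 0x12:                     /* IRQ */
    case 0x13:                     /* SVC */
    case 0x17:                     /* ABT */
    case 0x1b:                     /* UND */
    case 0x1f:                     /* SYS */
        return true;
    default:                       /* HYP, MON, or invalid: reject */
        return false;
    }
}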
void p2m_restore_state(struct vcpu *n)
{
    register_t hcr;

    hcr = READ_SYSREG(HCR_EL2);
    WRITE_SYSREG(hcr & ~HCR_VM, HCR_EL2);
    isb();

    p2m_load_VTTBR(n->domain);
    isb();

    if ( is_32bit_domain(n->domain) )
        hcr &= ~HCR_RW;
    else
        hcr |= HCR_RW;

    WRITE_SYSREG(n->arch.sctlr, SCTLR_EL1);
    isb();

    WRITE_SYSREG(hcr, HCR_EL2);
    isb();
}
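The p2m_load_VTTBR() call installs the stage-2 translation root for the incoming domain while stage-2 translation is disabled (HCR_VM cleared). A plausible sketch of what such a helper does is given below; it assumes an 8-bit VMID living in bits [55:48] of VTTBR_EL2, and the parameter names (root_maddr, vmid) are illustrative placeholders rather than Xen's actual p2m fields.

/* Hedged sketch: compose VTTBR_EL2 from the stage-2 root table
 * address and the domain's VMID (bits [55:48] for an 8-bit VMID).
 * Parameter names are illustrative placeholders. */
static void sketch_load_vttbr(paddr_t root_maddr, uint8_t vmid)
{
    uint64_t vttbr = root_maddr | ((uint64_t)vmid << 48);

    WRITE_SYSREG64(vttbr, VTTBR_EL2);
    isb();   /* ensure the new stage-2 root is visible before use */
}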
static void ctxt_switch_from(struct vcpu *p)
{
    p2m_save_state(p);

    /* CP 15 */
    p->arch.csselr = READ_SYSREG(CSSELR_EL1);

    /* Control Registers */
    p->arch.cpacr = READ_SYSREG(CPACR_EL1);

    p->arch.contextidr = READ_SYSREG(CONTEXTIDR_EL1);
    p->arch.tpidr_el0 = READ_SYSREG(TPIDR_EL0);
    p->arch.tpidrro_el0 = READ_SYSREG(TPIDRRO_EL0);
    p->arch.tpidr_el1 = READ_SYSREG(TPIDR_EL1);

    /* Arch timer */
    p->arch.cntkctl = READ_SYSREG32(CNTKCTL_EL1);
    virt_timer_save(p);

    if ( is_32bit_domain(p->domain) && cpu_has_thumbee )
    {
        p->arch.teecr = READ_SYSREG32(TEECR32_EL1);
        p->arch.teehbr = READ_SYSREG32(TEEHBR32_EL1);
    }

#ifdef CONFIG_ARM_32
    p->arch.joscr = READ_CP32(JOSCR);
    p->arch.jmcr = READ_CP32(JMCR);
#endif

    isb();

    /* MMU */
    p->arch.vbar = READ_SYSREG(VBAR_EL1);
    p->arch.ttbcr = READ_SYSREG(TCR_EL1);
    p->arch.ttbr0 = READ_SYSREG64(TTBR0_EL1);
    p->arch.ttbr1 = READ_SYSREG64(TTBR1_EL1);
    if ( is_32bit_domain(p->domain) )
        p->arch.dacr = READ_SYSREG(DACR32_EL2);
    p->arch.par = READ_SYSREG64(PAR_EL1);
#if defined(CONFIG_ARM_32)
    p->arch.mair0 = READ_CP32(MAIR0);
    p->arch.mair1 = READ_CP32(MAIR1);
    p->arch.amair0 = READ_CP32(AMAIR0);
    p->arch.amair1 = READ_CP32(AMAIR1);
#else
    p->arch.mair = READ_SYSREG64(MAIR_EL1);
    p->arch.amair = READ_SYSREG64(AMAIR_EL1);
#endif

    /* Fault Status */
#if defined(CONFIG_ARM_32)
    p->arch.dfar = READ_CP32(DFAR);
    p->arch.ifar = READ_CP32(IFAR);
    p->arch.dfsr = READ_CP32(DFSR);
#elif defined(CONFIG_ARM_64)
    p->arch.far = READ_SYSREG64(FAR_EL1);
    p->arch.esr = READ_SYSREG64(ESR_EL1);
#endif

    if ( is_32bit_domain(p->domain) )
        p->arch.ifsr = READ_SYSREG(IFSR32_EL2);
    p->arch.afsr0 = READ_SYSREG(AFSR0_EL1);
    p->arch.afsr1 = READ_SYSREG(AFSR1_EL1);

    /* XXX MPU */

    /* VFP */
    vfp_save_state(p);

    /* VGIC */
    gic_save_state(p);

    isb();
    context_saved(p);
}
static void ctxt_switch_to(struct vcpu *n)
{
    p2m_restore_state(n);

    WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2);
    WRITE_SYSREG(n->arch.vmpidr, VMPIDR_EL2);

    /* VGIC */
    gic_restore_state(n);

    /* VFP */
    vfp_restore_state(n);

    /* XXX MPU */

    /* Fault Status */
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.dfar, DFAR);
    WRITE_CP32(n->arch.ifar, IFAR);
    WRITE_CP32(n->arch.dfsr, DFSR);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.far, FAR_EL1);
    WRITE_SYSREG64(n->arch.esr, ESR_EL1);
#endif

    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.ifsr, IFSR32_EL2);
    WRITE_SYSREG(n->arch.afsr0, AFSR0_EL1);
    WRITE_SYSREG(n->arch.afsr1, AFSR1_EL1);

    /* MMU */
    WRITE_SYSREG(n->arch.vbar, VBAR_EL1);
    WRITE_SYSREG(n->arch.ttbcr, TCR_EL1);
    WRITE_SYSREG64(n->arch.ttbr0, TTBR0_EL1);
    WRITE_SYSREG64(n->arch.ttbr1, TTBR1_EL1);
    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.dacr, DACR32_EL2);
    WRITE_SYSREG64(n->arch.par, PAR_EL1);
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.mair0, MAIR0);
    WRITE_CP32(n->arch.mair1, MAIR1);
    WRITE_CP32(n->arch.amair0, AMAIR0);
    WRITE_CP32(n->arch.amair1, AMAIR1);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.mair, MAIR_EL1);
    WRITE_SYSREG64(n->arch.amair, AMAIR_EL1);
#endif
    isb();

    /* Control Registers */
    WRITE_SYSREG(n->arch.cpacr, CPACR_EL1);

    WRITE_SYSREG(n->arch.contextidr, CONTEXTIDR_EL1);
    WRITE_SYSREG(n->arch.tpidr_el0, TPIDR_EL0);
    WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0);
    WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1);

    if ( is_32bit_domain(n->domain) && cpu_has_thumbee )
    {
        WRITE_SYSREG32(n->arch.teecr, TEECR32_EL1);
        WRITE_SYSREG32(n->arch.teehbr, TEEHBR32_EL1);
    }

#ifdef CONFIG_ARM_32
    WRITE_CP32(n->arch.joscr, JOSCR);
    WRITE_CP32(n->arch.jmcr, JMCR);
#endif
    isb();

    /* CP 15 */
    WRITE_SYSREG(n->arch.csselr, CSSELR_EL1);

    isb();

    /* This could trigger a hardware interrupt from the virtual
     * timer. The interrupt needs to be injected into the guest. */
    WRITE_SYSREG32(n->arch.cntkctl, CNTKCTL_EL1);
    virt_timer_restore(n);
}
unsigned long hypercall_create_continuation(
    unsigned int op, const char *format, ...)
{
    struct mc_state *mcs = &current->mc_state;
    struct cpu_user_regs *regs;
    const char *p = format;
    unsigned long arg, rc;
    unsigned int i;
    va_list args;

    /* All hypercalls take at least one argument */
    BUG_ON( !p || *p == '\0' );

    va_start(args, format);

    if ( test_bit(_MCSF_in_multicall, &mcs->flags) )
    {
        BUG(); /* XXX multicalls not implemented yet. */

        __set_bit(_MCSF_call_preempted, &mcs->flags);

        for ( i = 0; *p != '\0'; i++ )
            mcs->call.args[i] = next_arg(p, args);

        /* Return value gets written back to mcs->call.result */
        rc = mcs->call.result;
    }
    else
    {
        regs = guest_cpu_user_regs();

        /* Ensure the hypercall trap instruction is re-executed. */
        regs->pc -= 4;  /* re-execute 'hvc #XEN_HYPERCALL_TAG' */

#ifdef CONFIG_ARM_64
        if ( !is_32bit_domain(current->domain) )
        {
            regs->x16 = op;

            for ( i = 0; *p != '\0'; i++ )
            {
                arg = next_arg(p, args);

                switch ( i )
                {
                case 0: regs->x0 = arg; break;
                case 1: regs->x1 = arg; break;
                case 2: regs->x2 = arg; break;
                case 3: regs->x3 = arg; break;
                case 4: regs->x4 = arg; break;
                case 5: regs->x5 = arg; break;
                }
            }

            /* Return value gets written back to x0 */
            rc = regs->x0;
        }
        else
#endif
        {
            regs->r12 = op;

            for ( i = 0; *p != '\0'; i++ )
            {
                arg = next_arg(p, args);

                switch ( i )
                {
                case 0: regs->r0 = arg; break;
                case 1: regs->r1 = arg; break;
                case 2: regs->r2 = arg; break;
                case 3: regs->r3 = arg; break;
                case 4: regs->r4 = arg; break;
                case 5: regs->r5 = arg; break;
                }
            }

            /* Return value gets written back to r0 */
            rc = regs->r0;
        }
    }

    va_end(args);

    return rc;
}
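The function above rewinds the PC so that, on the next guest entry, the hvc instruction is re-executed with the refreshed argument registers. For context, here is a hedged sketch of how a preemptible handler would typically use it: the hypercall number, handler, and work loop are all hypothetical, while hypercall_preempt_check() and the 'i'/'l' format codes are the real Xen idiom.

/* Hedged sketch: a hypothetical preemptible hypercall handler.
 * When preemption is pending, it encodes its progress into a
 * continuation so the guest transparently re-issues the call. */
static long do_example_op(unsigned int cmd, unsigned long start)
{
    while ( start < 1024 )
    {
        process_one_item(start++);       /* hypothetical unit of work */

        if ( hypercall_preempt_check() )
            return hypercall_create_continuation(
                __HYPERVISOR_example_op,  /* hypothetical hypercall number */
                "il", cmd, start);        /* resume from 'start' next time */
    }

    return 0;
}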
static void ctxt_switch_to(struct vcpu *n)
{
    /* When the idle VCPU is running, Xen will always stay in hypervisor
     * mode. Therefore we don't need to restore the context of an idle VCPU.
     */
    if ( is_idle_vcpu(n) )
        return;

    p2m_restore_state(n);

    WRITE_SYSREG32(n->domain->arch.vpidr, VPIDR_EL2);
    WRITE_SYSREG(n->arch.vmpidr, VMPIDR_EL2);

    /* VGIC */
    gic_restore_state(n);

    /* VFP */
    vfp_restore_state(n);

    /* XXX MPU */

    /* Fault Status */
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.dfar, DFAR);
    WRITE_CP32(n->arch.ifar, IFAR);
    WRITE_CP32(n->arch.dfsr, DFSR);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.far, FAR_EL1);
    WRITE_SYSREG64(n->arch.esr, ESR_EL1);
#endif

    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.ifsr, IFSR32_EL2);
    WRITE_SYSREG(n->arch.afsr0, AFSR0_EL1);
    WRITE_SYSREG(n->arch.afsr1, AFSR1_EL1);

    /* MMU */
    WRITE_SYSREG(n->arch.vbar, VBAR_EL1);
    WRITE_SYSREG(n->arch.ttbcr, TCR_EL1);
    WRITE_SYSREG64(n->arch.ttbr0, TTBR0_EL1);
    WRITE_SYSREG64(n->arch.ttbr1, TTBR1_EL1);

    /*
     * Erratum #852523: DACR32_EL2 must be restored before one of the
     * following sysregs: SCTLR_EL1, TCR_EL1, TTBR0_EL1, TTBR1_EL1 or
     * CONTEXTIDR_EL1.
     */
    if ( is_32bit_domain(n->domain) )
        WRITE_SYSREG(n->arch.dacr, DACR32_EL2);
    WRITE_SYSREG64(n->arch.par, PAR_EL1);
#if defined(CONFIG_ARM_32)
    WRITE_CP32(n->arch.mair0, MAIR0);
    WRITE_CP32(n->arch.mair1, MAIR1);
    WRITE_CP32(n->arch.amair0, AMAIR0);
    WRITE_CP32(n->arch.amair1, AMAIR1);
#elif defined(CONFIG_ARM_64)
    WRITE_SYSREG64(n->arch.mair, MAIR_EL1);
    WRITE_SYSREG64(n->arch.amair, AMAIR_EL1);
#endif
    isb();

    /* Control Registers */
    WRITE_SYSREG(n->arch.cpacr, CPACR_EL1);

    /*
     * This write to sysreg CONTEXTIDR_EL1 ensures we don't hit erratum
     * #852523, i.e. DACR32_EL2 not being correctly synchronized.
     */
    WRITE_SYSREG(n->arch.contextidr, CONTEXTIDR_EL1);
    WRITE_SYSREG(n->arch.tpidr_el0, TPIDR_EL0);
    WRITE_SYSREG(n->arch.tpidrro_el0, TPIDRRO_EL0);
    WRITE_SYSREG(n->arch.tpidr_el1, TPIDR_EL1);

    if ( is_32bit_domain(n->domain) && cpu_has_thumbee )
    {
        WRITE_SYSREG32(n->arch.teecr, TEECR32_EL1);
        WRITE_SYSREG32(n->arch.teehbr, TEEHBR32_EL1);
    }

#ifdef CONFIG_ARM_32
    WRITE_CP32(n->arch.joscr, JOSCR);
    WRITE_CP32(n->arch.jmcr, JMCR);
#endif
    isb();

    /* CP 15 */
    WRITE_SYSREG(n->arch.csselr, CSSELR_EL1);

    isb();

    /* This could trigger a hardware interrupt from the virtual
     * timer. The interrupt needs to be injected into the guest. */
    WRITE_SYSREG32(n->arch.cntkctl, CNTKCTL_EL1);
    virt_timer_restore(n);
}
static int do_common_cpu_on(register_t target_cpu, register_t entry_point,
                            register_t context_id, int ver)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc;
    int is_thumb = entry_point & 1;
    register_t vcpuid;

    vcpuid = vaffinity_to_vcpuid(target_cpu);

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return PSCI_INVALID_PARAMETERS;

    /* THUMB set is not allowed with 64-bit domain */
    if ( is_64bit_domain(d) && is_thumb )
        return PSCI_INVALID_PARAMETERS;

    if ( (ver == XEN_PSCI_V_0_2) &&
         !test_bit(_VPF_down, &v->pause_flags) )
        return PSCI_ALREADY_ON;

    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
        return PSCI_DENIED;

    vgic_clear_pending_irqs(v);

    memset(ctxt, 0, sizeof(*ctxt));
    ctxt->user_regs.pc64 = (u64) entry_point;
    ctxt->sctlr = SCTLR_GUEST_INIT;
    ctxt->ttbr0 = 0;
    ctxt->ttbr1 = 0;
    ctxt->ttbcr = 0; /* Defined Reset Value */

    if ( is_32bit_domain(d) )
    {
        ctxt->user_regs.cpsr = PSR_GUEST32_INIT;
        if ( ver == XEN_PSCI_V_0_2 )
            ctxt->user_regs.r0_usr = context_id;
    }
#ifdef CONFIG_ARM_64
    else
    {
        ctxt->user_regs.cpsr = PSR_GUEST64_INIT;
        if ( ver == XEN_PSCI_V_0_2 )
            ctxt->user_regs.x0 = context_id;
    }
#endif

    /* Start the VCPU with THUMB set if it's requested by the kernel */
    if ( is_thumb )
        ctxt->user_regs.cpsr |= PSR_THUMB;
    ctxt->flags = VGCF_online;

    domain_lock(d);
    rc = arch_set_info_guest(v, ctxt);
    free_vcpu_guest_context(ctxt);

    if ( rc < 0 )
    {
        domain_unlock(d);
        return PSCI_DENIED;
    }
    domain_unlock(d);

    vcpu_wake(v);

    return PSCI_SUCCESS;
}
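On the guest side, this path is reached through the HVC conduit. A hedged sketch of how a 64-bit guest kernel might issue PSCI 0.2 CPU_ON follows: the SMC64 function ID 0xC4000003 comes from the PSCI 0.2 specification, while the wrapper itself (name, calling shape) is illustrative rather than any particular kernel's implementation.

/* Hedged sketch: guest-side PSCI 0.2 CPU_ON via HVC.  0xC4000003 is
 * the SMC64 function ID for CPU_ON per the PSCI 0.2 spec; everything
 * else about this wrapper is illustrative. */
static inline uint64_t sketch_psci_cpu_on(uint64_t target_mpidr,
                                          uint64_t entry, uint64_t ctx_id)
{
    register uint64_t x0 asm("x0") = 0xC4000003;
    register uint64_t x1 asm("x1") = target_mpidr;
    register uint64_t x2 asm("x2") = entry;
    register uint64_t x3 asm("x3") = ctx_id;

    asm volatile ( "hvc #0"
                   : "+r" (x0)
                   : "r" (x1), "r" (x2), "r" (x3)
                   : "memory" );

    return x0;   /* PSCI return code, e.g. PSCI_SUCCESS */
}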