void vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;

    vcpu_pause(v);
    domain_lock(d);

    arch_vcpu_reset(v);

    set_bit(_VPF_down, &v->pause_flags);

    clear_bit(v->vcpu_id, d->poll_mask);
    v->poll_evtchn = 0;

    v->fpu_initialised = 0;
    v->fpu_dirtied     = 0;
    v->is_initialised  = 0;
#ifdef VCPU_TRAP_LAST
    v->async_exception_mask = 0;
    memset(v->async_exception_state, 0,
           sizeof(v->async_exception_state));
#endif
    cpus_clear(v->cpu_affinity_tmp);
    clear_bit(_VPF_blocked, &v->pause_flags);

    domain_unlock(d);
    vcpu_unpause(v);
}
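vcpu_reset() leaves the vCPU parked: _VPF_down is set and is_initialised cleared, so the guest must re-initialise the context and explicitly bring the vCPU back online. A minimal sketch of that counterpart, modelled on the VCPUOP_up handling in Xen's common/domain.c; the helper name here is made up, and exact checks vary across versions:

/* Hypothetical helper, modelled on the VCPUOP_up case of do_vcpu_op():
 * bring a vCPU parked by vcpu_reset() (with _VPF_down set) back online. */
static int vcpu_bring_up(struct vcpu *v)
{
    if ( !v->is_initialised )
        return -EINVAL;                /* context must be set again first */

    /* Only wake on the 1 -> 0 transition of the "down" flag. */
    if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
        vcpu_wake(v);

    return 0;
}

Using test_and_clear_bit() makes the wake-up edge-triggered, so concurrent up requests wake the scheduler at most once.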
int do_psci_cpu_on(uint32_t vcpuid, register_t entry_point)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc;
    int is_thumb = entry_point & 1;

    /* vcpuid is unsigned, so only the upper bound needs checking. */
    if ( vcpuid >= MAX_VIRT_CPUS )
        return PSCI_EINVAL;

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return PSCI_EINVAL;

    /* THUMB set is not allowed with 64-bit domain */
    if ( is_pv64_domain(d) && is_thumb )
        return PSCI_EINVAL;

    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
        return PSCI_DENIED;

    vgic_clear_pending_irqs(v);

    memset(ctxt, 0, sizeof(*ctxt));
    ctxt->user_regs.pc64 = (u64) entry_point;
    ctxt->sctlr = SCTLR_GUEST_INIT;
    ctxt->ttbr0 = 0;
    ctxt->ttbr1 = 0;
    ctxt->ttbcr = 0; /* Defined Reset Value */
    if ( is_pv32_domain(d) )
        ctxt->user_regs.cpsr = PSR_GUEST32_INIT;
#ifdef CONFIG_ARM_64
    else
        ctxt->user_regs.cpsr = PSR_GUEST64_INIT;
#endif

    /* Start the VCPU with THUMB set if it's requested by the kernel */
    if ( is_thumb )
        ctxt->user_regs.cpsr |= PSR_THUMB;
    ctxt->flags = VGCF_online;

    domain_lock(d);
    rc = arch_set_info_guest(v, ctxt);
    free_vcpu_guest_context(ctxt);
    if ( rc < 0 )
    {
        domain_unlock(d);
        return PSCI_DENIED;
    }
    domain_unlock(d);

    vcpu_wake(v);

    return PSCI_SUCCESS;
}
int do_psci_cpu_on(uint32_t vcpuid, register_t entry_point)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc;

    /* vcpuid is unsigned, so only the upper bound needs checking. */
    if ( vcpuid >= MAX_VIRT_CPUS )
        return PSCI_EINVAL;

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return PSCI_EINVAL;

    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
        return PSCI_DENIED;

    vgic_clear_pending_irqs(v);

    memset(ctxt, 0, sizeof(*ctxt));
    ctxt->user_regs.pc64 = (u64) entry_point;
    ctxt->sctlr = SCTLR_BASE;
    ctxt->ttbr0 = 0;
    ctxt->ttbr1 = 0;
    ctxt->ttbcr = 0; /* Defined Reset Value */
    ctxt->user_regs.cpsr = PSR_GUEST_INIT;
    ctxt->flags = VGCF_online;

    domain_lock(d);
    rc = arch_set_info_guest(v, ctxt);
    free_vcpu_guest_context(ctxt);
    if ( rc < 0 )
    {
        domain_unlock(d);
        return PSCI_DENIED;
    }
    domain_unlock(d);

    vcpu_wake(v);

    return PSCI_SUCCESS;
}
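Both revisions above are the hypervisor side of PSCI 0.1 CPU_ON. For context, a hypothetical guest-side caller, assuming the HVC #0 conduit and SMCCC register usage (function ID in r0, arguments in r1/r2, result back in r0) with the PSCI_cpu_on function ID of 2 from Xen's public/arch-arm.h; the helper name is illustrative:

/* Hypothetical 32-bit guest helper for the PSCI 0.1 cpu_on call handled
 * above. Assumes the HVC conduit and PSCI_cpu_on == 2; needs a compiler
 * targeting ARMv7 with virtualization extensions. */
static inline int guest_psci_cpu_on(unsigned long vcpuid,
                                    unsigned long entry_point)
{
    register unsigned long r0 asm("r0") = 2;            /* PSCI_cpu_on */
    register unsigned long r1 asm("r1") = vcpuid;       /* target vCPU */
    register unsigned long r2 asm("r2") = entry_point;  /* resume PC */

    asm volatile ("hvc #0"
                  : "+r" (r0)
                  : "r" (r1), "r" (r2)
                  : "memory");

    return r0;                          /* 0 on success (PSCI_SUCCESS) */
}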
static int xenmem_add_to_physmap_one(
    struct domain *d,
    uint16_t space,
    domid_t foreign_domid,
    unsigned long idx,
    xen_pfn_t gpfn)
{
    unsigned long mfn = 0;
    int rc;

    switch ( space )
    {
    case XENMAPSPACE_grant_table:
        spin_lock(&d->grant_table->lock);

        if ( d->grant_table->gt_version == 0 )
            d->grant_table->gt_version = 1;

        if ( d->grant_table->gt_version == 2 &&
             (idx & XENMAPIDX_grant_table_status) )
        {
            idx &= ~XENMAPIDX_grant_table_status;
            if ( idx < nr_status_frames(d->grant_table) )
                mfn = virt_to_mfn(d->grant_table->status[idx]);
        }
        else
        {
            if ( (idx >= nr_grant_frames(d->grant_table)) &&
                 (idx < max_nr_grant_frames) )
                gnttab_grow_table(d, idx + 1);

            if ( idx < nr_grant_frames(d->grant_table) )
                mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
        }

        d->arch.grant_table_gpfn[idx] = gpfn;

        spin_unlock(&d->grant_table->lock);
        break;
    case XENMAPSPACE_shared_info:
        if ( idx == 0 )
            mfn = virt_to_mfn(d->shared_info);
        break;
    case XENMAPSPACE_gmfn_foreign:
    {
        paddr_t maddr;
        struct domain *od;

        rc = rcu_lock_target_domain_by_id(foreign_domid, &od);
        if ( rc < 0 )
            return rc;

        maddr = p2m_lookup(od, idx << PAGE_SHIFT);
        if ( maddr == INVALID_PADDR )
        {
            dump_p2m_lookup(od, idx << PAGE_SHIFT);
            rcu_unlock_domain(od);
            return -EINVAL;
        }

        mfn = maddr >> PAGE_SHIFT;

        rcu_unlock_domain(od);
        break;
    }
    default:
        return -ENOSYS;
    }

    domain_lock(d);

    /* Map at new location. */
    rc = guest_physmap_add_page(d, gpfn, mfn, 0);

    domain_unlock(d);

    return rc;
}
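For context, a hypothetical guest-side trigger for the XENMAPSPACE_shared_info case above: issuing XENMEM_add_to_physmap against the calling domain. Field names follow the public xen_add_to_physmap interface; chosen_free_gpfn is a placeholder for whatever unused guest frame the kernel picks:

/* Hypothetical guest-side sketch: ask Xen to map our shared-info page at
 * a guest frame of our choosing. This reaches the XENMAPSPACE_shared_info
 * case of xenmem_add_to_physmap_one() above. */
struct xen_add_to_physmap xatp = {
    .domid = DOMID_SELF,                /* operate on our own physmap */
    .space = XENMAPSPACE_shared_info,
    .idx   = 0,                         /* only idx 0 is valid here */
    .gpfn  = chosen_free_gpfn,          /* placeholder guest frame */
};

rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);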
static int do_common_cpu_on(register_t target_cpu, register_t entry_point,
                            register_t context_id, int ver)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc;
    int is_thumb = entry_point & 1;
    register_t vcpuid;

    vcpuid = vaffinity_to_vcpuid(target_cpu);

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return PSCI_INVALID_PARAMETERS;

    /* THUMB set is not allowed with 64-bit domain */
    if ( is_64bit_domain(d) && is_thumb )
        return PSCI_INVALID_PARAMETERS;

    /* PSCI 0.2 forbids turning on a vCPU that is already on. */
    if ( (ver == XEN_PSCI_V_0_2) &&
         !test_bit(_VPF_down, &v->pause_flags) )
        return PSCI_ALREADY_ON;

    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
        return PSCI_DENIED;

    vgic_clear_pending_irqs(v);

    memset(ctxt, 0, sizeof(*ctxt));
    ctxt->user_regs.pc64 = (u64) entry_point;
    ctxt->sctlr = SCTLR_GUEST_INIT;
    ctxt->ttbr0 = 0;
    ctxt->ttbr1 = 0;
    ctxt->ttbcr = 0; /* Defined Reset Value */
    if ( is_32bit_domain(d) )
    {
        ctxt->user_regs.cpsr = PSR_GUEST32_INIT;
        if ( ver == XEN_PSCI_V_0_2 )
            ctxt->user_regs.r0_usr = context_id;
    }
#ifdef CONFIG_ARM_64
    else
    {
        ctxt->user_regs.cpsr = PSR_GUEST64_INIT;
        if ( ver == XEN_PSCI_V_0_2 )
            ctxt->user_regs.x0 = context_id;
    }
#endif

    /* Start the VCPU with THUMB set if it's requested by the kernel */
    if ( is_thumb )
        ctxt->user_regs.cpsr |= PSR_THUMB;
    ctxt->flags = VGCF_online;

    domain_lock(d);
    rc = arch_set_info_guest(v, ctxt);
    free_vcpu_guest_context(ctxt);
    if ( rc < 0 )
    {
        domain_unlock(d);
        return PSCI_DENIED;
    }
    domain_unlock(d);

    vcpu_wake(v);

    return PSCI_SUCCESS;
}
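A minimal sketch of how do_common_cpu_on() is reached, modelled on the thin wrappers in xen/arch/arm/vpsci.c of the same era; exact signatures may differ between versions:

/* Sketch of the two PSCI entry points dispatching into the common helper
 * above; modelled on xen/arch/arm/vpsci.c, signatures may vary. */
int32_t do_psci_cpu_on(uint32_t vcpuid, register_t entry_point)
{
    /* PSCI 0.1 has no context ID, so pass 0. */
    return do_common_cpu_on(vcpuid, entry_point, 0, XEN_PSCI_V_0_1);
}

int32_t do_psci_0_2_cpu_on(register_t target_cpu, register_t entry_point,
                           register_t context_id)
{
    return do_common_cpu_on(target_cpu, entry_point, context_id,
                            XEN_PSCI_V_0_2);
}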