void vmx_vmcs_enter(struct vcpu *v)
{
    struct foreign_vmcs *fv;

    /*
     * NB. We must *always* run an HVM VCPU on its own VMCS, except for
     * vmx_vmcs_enter/exit critical regions.
     */
    if ( likely(v == current) )
        return;

    fv = &this_cpu(foreign_vmcs);

    if ( fv->v == v )
    {
        /* Nested entry for the same foreign vCPU: just bump the count. */
        BUG_ON(fv->count == 0);
    }
    else
    {
        BUG_ON(fv->v != NULL);
        BUG_ON(fv->count != 0);

        /* Outermost entry: quiesce the vCPU and pull its VMCS onto this pCPU. */
        vcpu_pause(v);
        spin_lock(&v->arch.hvm_vmx.vmcs_lock);

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);

        fv->v = v;
    }

    fv->count++;
}
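
For context, the critical region opened here is closed by vmx_vmcs_exit(). Below is a minimal sketch of that exit path, reconstructed from the invariants visible above (per-pCPU foreign_vmcs, nesting count, vmcs_lock); the real Xen implementation may differ in detail:

void vmx_vmcs_exit(struct vcpu *v)
{
    struct foreign_vmcs *fv;

    if ( likely(v == current) )
        return;

    fv = &this_cpu(foreign_vmcs);
    BUG_ON(fv->v != v);
    BUG_ON(fv->count == 0);

    if ( --fv->count == 0 )
    {
        /* Outermost exit: give the VMCS back and let the vCPU run again. */
        vmx_clear_vmcs(v);

        /*
         * A full implementation would also reload current's own VMCS
         * here when current is an HVM vCPU.
         */
        spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
        vcpu_unpause(v);

        fv->v = NULL;
    }
}
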
static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
{
    struct vcpu *v;
    struct vpmu_struct *vpmu;
    uint64_t mfn;
    void *xenpmu_data;

    if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
        return;

    v = d->vcpu[params->vcpu];
    if ( v != current )
        vcpu_pause(v);

    vpmu = vcpu_vpmu(v);
    spin_lock(&vpmu->vpmu_lock);

    vpmu_destroy(v);
    xenpmu_data = vpmu->xenpmu_data;
    vpmu->xenpmu_data = NULL;

    spin_unlock(&vpmu->vpmu_lock);

    /* Release the shared PMU data page that was mapped at init time. */
    if ( xenpmu_data )
    {
        mfn = domain_page_map_to_mfn(xenpmu_data);
        ASSERT(mfn_valid(mfn));
        unmap_domain_page_global(xenpmu_data);
        put_page_and_type(mfn_to_page(mfn));
    }

    if ( v != current )
        vcpu_unpause(v);
}
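
The pause-unless-current bracket used above is a recurring idiom: a vCPU must never pause itself, but any other vCPU's state may only be touched once it is quiesced. A hypothetical helper (not a Xen API) capturing the pattern, for illustration:

/*
 * Hypothetical helper, for illustration only: operate on a
 * possibly-remote vCPU's state by quiescing it first. The calling
 * vCPU never pauses itself, hence the v != current checks.
 */
static void with_vcpu_paused(struct vcpu *v, void (*fn)(struct vcpu *))
{
    if ( v != current )
        vcpu_pause(v);

    fn(v);

    if ( v != current )
        vcpu_unpause(v);
}
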
void vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;

    vcpu_pause(v);
    domain_lock(d);

    arch_vcpu_reset(v);

    /* Mark the vCPU offline until it is explicitly brought up again. */
    set_bit(_VPF_down, &v->pause_flags);

    clear_bit(v->vcpu_id, d->poll_mask);
    v->poll_evtchn = 0;

    v->fpu_initialised = 0;
    v->fpu_dirtied     = 0;
    v->is_initialised  = 0;
#ifdef VCPU_TRAP_LAST
    v->async_exception_mask = 0;
    memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
#endif
    cpus_clear(v->cpu_affinity_tmp);
    clear_bit(_VPF_blocked, &v->pause_flags);

    domain_unlock(v->domain);
    vcpu_unpause(v);
}
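
vcpu_reset() acts on a single vCPU; a caller resetting an entire domain (e.g. on an emulated S3 suspend) would simply loop over all of them. An illustrative sketch, where reset_all_vcpus() is a hypothetical name, not a Xen function:

/* Hypothetical helper, for illustration only. */
static void reset_all_vcpus(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
        vcpu_reset(v);
}
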
static void freeze_domains(void)
{
    struct domain *d;
    struct vcpu *v;

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
    {
        switch ( d->domain_id )
        {
        case 0:
            /*
             * Dom0 itself initiates the suspend, so pause its vCPUs
             * individually, skipping the one we are running on.
             */
            for_each_vcpu ( d, v )
                if ( v != current )
                    vcpu_pause(v);
            break;
        default:
            domain_pause(d);
            break;
        }
    }
    rcu_read_unlock(&domlist_read_lock);
}
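
The matching thaw path would mirror this structure. A minimal sketch, assuming it simply reverses the pauses (the real Xen thaw_domains() may do more, such as restoring vCPU affinities):

static void thaw_domains(void)
{
    struct domain *d;
    struct vcpu *v;

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
    {
        switch ( d->domain_id )
        {
        case 0:
            /* Dom0's vCPUs were paused one by one, except the current one. */
            for_each_vcpu ( d, v )
                if ( v != current )
                    vcpu_unpause(v);
            break;
        default:
            domain_unpause(d);
            break;
        }
    }
    rcu_read_unlock(&domlist_read_lock);
}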