Example #1
File: vpmu.c Project: Fantu/Xen
static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
{
    struct vcpu *v;
    struct vpmu_struct *vpmu;
    uint64_t mfn;
    void *xenpmu_data;

    if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
        return;

    v = d->vcpu[params->vcpu];
    if ( v != current )
        vcpu_pause(v);

    vpmu = vcpu_vpmu(v);
    spin_lock(&vpmu->vpmu_lock);

    vpmu_destroy(v);
    xenpmu_data = vpmu->xenpmu_data;
    vpmu->xenpmu_data = NULL;

    spin_unlock(&vpmu->vpmu_lock);

    if ( xenpmu_data )
    {
        mfn = domain_page_map_to_mfn(xenpmu_data);
        ASSERT(mfn_valid(mfn));
        unmap_domain_page_global(xenpmu_data);
        put_page_and_type(mfn_to_page(mfn));
    }

    if ( v != current )
        vcpu_unpause(v);
}
Example #2
File: mem_event.c Project: CPFL/xen
static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
{
    if ( med->ring_page )
    {
        struct vcpu *v;

        mem_event_ring_lock(med);

        if ( !list_empty(&med->wq.list) )
        {
            mem_event_ring_unlock(med);
            return -EBUSY;
        }

        /* Free domU's event channel and leave the other one unbound */
        free_xen_event_channel(d, med->xen_port);

        /* Unblock all vCPUs */
        for_each_vcpu ( d, v )
        {
            if ( test_and_clear_bit(med->pause_flag, &v->pause_flags) )
            {
                vcpu_unpause(v);
                med->blocked--;
            }
        }

        destroy_ring_for_helper(&med->ring_page,
                                med->ring_pg_struct);
        mem_event_ring_unlock(med);
    }

    return 0;
}
Example #3
void vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;

    vcpu_pause(v);
    domain_lock(d);

    arch_vcpu_reset(v);

    set_bit(_VPF_down, &v->pause_flags);

    clear_bit(v->vcpu_id, d->poll_mask);
    v->poll_evtchn = 0;

    v->fpu_initialised = 0;
    v->fpu_dirtied     = 0;
    v->is_initialised  = 0;
#ifdef VCPU_TRAP_LAST
    v->async_exception_mask = 0;
    memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
#endif
    cpus_clear(v->cpu_affinity_tmp);
    clear_bit(_VPF_blocked, &v->pause_flags);

    domain_unlock(v->domain);
    vcpu_unpause(v);
}
Example #4
void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;
    d->shutdown_code = -1;

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}
Example #5
File: mem_event.c Project: CPFL/xen
/*
 * mem_event_wake_blocked() wakes up vCPUs waiting for room in the
 * ring. These vCPUs were paused on their way out after placing an event,
 * but need to be resumed once the ring can process at least one event
 * from them.
 */
static void mem_event_wake_blocked(struct domain *d, struct mem_event_domain *med)
{
    struct vcpu *v;
    int online = d->max_vcpus;
    unsigned int avail_req = mem_event_ring_available(med);

    if ( avail_req == 0 || med->blocked == 0 )
        return;

    /*
     * We ensure that we only have vCPUs online if there are enough free
     * slots for their memory events to be processed.  This guarantees that
     * no memory events are lost: certain types of events cannot be
     * replayed, so there must be space in the ring for when they are hit.
     * See comment below in mem_event_put_request().
     */
    for_each_vcpu ( d, v )
        if ( test_bit(med->pause_flag, &v->pause_flags) )
            online--;

    ASSERT(online == (d->max_vcpus - med->blocked));

    /*
     * Remember which vCPU last woke up, to avoid always scanning linearly
     * from zero and starving higher-numbered vCPUs under high load.
     */
    if ( d->vcpu )
    {
        int i, j, k;

        for (i = med->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++)
        {
            k = i % d->max_vcpus;
            v = d->vcpu[k];
            if ( !v )
                continue;

            if ( !(med->blocked) || online >= avail_req )
               break;

            if ( test_and_clear_bit(med->pause_flag, &v->pause_flags) )
            {
                vcpu_unpause(v);
                online++;
                med->blocked--;
                med->last_vcpu_wake_up = k;
            }
        }
    }
}
Example #6
static void thaw_domains(void)
{
    struct domain *d;
    struct vcpu *v;

    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
    {
        switch ( d->domain_id )
        {
        case 0:
            for_each_vcpu ( d, v )
                if ( v != current )
                    vcpu_unpause(v);
            break;
        default:
            domain_unpause(d);
            break;
        }
    }
    rcu_read_unlock(&domlist_read_lock);
}
Example #7
void vmx_vmcs_exit(struct vcpu *v)
{
    struct foreign_vmcs *fv;

    if ( likely(v == current) )
        return;

    fv = &this_cpu(foreign_vmcs);
    BUG_ON(fv->v != v);
    BUG_ON(fv->count == 0);

    if ( --fv->count == 0 )
    {
        /* Don't confuse vmx_do_resume (for @v or @current!) */
        vmx_clear_vmcs(v);
        if ( is_hvm_vcpu(current) )
            vmx_load_vmcs(current);

        spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
        vcpu_unpause(v);

        fv->v = NULL;
    }
}
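
All of the examples above follow the same discipline around vcpu_unpause(): the target vCPU is first paused (unless it is the vCPU currently executing, as in examples 1 and 6), its state is modified under the relevant lock, and only then is it unpaused. A minimal sketch of that pattern is shown below; modify_vcpu_state() is a hypothetical placeholder for whatever per-vCPU change the caller needs to make.

static void example_pause_modify_unpause(struct vcpu *v)
{
    /*
     * A vCPU cannot synchronously pause itself, hence the v != current
     * checks mirrored from examples 1 and 6.
     */
    if ( v != current )
        vcpu_pause(v);          /* wait until v is fully descheduled */

    modify_vcpu_state(v);       /* hypothetical: the actual state change */

    if ( v != current )
        vcpu_unpause(v);        /* allow v to be scheduled again */
}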