Example #1
File: mem_event.c Project: CPFL/xen
/* Reserve one request slot, charging it to the target or foreign producer count. */
static int mem_event_grab_slot(struct mem_event_domain *med, int foreign)
{
    unsigned int avail_req;

    if ( !med->ring_page )
        return -ENOSYS;

    mem_event_ring_lock(med);

    avail_req = mem_event_ring_available(med);
    if ( avail_req == 0 )
    {
        mem_event_ring_unlock(med);
        return -EBUSY;
    }

    if ( !foreign )
        med->target_producers++;
    else
        med->foreign_producers++;

    mem_event_ring_unlock(med);

    return 0;
}
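The foreign argument decides which producer counter the claim is charged to. A caller would typically derive it from whether the event originates outside the target domain, just as Example #5 does for the FOREIGN flag; a minimal sketch (the wrapper name is hypothetical, not from this listing):

/* Hypothetical wrapper around the claim step: charge the slot to the
 * foreign producer count whenever the caller is not the target domain. */
static int claim_slot_for(struct domain *d, struct mem_event_domain *med)
{
    return mem_event_grab_slot(med, current->domain != d);
}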
Example #2
File: mem_event.c Project: CPFL/xen
int mem_event_get_response(struct domain *d, struct mem_event_domain *med, mem_event_response_t *rsp)
{
    mem_event_front_ring_t *front_ring;
    RING_IDX rsp_cons;

    mem_event_ring_lock(med);

    front_ring = &med->front_ring;
    rsp_cons = front_ring->rsp_cons;

    if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
    {
        mem_event_ring_unlock(med);
        return 0;
    }

    /* Copy response */
    memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
    rsp_cons++;

    /* Update ring */
    front_ring->rsp_cons = rsp_cons;
    front_ring->sring->rsp_event = rsp_cons + 1;

    /* Kick any waiters -- since we've just consumed an event,
     * there may be additional space available in the ring. */
    mem_event_wake(d, med);

    mem_event_ring_unlock(med);

    return 1;
}
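Because the function returns 1 when a response was consumed and 0 when the ring has nothing left, a handler can drain all pending responses with a simple loop; a minimal sketch, assuming the same Xen context as the code above:

/* Hypothetical drain loop: consume every pending response before returning. */
static void drain_responses(struct domain *d, struct mem_event_domain *med)
{
    mem_event_response_t rsp;

    while ( mem_event_get_response(d, med, &rsp) )
    {
        /* ... act on rsp.vcpu_id / rsp.flags here ... */
    }
}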
Example #3
File: mem_event.c Project: CPFL/xen
static int mem_event_disable(struct domain *d, struct mem_event_domain *med)
{
    if ( med->ring_page )
    {
        struct vcpu *v;

        mem_event_ring_lock(med);

        if ( !list_empty(&med->wq.list) )
        {
            mem_event_ring_unlock(med);
            return -EBUSY;
        }

        /* Free domU's event channel and leave the other one unbound */
        free_xen_event_channel(d, med->xen_port);

        /* Unblock all vCPUs */
        for_each_vcpu ( d, v )
        {
            if ( test_and_clear_bit(med->pause_flag, &v->pause_flags) )
            {
                vcpu_unpause(v);
                med->blocked--;
            }
        }

        destroy_ring_for_helper(&med->ring_page,
                                med->ring_pg_struct);
        mem_event_ring_unlock(med);
    }

    return 0;
}
Example #4
File: mem_event.c
/*
 * Check for space on the ring: returns -1 if mem_event is not enabled,
 * 1 if the ring is full (pausing the current vCPU when it belongs to
 * the target domain), 0 otherwise.
 */
int mem_event_check_ring(struct domain *d)
{
    struct vcpu *curr = current;
    int free_requests;
    int ring_full;

    if ( !d->mem_event.ring_page )
        return -1;

    mem_event_ring_lock(d);

    free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
    if ( unlikely(free_requests < 2) )
    {
        gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests);
        WARN_ON(free_requests == 0);
    }
    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0;

    if ( (curr->domain->domain_id == d->domain_id) && ring_full )
    {
        set_bit(_VPF_mem_event, &curr->pause_flags);
        vcpu_sleep_nosync(curr);
    }

    mem_event_ring_unlock(d);

    return ring_full;
}
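The three-way return value (-1 no ring, 1 full, 0 room available) maps naturally onto a guarded send; a hedged sketch against the older single-ring API that also appears in Example #7 below:

/* Hypothetical call site: only post a request when the ring exists and
 * has room. On a return of 1 the current vCPU may already have been
 * put to sleep by mem_event_check_ring() itself. */
static void maybe_post_event(struct domain *d, mem_event_request_t *req)
{
    int rc = mem_event_check_ring(d);

    if ( rc < 0 )
        return;                      /* mem_event not enabled */
    if ( rc == 0 )
        mem_event_put_request(d, req);
}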
Example #5
File: mem_event.c Project: CPFL/xen
/*
 * This must be preceded by a call to claim_slot(), and is guaranteed to
 * succeed.  As a side-effect however, the vCPU may be paused if the ring is
 * overly full and its continued execution would cause stalling and excessive
 * waiting.  The vCPU will be automatically unpaused when the ring clears.
 */
void mem_event_put_request(struct domain *d,
                           struct mem_event_domain *med,
                           mem_event_request_t *req)
{
    mem_event_front_ring_t *front_ring;
    int free_req;
    unsigned int avail_req;
    RING_IDX req_prod;

    if ( current->domain != d )
    {
        req->flags |= MEM_EVENT_FLAG_FOREIGN;
#ifndef NDEBUG
        if ( !(req->flags & MEM_EVENT_FLAG_VCPU_PAUSED) )
            gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n",
                     d->domain_id, req->vcpu_id);
#endif
    }

    mem_event_ring_lock(med);

    /* Due to the reservations, this step must succeed. */
    front_ring = &med->front_ring;
    free_req = RING_FREE_REQUESTS(front_ring);
    ASSERT(free_req > 0);

    /* Copy request */
    req_prod = front_ring->req_prod_pvt;
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    /* We've actually *used* our reservation, so release the slot. */
    mem_event_release_slot(d, med);

    /* Give this vCPU a black eye if necessary, on the way out.
     * See the comments above wake_blocked() for more information
     * on how this mechanism works to avoid waiting. */
    avail_req = mem_event_ring_available(med);
    if ( current->domain == d && avail_req < d->max_vcpus )
        mem_event_mark_and_pause(current, med);

    mem_event_ring_unlock(med);

    notify_via_xen_event_channel(d, med->xen_port);
}
Example #6
File: mem_event.c
void mem_event_get_response(struct domain *d, mem_event_response_t *rsp)
{
    mem_event_front_ring_t *front_ring;
    RING_IDX rsp_cons;

    mem_event_ring_lock(d);

    front_ring = &d->mem_event.front_ring;
    rsp_cons = front_ring->rsp_cons;

    /* Copy response */
    memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
    rsp_cons++;

    /* Update ring */
    front_ring->rsp_cons = rsp_cons;
    front_ring->sring->rsp_event = rsp_cons + 1;

    mem_event_ring_unlock(d);
}
Example #7
File: mem_event.c
void mem_event_put_request(struct domain *d, mem_event_request_t *req)
{
    mem_event_front_ring_t *front_ring;
    RING_IDX req_prod;

    mem_event_ring_lock(d);

    front_ring = &d->mem_event.front_ring;
    req_prod = front_ring->req_prod_pvt;

    /* Copy request */
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    mem_event_ring_unlock(d);

    notify_via_xen_event_channel(d, d->mem_event.xen_port);
}
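Both put_request variants stage the entry against req_prod_pvt and only publish it with RING_PUSH_REQUESTS. A toy userspace model of that split index, not the real ring.h macros, compiles and runs standalone:

#include <stdio.h>

/* Toy model of Xen's split producer index: entries are staged against a
 * private index and only become visible to the consumer on "push". */
struct toy_ring {
    unsigned int req_prod;      /* shared: consumer may read up to here */
    unsigned int req_prod_pvt;  /* private: producer has staged up to here */
    int slots[8];
};

static void toy_put_request(struct toy_ring *r, int req)
{
    r->slots[r->req_prod_pvt % 8] = req;  /* stage the entry */
    r->req_prod_pvt++;                    /* not yet visible */
}

static void toy_push_requests(struct toy_ring *r)
{
    /* The real macro issues a write barrier here so the consumer never
     * sees req_prod advance before the entries themselves. */
    r->req_prod = r->req_prod_pvt;
}

int main(void)
{
    struct toy_ring r = { 0, 0, { 0 } };

    toy_put_request(&r, 42);
    printf("staged: pvt=%u shared=%u\n", r.req_prod_pvt, r.req_prod);
    toy_push_requests(&r);
    printf("pushed: pvt=%u shared=%u\n", r.req_prod_pvt, r.req_prod);
    return 0;
}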
Example #8
File: mem_event.c Project: CPFL/xen
static int mem_event_enable(
    struct domain *d,
    xen_domctl_mem_event_op_t *mec,
    struct mem_event_domain *med,
    int pause_flag,
    int param,
    xen_event_channel_notification_t notification_fn)
{
    int rc;
    unsigned long ring_gfn = d->arch.hvm_domain.params[param];

    /* Only one helper at a time. If the helper crashed,
     * the ring is in an undefined state and so is the guest.
     */
    if ( med->ring_page )
        return -EBUSY;

    /* The ring GFN parameter defaults to zero; the toolstack must have
     * set it to a real value before enabling. */
    if ( ring_gfn == 0 )
        return -ENOSYS;

    mem_event_ring_lock_init(med);
    mem_event_ring_lock(med);

    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct,
                                    &med->ring_page);
    if ( rc < 0 )
        goto err;

    /* Set the number of currently blocked vCPUs to 0. */
    med->blocked = 0;

    /* Allocate event channel */
    rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
                                         notification_fn);
    if ( rc < 0 )
        goto err;

    med->xen_port = mec->port = rc;

    /* Prepare ring buffer */
    FRONT_RING_INIT(&med->front_ring,
                    (mem_event_sring_t *)med->ring_page,
                    PAGE_SIZE);

    /* Save the pause flag for this particular ring. */
    med->pause_flag = pause_flag;

    /* Initialize the last-chance wait queue. */
    init_waitqueue_head(&med->wq);

    mem_event_ring_unlock(med);
    return 0;

 err:
    destroy_ring_for_helper(&med->ring_page,
                            med->ring_pg_struct);
    mem_event_ring_unlock(med);

    return rc;
}
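A hedged sketch of a call site for this function; the pause-flag, HVM parameter and callback names below (_VPF_mem_paging, HVM_PARAM_PAGING_RING_PFN, mem_paging_notification) are assumptions for illustration, not taken from this listing:

/* Hypothetical enable path for a paging ring, wiring the domctl arguments
 * through to mem_event_enable() from Example #8. */
static int enable_paging_ring(struct domain *d, xen_domctl_mem_event_op_t *mec,
                              struct mem_event_domain *med)
{
    return mem_event_enable(d, mec, med, _VPF_mem_paging,
                            HVM_PARAM_PAGING_RING_PFN,
                            mem_paging_notification);
}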
Example #9
File: mem_event.c Project: CPFL/xen
/* Return a previously claimed request slot without posting a request. */
void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)
{
    mem_event_ring_lock(med);
    mem_event_release_slot(d, med);
    mem_event_ring_unlock(med);
}
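Taken together, Examples #1, #5 and #9 form a claim/post/cancel protocol: a slot is reserved first, after which mem_event_put_request() is guaranteed to succeed, and mem_event_cancel_slot() hands an unused reservation back. A minimal sketch of a sender built from those pieces; should_still_send() is an invented placeholder for whatever late re-check a real caller would do:

/* Hypothetical sender: claim a slot, then either post or cancel. */
static int send_mem_event(struct domain *d, struct mem_event_domain *med,
                          mem_event_request_t *req)
{
    int rc = mem_event_grab_slot(med, current->domain != d);

    if ( rc )
        return rc;              /* -ENOSYS: no ring; -EBUSY: no free slot */

    if ( !should_still_send(d) )        /* invented late re-check */
    {
        mem_event_cancel_slot(d, med);  /* hand the reservation back */
        return -EAGAIN;
    }

    mem_event_put_request(d, med, req); /* cannot fail after the claim */
    return 0;
}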