static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn)
{
    int rc;

    /* Map ring and shared pages */
    d->mem_event.ring_page = map_domain_page(mfn_x(ring_mfn));
    if ( d->mem_event.ring_page == NULL )
        goto err;

    d->mem_event.shared_page = map_domain_page(mfn_x(shared_mfn));
    if ( d->mem_event.shared_page == NULL )
        goto err_ring;

    /* Allocate event channel */
    rc = alloc_unbound_xen_event_channel(d->vcpu[0],
                                         current->domain->domain_id);
    if ( rc < 0 )
        goto err_shared;

    ((mem_event_shared_page_t *)d->mem_event.shared_page)->port = rc;
    d->mem_event.xen_port = rc;

    /* Prepare ring buffer */
    FRONT_RING_INIT(&d->mem_event.front_ring,
                    (mem_event_sring_t *)d->mem_event.ring_page,
                    PAGE_SIZE);

    mem_event_ring_lock_init(d);

    /* Wake any VCPUs paused for memory events */
    mem_event_unpause_vcpus(d);

    return 0;

 err_shared:
    unmap_domain_page(d->mem_event.shared_page);
    d->mem_event.shared_page = NULL;
 err_ring:
    unmap_domain_page(d->mem_event.ring_page);
    d->mem_event.ring_page = NULL;
 err:
    return 1;
}
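
For context, here is a hedged sketch of the teardown that mirrors the setup above: it undoes the three steps in reverse order (release the event channel, unmap both pages). The function name mem_event_disable_sketch is illustrative, and free_xen_event_channel() taking (vcpu, port) is an assumption about this Xen version; the real mem_event_disable() in the same file may differ.

static int mem_event_disable_sketch(struct domain *d)
{
    /* Assumed helper/signature: release the port allocated in
     * mem_event_enable() and stored in xen_port. */
    free_xen_event_channel(d->vcpu[0], d->mem_event.xen_port);

    /* Drop the shared-page mapping. */
    unmap_domain_page(d->mem_event.shared_page);
    d->mem_event.shared_page = NULL;

    /* Drop the ring mapping; the ring state is discarded with it. */
    unmap_domain_page(d->mem_event.ring_page);
    d->mem_event.ring_page = NULL;

    return 0;
}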
Example #2
File: mem_event.c Project: CPFL/xen
static int mem_event_enable(
    struct domain *d,
    xen_domctl_mem_event_op_t *mec,
    struct mem_event_domain *med,
    int pause_flag,
    int param,
    xen_event_channel_notification_t notification_fn)
{
    int rc;
    unsigned long ring_gfn = d->arch.hvm_domain.params[param];

    /* Only one helper at a time. If the helper crashed,
     * the ring is in an undefined state and so is the guest.
     */
    if ( med->ring_page )
        return -EBUSY;

    /* The HVM param defaults to zero; the toolstack must have
     * set it to the ring GFN before enabling. */
    if ( ring_gfn == 0 )
        return -ENOSYS;

    mem_event_ring_lock_init(med);
    mem_event_ring_lock(med);

    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct,
                                    &med->ring_page);
    if ( rc < 0 )
        goto err;

    /* Set the number of currently blocked vCPUs to 0. */
    med->blocked = 0;

    /* Allocate event channel */
    rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
                                         notification_fn);
    if ( rc < 0 )
        goto err;

    med->xen_port = mec->port = rc;

    /* Prepare ring buffer */
    FRONT_RING_INIT(&med->front_ring,
                    (mem_event_sring_t *)med->ring_page,
                    PAGE_SIZE);

    /* Save the pause flag for this particular ring. */
    med->pause_flag = pause_flag;

    /* Initialize the last-chance wait queue. */
    init_waitqueue_head(&med->wq);

    mem_event_ring_unlock(med);
    return 0;

 err:
    destroy_ring_for_helper(&med->ring_page,
                            med->ring_pg_struct);
    mem_event_ring_unlock(med);

    return rc;
}
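
As a usage note, here is a hedged sketch of how a domctl handler might call this newer mem_event_enable() for, say, the paging ring. The names _VPF_mem_paging, HVM_PARAM_PAGING_RING_PFN and mem_paging_notification are assumptions about the same Xen era; verify against mem_event_domctl() in the actual tree.

/* Illustrative caller sketch; the marked names are assumptions. */
static int paging_ring_enable_sketch(struct domain *d,
                                     xen_domctl_mem_event_op_t *mec,
                                     struct mem_event_domain *med)
{
    /* The ring GFN is read inside mem_event_enable() from the HVM param,
     * so the toolstack must have populated it before issuing the enable
     * domctl. */
    return mem_event_enable(d, mec, med,
                            _VPF_mem_paging,            /* assumed pause flag */
                            HVM_PARAM_PAGING_RING_PFN,  /* assumed param index */
                            mem_paging_notification);   /* assumed callback */
}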