Example #1
LOCAL void mcd_event_put_request(struct domain *d, mcd_event_request_t *req)
{
    mcd_event_front_ring_t *front_ring;
    RING_IDX req_prod;

    my_trace();

    mcd_event_ring_lock(d);

    front_ring = &d->mcd_event.front_ring;
    req_prod = front_ring->req_prod_pvt;

    /* Copy request */
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    mcd_event_ring_unlock(d);

    my_trace();

    /*
     * TODO: check whether the event-channel notification is needed, or
     * whether updating the ring alone suffices.
     */
    notify_via_xen_event_channel(d, d->mcd_event.xen_port);
}
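
The TODO at the end of this example has a conventional answer in Xen's ring protocol: use RING_PUSH_REQUESTS_AND_CHECK_NOTIFY from the public io/ring.h instead of RING_PUSH_REQUESTS, and kick the event channel only when the consumer asked to be woken. A minimal sketch of the function's tail rewritten that way (assuming the consumer maintains req_event; all other names come from Example #1):

    /*
     * Sketch: publish the requests and check whether the consumer armed
     * req_event, i.e. whether an event-channel kick is actually needed.
     */
    int notify;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(front_ring, notify);

    mcd_event_ring_unlock(d);

    if ( notify )
        notify_via_xen_event_channel(d, d->mcd_event.xen_port);
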
Example #2
File: vm_event.c  Project: prosig/xen
/*
 * This must be preceded by a call to claim_slot(), and is guaranteed to
 * succeed.  As a side-effect however, the vCPU may be paused if the ring is
 * overly full and its continued execution would cause stalling and excessive
 * waiting.  The vCPU will be automatically unpaused when the ring clears.
 */
void vm_event_put_request(struct domain *d,
                          struct vm_event_domain *ved,
                          vm_event_request_t *req)
{
    vm_event_front_ring_t *front_ring;
    int free_req;
    unsigned int avail_req;
    RING_IDX req_prod;

    if ( current->domain != d )
    {
        req->flags |= VM_EVENT_FLAG_FOREIGN;
#ifndef NDEBUG
        if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
            gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n",
                     d->domain_id, req->vcpu_id);
#endif
    }

    req->version = VM_EVENT_INTERFACE_VERSION;

    vm_event_ring_lock(ved);

    /* Due to the reservations, this step must succeed. */
    front_ring = &ved->front_ring;
    free_req = RING_FREE_REQUESTS(front_ring);
    ASSERT(free_req > 0);

    /* Copy request */
    req_prod = front_ring->req_prod_pvt;
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    /* We've actually *used* our reservation, so release the slot. */
    vm_event_release_slot(d, ved);

    /* Give this vCPU a black eye if necessary, on the way out.
     * See the comments above wake_blocked() for more information
     * on how this mechanism works to avoid waiting. */
    avail_req = vm_event_ring_available(ved);
    if ( current->domain == d && avail_req < d->max_vcpus )
        vm_event_mark_and_pause(current, ved);

    vm_event_ring_unlock(ved);

    notify_via_xen_event_channel(d, ved->xen_port);
}
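
The header comment insists on a prior claim_slot() call. A hedged sketch of that calling pattern, using vm_event_claim_slot() from the same subsystem (the reason code and field values filled in below are illustrative, not a real call site):

/*
 * Sketch of the caller-side protocol implied by the comment above:
 * claim a slot first, then vm_event_put_request() is guaranteed to
 * succeed. The request contents here are illustrative only.
 */
static void send_guest_request(struct domain *d, struct vm_event_domain *ved)
{
    vm_event_request_t req = {};

    if ( vm_event_claim_slot(d, ved) )
        return;              /* no space or ring not initialised: give up */

    req.reason  = VM_EVENT_REASON_GUEST_REQUEST;   /* illustrative */
    req.vcpu_id = current->vcpu_id;

    vm_event_put_request(d, ved, &req);  /* cannot fail after the claim */
}
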
Example #3
/*
 * This must be preceded by a call to claim_slot(), and is guaranteed to
 * succeed.  As a side-effect however, the vCPU may be paused if the ring is
 * overly full and its continued execution would cause stalling and excessive
 * waiting.  The vCPU will be automatically unpaused when the ring clears.
 */
void mem_event_put_request(struct domain *d,
                           struct mem_event_domain *med,
                           mem_event_request_t *req)
{
    mem_event_front_ring_t *front_ring;
    int free_req;
    unsigned int avail_req;
    RING_IDX req_prod;

    if ( current->domain != d )
    {
        req->flags |= MEM_EVENT_FLAG_FOREIGN;
        ASSERT( !(req->flags & MEM_EVENT_FLAG_VCPU_PAUSED) );
    }

    mem_event_ring_lock(med);

    /* Due to the reservations, this step must succeed. */
    front_ring = &med->front_ring;
    free_req = RING_FREE_REQUESTS(front_ring);
    ASSERT(free_req > 0);

    /* Copy request */
    req_prod = front_ring->req_prod_pvt;
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    /* We've actually *used* our reservation, so release the slot. */
    mem_event_release_slot(d, med);

    /* Give this vCPU a black eye if necessary, on the way out.
     * See the comments above wake_blocked() for more information
     * on how this mechanism works to avoid waiting. */
    avail_req = mem_event_ring_available(med);
    if ( current->domain == d && avail_req < d->max_vcpus )
        mem_event_mark_and_pause(current, med);

    mem_event_ring_unlock(med);

    notify_via_xen_event_channel(d, med->xen_port);
}
Example #4
void mem_event_put_request(struct domain *d, mem_event_request_t *req)
{
    mem_event_front_ring_t *front_ring;
    RING_IDX req_prod;

    mem_event_ring_lock(d);

    front_ring = &d->mem_event.front_ring;
    req_prod = front_ring->req_prod_pvt;

    /* Copy request */
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    mem_event_ring_unlock(d);

    notify_via_xen_event_channel(d, d->mem_event.xen_port);
}
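
All four producers above follow the same shape: fill the slot at the private index req_prod_pvt, bump it, and let RING_PUSH_REQUESTS publish the new producer index behind a write barrier before the event-channel notification. For contrast, a hedged sketch of the consumer half, using the back-ring types that DEFINE_RING_TYPES generates from the same public io/ring.h (process_request() is a hypothetical handler):

/*
 * Sketch of the matching consumer, assuming a back ring initialised with
 * BACK_RING_INIT over the same shared page. process_request() is a
 * hypothetical stand-in for the real event handler.
 */
static void drain_requests(mem_event_back_ring_t *back_ring)
{
    RING_IDX rc = back_ring->req_cons;
    RING_IDX rp = back_ring->sring->req_prod;

    smp_rmb();      /* pairs with the barrier in RING_PUSH_REQUESTS */

    while ( rc != rp )
    {
        mem_event_request_t req;

        memcpy(&req, RING_GET_REQUEST(back_ring, rc), sizeof(req));
        back_ring->req_cons = ++rc;

        process_request(&req);  /* hypothetical */
    }
}
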
Example #5
File: vpl011.c  Project: fdario/xen
static uint8_t vpl011_read_data(struct domain *d)
{
    unsigned long flags;
    uint8_t data = 0;
    struct vpl011 *vpl011 = &d->arch.vpl011;
    struct xencons_interface *intf = vpl011->ring_buf;
    XENCONS_RING_IDX in_cons, in_prod;

    VPL011_LOCK(d, flags);

    in_cons = intf->in_cons;
    in_prod = intf->in_prod;

    smp_rmb();

    /*
     * There is expected to be data in the ring buffer when this function is
     * called, since the guest should read the data register only when the
     * RXFE (receive FIFO empty) flag is clear.
     * If the guest reads anyway while RXFE is set, 0 is returned.
     */
    if ( xencons_queued(in_prod, in_cons, sizeof(intf->in)) > 0 )
    {
        unsigned int fifo_level;

        data = intf->in[xencons_mask(in_cons, sizeof(intf->in))];
        in_cons += 1;
        smp_mb();
        intf->in_cons = in_cons;

        fifo_level = xencons_queued(in_prod, in_cons, sizeof(intf->in));

        /* FIFO now empty: set RXFE and clear the receive timeout interrupt. */
        if ( fifo_level == 0 )
        {
            vpl011->uartfr |= RXFE;
            vpl011->uartris &= ~RTI;
        }

        /* If the FIFO is more than half empty, we clear the RX interrupt. */
        if ( fifo_level < sizeof(intf->in) - SBSA_UART_FIFO_LEVEL )
            vpl011->uartris &= ~RXI;

        vpl011_update_interrupt_status(d);
    }
    else
        gprintk(XENLOG_ERR, "vpl011: Unexpected IN ring buffer empty\n");

    /*
     * We have consumed a character or the FIFO was empty, so clear the
     * "FIFO full" bit.
     */
    vpl011->uartfr &= ~RXFF;

    VPL011_UNLOCK(d, flags);

    /*
     * Send an event to console backend to indicate that data has been
     * read from the IN ring buffer.
     */
    notify_via_xen_event_channel(d, vpl011->evtchn);

    return data;
}
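
The read path depends on the console ring's free-running index convention: in_cons and in_prod increase monotonically and are never reset, so the occupancy is a plain subtraction and the buffer offset a power-of-two mask. The two helpers plausibly reduce to the following (an assumption; the authoritative definitions live in the console headers):

/*
 * Assumed shape of the helpers used above: free-running 32-bit indices
 * over a power-of-two buffer. Unsigned wrap-around keeps the subtraction
 * correct even when the indices overflow, which is why the code never
 * wraps in_cons back to zero.
 */
#define xencons_queued(prod, cons, size)  ((prod) - (cons))
#define xencons_mask(idx, size)           ((idx) & ((size) - 1))
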
Example #6
File: vpl011.c  Project: fdario/xen
static void vpl011_write_data(struct domain *d, uint8_t data)
{
    unsigned long flags;
    struct vpl011 *vpl011 = &d->arch.vpl011;
    struct xencons_interface *intf = vpl011->ring_buf;
    XENCONS_RING_IDX out_cons, out_prod;

    VPL011_LOCK(d, flags);

    out_cons = intf->out_cons;
    out_prod = intf->out_prod;

    smp_mb();

    /*
     * The ring is expected not to be full when this function is called,
     * since the guest should write to the data register only when the
     * TXFF (transmit FIFO full) flag is clear.
     * If the guest writes anyway while TXFF is set, the data is silently
     * dropped.
     */
    if ( xencons_queued(out_prod, out_cons, sizeof(intf->out)) !=
         sizeof(intf->out) )
    {
        unsigned int fifo_level;

        intf->out[xencons_mask(out_prod, sizeof(intf->out))] = data;
        out_prod += 1;
        smp_wmb();
        intf->out_prod = out_prod;

        fifo_level = xencons_queued(out_prod, out_cons, sizeof(intf->out));

        if ( fifo_level == sizeof(intf->out) )
        {
            vpl011->uartfr |= TXFF;

            /*
             * This bit is set only when FIFO becomes full. This ensures that
             * the SBSA UART driver can write the early console data as fast as
             * possible, without waiting for the BUSY bit to get cleared before
             * writing each byte.
             */
            vpl011->uartfr |= BUSY;
        }

        vpl011_update_tx_fifo_status(vpl011, fifo_level);

        vpl011_update_interrupt_status(d);
    }
    else
        gprintk(XENLOG_ERR, "vpl011: Unexpected OUT ring buffer full\n");

    vpl011->uartfr &= ~TXFE;

    VPL011_UNLOCK(d, flags);

    /*
     * Send an event to console backend to indicate that there is
     * data in the OUT ring buffer.
     */
    notify_via_xen_event_channel(d, vpl011->evtchn);
}
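
The console backend drains this OUT ring with the mirrored protocol: snapshot out_cons/out_prod, read barrier, consume bytes, publish the new out_cons behind another barrier, then kick the event channel back. A hedged sketch (emit_char() and kick_frontend() are illustrative stand-ins, not real Xen helpers):

/*
 * Hypothetical backend-side drain of the OUT ring written by
 * vpl011_write_data() above.
 */
static void console_drain_out(struct xencons_interface *intf)
{
    XENCONS_RING_IDX cons = intf->out_cons;
    XENCONS_RING_IDX prod = intf->out_prod;

    smp_rmb();                  /* pairs with the guest's smp_wmb() */

    while ( xencons_queued(prod, cons, sizeof(intf->out)) > 0 )
    {
        emit_char(intf->out[xencons_mask(cons, sizeof(intf->out))]);
        cons++;
    }

    smp_mb();                   /* consume before exposing the new cons */
    intf->out_cons = cons;

    kick_frontend();            /* wake the guest so it can refill */
}
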