Example #1
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_be_send_notify(&netdev->xendev);
    }

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}
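
net_tx_response() above and net_rx_response() in Example #3 push their
responses through the same macro from Xen's public ring.h. As a reference
point, RING_PUSH_RESPONSES_AND_CHECK_NOTIFY roughly expands to the sketch
below (a paraphrase of the header, not a verbatim copy): publish the private
producer index, then compare the new producer against the frontend's
rsp_event to decide whether an event-channel notification is actually needed.

/* Sketch (paraphrased from xen/io/ring.h): publish responses and decide
 * whether the frontend asked to be notified about them. */
#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY_SKETCH(_r, _notify) do {   \
    RING_IDX __old = (_r)->sring->rsp_prod;                             \
    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
    xen_wmb(); /* frontend sees responses before the new index */       \
    (_r)->sring->rsp_prod = __new;                                      \
    xen_mb();  /* frontend sees the index before we read rsp_event */   \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)
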
static void fb_event(struct XenDevice *xendev)
{
    struct XenFB *xenfb = container_of(xendev, struct XenFB, c.xendev);

    xenfb_handle_events(xenfb);
    xen_be_send_notify(&xenfb->c.xendev);
}
Example #3
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset     = offset;
    resp->flags      = flags;
    resp->id         = req->id;
    resp->status     = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;
    }

    xen_be_printf(&netdev->xendev, 3, "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_be_send_notify(&netdev->xendev);
    }
}

static void input_event(struct XenDevice *xendev)
{
    struct XenInput *xenfb = container_of(xendev, struct XenInput, c.xendev);
    struct xenkbd_page *page = xenfb->c.page;

    /* We don't understand any keyboard events, so just ignore them. */
    if (page->out_prod == page->out_cons) {
        return;
    }
    page->out_cons = page->out_prod;
    xen_be_send_notify(&xenfb->c.xendev);
}
Example #5
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio) {
        blk_send_response_all(blkdev);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio) {
        blk_send_response_all(blkdev);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
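
The RING_REQUEST_CONS_OVERFLOW() guard above is the standard sanity check
against a misbehaving frontend; paraphrased from xen/io/ring.h, it amounts
to roughly the following:

/* Sketch (paraphrased from xen/io/ring.h): the consumer index has run a
 * full ring ahead of the responses produced so far, which only a broken
 * or malicious frontend can cause. */
#define RING_REQUEST_CONS_OVERFLOW_SKETCH(_r, _cons) \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
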
Example #6
/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static void xenfb_send_event(struct XenFB *xenfb, union xenfb_in_event *event)
{
    uint32_t prod;
    struct xenfb_page *page = xenfb->c.page;

    prod = page->in_prod;
    /* caller ensures !xenfb_queue_full() */
    xen_mb();                   /* ensure ring space available */
    XENFB_IN_RING_REF(page, prod) = *event;
    xen_wmb();                  /* ensure ring contents visible */
    page->in_prod = prod + 1;

    xen_be_send_notify(&xenfb->c.xendev);
}
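
The caller contract noted in the comment, !xenfb_queue_full(), is a fullness
test on the same in-ring. A minimal sketch of what such a check looks like,
assuming the XENFB_IN_RING_LEN ring size from the framebuffer protocol
header:

/* Sketch of the fullness check callers are expected to perform before
 * xenfb_send_event(); the ring holds XENFB_IN_RING_LEN entries. */
static int xenfb_queue_full(struct XenFB *xenfb)
{
    struct xenfb_page *page = xenfb->c.page;
    uint32_t cons, prod;

    prod = page->in_prod;
    cons = page->in_cons;
    return prod - cons == XENFB_IN_RING_LEN;
}
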
Example #8
static void buffer_append(struct XenConsole *con)
{
    struct buffer *buffer = &con->buffer;
    XENCONS_RING_IDX cons, prod, size;
    struct xencons_interface *intf = con->sring;

    cons = intf->out_cons;
    prod = intf->out_prod;
    xen_mb();

    size = prod - cons;
    if ((size == 0) || (size > sizeof(intf->out))) {
        return;
    }

    if ((buffer->capacity - buffer->size) < size) {
        buffer->capacity += (size + 1024);
        buffer->data = qemu_realloc(buffer->data, buffer->capacity);
    }

    while (cons != prod) {
        buffer->data[buffer->size++] =
            intf->out[MASK_XENCONS_IDX(cons++, intf->out)];
    }

    xen_mb();
    intf->out_cons = cons;
    xen_be_send_notify(&con->xendev);

    if (buffer->max_capacity &&
        buffer->size > buffer->max_capacity) {
        /* Discard the middle of the data. */

        size_t over = buffer->size - buffer->max_capacity;
        uint8_t *maxpos = buffer->data + buffer->max_capacity;

        memmove(maxpos - over, maxpos, over);
        buffer->data = qemu_realloc(buffer->data, buffer->max_capacity);
        buffer->size = buffer->capacity = buffer->max_capacity;

        if (buffer->consumed > buffer->max_capacity - over) {
            buffer->consumed = buffer->max_capacity - over;
        }
    }
}
Example #9
static void xencons_receive(void *opaque, const uint8_t *buf, int len)
{
    struct XenConsole *con = opaque;
    struct xencons_interface *intf = con->sring;
    XENCONS_RING_IDX prod;
    int i, max;

    max = ring_free_bytes(con);
    /* The can_receive() func limits this, but check again anyway */
    if (max < len) {
        len = max;
    }

    prod = intf->in_prod;
    for (i = 0; i < len; i++) {
        intf->in[MASK_XENCONS_IDX(prod++, intf->in)] = buf[i];
    }
    xen_wmb();
    intf->in_prod = prod;
    xen_be_send_notify(&con->xendev);
}
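
ring_free_bytes() above is the helper the length clamp relies on. A sketch
of its expected shape, computing the free space left in the fixed-size input
ring and treating inconsistent indexes as an unusable ring:

/* Sketch of ring_free_bytes(): space left in the console input ring.
 * A cons/prod distance larger than the ring itself means the indexes
 * are corrupt, so report the ring as full. */
static int ring_free_bytes(struct XenConsole *con)
{
    struct xencons_interface *intf = con->sring;
    XENCONS_RING_IDX cons, prod, space;

    cons = intf->in_cons;
    prod = intf->in_prod;
    xen_mb();

    space = prod - cons;
    if (space > sizeof(intf->in)) {
        return 0; /* ring is screwed: ignore it */
    }
    return sizeof(intf->in) - space;
}
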
/* Send an event to the keyboard frontend driver */
static int xenfb_kbd_event(struct XenInput *xenfb,
                           union xenkbd_in_event *event)
{
    struct xenkbd_page *page = xenfb->c.page;
    uint32_t prod;

    if (xenfb->c.xendev.be_state != XenbusStateConnected) {
        return 0;
    }
    if (!page) {
        return 0;
    }

    prod = page->in_prod;
    if (prod - page->in_cons == XENKBD_IN_RING_LEN) {
        errno = EAGAIN;
        return -1;
    }

    xen_mb();       /* ensure ring space available */
    XENKBD_IN_RING_REF(page, prod) = *event;
    xen_wmb();      /* ensure ring contents visible */
    page->in_prod = prod + 1;
    return xen_be_send_notify(&xenfb->c.xendev);
}
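
A typical caller fills in a union xenkbd_in_event and hands it to
xenfb_kbd_event(). A minimal sketch for reporting a key press or release,
assuming the XENKBD_TYPE_KEY event type from the xenkbd protocol header:

/* Sketch of a caller: queue one key press/release for the frontend.
 * XENKBD_TYPE_KEY and the key sub-event come from the xenkbd protocol. */
static int xenfb_key_event(struct XenInput *xenfb, int pressed, int keycode)
{
    union xenkbd_in_event event;

    memset(&event, 0, sizeof(event));
    event.type = XENKBD_TYPE_KEY;
    event.key.pressed = pressed ? 1 : 0;
    event.key.keycode = keycode;
    return xenfb_kbd_event(xenfb, &event);
}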