Example 1
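Writing one character to the Xen console from a NetBSD kernel: dom0 uses the console I/O hypercall, while a domU spins with xen_rmb() until the shared output ring has room, stores the character, and publishes out_prod before kicking the event channel.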
void
xenconscn_putc(dev_t dev, int c)
{
    int s = spltty();
    XENCONS_RING_IDX cons, prod;

    if (xendomain_is_dom0()) {
        u_char buf[1];

        buf[0] = c;
        (void)HYPERVISOR_console_io(CONSOLEIO_write, 1, buf);
    } else {
        XENPRINTK(("xenconscn_putc(%c)\n", c));

        cons = xencons_interface->out_cons;
        prod = xencons_interface->out_prod;
        xen_rmb();
        while (prod == cons + sizeof(xencons_interface->out)) {
            cons = xencons_interface->out_cons;
            prod = xencons_interface->out_prod;
            xen_rmb();
        }
        xencons_interface->out[MASK_XENCONS_IDX(xencons_interface->out_prod,
                                                xencons_interface->out)] = c;
        xen_wmb();  /* make the character visible before publishing it */
        xencons_interface->out_prod++;
        xen_wmb();  /* publish out_prod before notifying the backend */
        hypervisor_notify_via_evtchn(xen_start_info.console.domU.evtchn);
    }
    splx(s);
}
Example 2
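The consumer side of the xenstore shared ring in NetBSD's xenbus code: the response indexes are sampled and validated, xen_rmb() keeps the header read ahead of the data read, and rsp_cons is advanced only after the payload has been copied out.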
int
xb_read(void *data, unsigned len)
{
	struct xenstore_domain_interface *intf = xenstore_domain_interface();
	XENSTORE_RING_IDX cons, prod;

	int s = spltty();

	while (len != 0) {
		unsigned int avail;
		const char *src;

		while (intf->rsp_cons == intf->rsp_prod)
			tsleep(&xenstore_interface, PRIBIO, "rdst", 0);

		/* Read indexes, then verify. */
		cons = intf->rsp_cons;
		prod = intf->rsp_prod;
		xen_rmb();
		if (!check_indexes(cons, prod)) {
			XENPRINTF(("xb_read EIO\n"));
			splx(s);
			return EIO;
		}

		src = get_input_chunk(cons, prod, intf->rsp, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* We must read header before we read data. */
		xen_rmb();

		memcpy(data, src, avail);
		data = (char *)data + avail;
		len -= avail;

		/* Other side must not see free space until we've copied out. */
		xen_mb();
		intf->rsp_cons += avail;
		xen_wmb();	/* publish rsp_cons before the notify below */

		XENPRINTF(("Finished read of %i bytes (%i to go)\n",
		    avail, len));

		hypervisor_notify_via_evtchn(xen_start_info.store_evtchn);
	}

	splx(s);
	return 0;
}
Example 3
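Polled console input for a Xen guest: the domU path spins on the input ring, with xen_rmb() separating the index reads from the read of the ring slot itself before in_cons is advanced.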
int
xenconscn_getc(dev_t dev)
{
    char c;
    int s = spltty();
    XENCONS_RING_IDX cons, prod;

    if (xencons_console_device && xencons_console_device->polling == 0) {
        printf("xenconscn_getc() but not polling\n");
        splx(s);
        return 0;
    }
    if (xendomain_is_dom0()) {
        while (HYPERVISOR_console_io(CONSOLEIO_read, 1, &c) == 0)
            ;
        cn_check_magic(dev, c, xencons_cnm_state);
        splx(s);
        return c;
    }
    if (xencons_console_device == NULL) {
        printf("xenconscn_getc(): not console\n");
        while (1)
            ;  /* loop here instead of in ddb */
        splx(s);
        return 0;
    }

    cons = xencons_interface->in_cons;
    prod = xencons_interface->in_prod;
    xen_rmb();
    while (cons == prod) {
        HYPERVISOR_yield();
        prod = xencons_interface->in_prod;
    }
    xen_rmb();
    c = xencons_interface->in[MASK_XENCONS_IDX(xencons_interface->in_cons,
                              xencons_interface->in)];
    xen_mb();   /* finish reading the character before freeing the slot */
    xencons_interface->in_cons = cons + 1;
    cn_check_magic(dev, c, xencons_cnm_state);
    splx(s);
    return c;
}
Example 4
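The producer side of the same xenstore ring: the mirror image of xb_read, where the request payload must be fully written into the ring before req_prod is published to the other side.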
int
xb_write(const void *data, unsigned len)
{
	struct xenstore_domain_interface *intf = xenstore_domain_interface();
	XENSTORE_RING_IDX cons, prod;

	int s = spltty();

	while (len != 0) {
		void *dst;
		unsigned int avail;

		while ((intf->req_prod - intf->req_cons) == XENSTORE_RING_SIZE) {
			XENPRINTF(("xb_write tsleep\n"));
			tsleep(&xenstore_interface, PRIBIO, "wrst", 0);
			XENPRINTF(("xb_write tsleep done\n"));
		}

		/* Read indexes, then verify. */
		cons = intf->req_cons;
		prod = intf->req_prod;
		xen_rmb();
		if (!check_indexes(cons, prod)) {
			splx(s);
			return EIO;
		}

		dst = get_output_chunk(cons, prod, intf->req, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* Must write data /after/ reading the consumer index. */
		xen_mb();

		memcpy(dst, data, avail);
		data = (const char *)data + avail;
		len -= avail;

		/* Other side must not see new header until data is there. */
		xen_wmb();
		intf->req_prod += avail;
		xen_wmb();	/* publish req_prod before the notify below */

		hypervisor_notify_via_evtchn(xen_start_info.store_evtchn);
	}

	splx(s);
	return 0;
}
Example 5
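From QEMU's Xen 9pfs backend: builds a scatter-gather view of the out ring. xen_rmb() orders the index reads ahead of any use of the ring contents, and the region is split into two iovecs when it wraps past the end of the ring.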
static void xen_9pfs_out_sg(Xen9pfsRing *ring,
                            struct iovec *out_sg,
                            int *num,
                            uint32_t idx)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_cons < masked_prod) {
        out_sg[0].iov_base = ring->ring.out + masked_cons;
        out_sg[0].iov_len = ring->out_size;
        *num = 1;
    } else {
        if (ring->out_size >
            (XEN_FLEX_RING_SIZE(ring->ring_order) - masked_cons)) {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) -
                                masked_cons;
            out_sg[1].iov_base = ring->ring.out;
            out_sg[1].iov_len = ring->out_size -
                                (XEN_FLEX_RING_SIZE(ring->ring_order) -
                                 masked_cons);
            *num = 2;
        } else {
            out_sg[0].iov_base = ring->ring.out + masked_cons;
            out_sg[0].iov_len = ring->out_size;
            *num = 1;
        }
    }
}
Example 6
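The matching helper for the in (response) ring: the writable region runs from the producer index to the consumer index, described as one iovec or two depending on wrap-around.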
static void xen_9pfs_in_sg(Xen9pfsRing *ring,
                           struct iovec *in_sg,
                           int *num,
                           uint32_t idx,
                           uint32_t size)
{
    RING_IDX cons, prod, masked_prod, masked_cons;

    cons = ring->intf->in_cons;
    prod = ring->intf->in_prod;
    xen_rmb();
    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    if (masked_prod < masked_cons) {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = masked_cons - masked_prod;
        *num = 1;
    } else {
        in_sg[0].iov_base = ring->ring.in + masked_prod;
        in_sg[0].iov_len = XEN_FLEX_RING_SIZE(ring->ring_order) - masked_prod;
        in_sg[1].iov_base = ring->ring.in;
        in_sg[1].iov_len = masked_cons;
        *num = 2;
    }
}
Example 7
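QEMU's Xen disk backend pulling requests off the shared ring: the xen_rmb() after sampling req_prod guarantees that every request the frontend queued up to rp is visible before it is copied out and parsed.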
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                /* fall through */
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
Example 8
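NetBSD's domU console interrupt handler: drains the input ring in contiguous chunks, re-reading both indexes under xen_rmb() if the polled xenconscn_getc() path has moved in_cons concurrently.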
/* Non-privileged console interrupt routine */
static int
xencons_handler(void *arg)
{
    struct xencons_softc *sc = arg;
    XENCONS_RING_IDX cons, prod, len;
    int s = spltty();

    if (sc->polling) {
        splx(s);
        return 1;
    }

#define XNC_IN (xencons_interface->in)

    cons = xencons_interface->in_cons;
    prod = xencons_interface->in_prod;
    xen_rmb();
    while (cons != prod) {
        if (MASK_XENCONS_IDX(cons, XNC_IN) <
                MASK_XENCONS_IDX(prod, XNC_IN))
            len = MASK_XENCONS_IDX(prod, XNC_IN) -
                  MASK_XENCONS_IDX(cons, XNC_IN);
        else
            len = sizeof(XNC_IN) - MASK_XENCONS_IDX(cons, XNC_IN);

        xencons_tty_input(sc, __UNVOLATILE(
                              &XNC_IN[MASK_XENCONS_IDX(cons, XNC_IN)]), len);
        if (__predict_false(xencons_interface->in_cons != cons)) {
            /* catch up with xenconscn_getc() */
            cons = xencons_interface->in_cons;
            prod = xencons_interface->in_prod;
            xen_rmb();
        } else {
            cons += len;
            xen_wmb();
            xencons_interface->in_cons = cons;
            xen_wmb();
        }
    }
    hypervisor_notify_via_evtchn(xen_start_info.console.domU.evtchn);
    splx(s);
    return 1;
#undef XNC_IN
}
Example 9
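NetBSD's Xen replacement for writing the x86 interrupt mask: after storing the new event-channel upcall mask, a barrier is needed before testing evtchn_upcall_pending so that an upcall delivered while interrupts were masked is not lost.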
void
x86_write_psl(u_long psl)
{
	struct cpu_info *ci = curcpu();

	ci->ci_vcpu->evtchn_upcall_mask = psl;
	xen_mb();	/* order the mask store before the pending-flag load */
	if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
		hypervisor_force_callback();
	}
}
Example 10
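An early QEMU xenfb backend event loop: xen_rmb() ensures the ring contents are seen up to out_prod before update and resize events are processed, and xen_mb() is issued before out_cons is written back.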
static void xenfb_on_fb_event(struct xenfb *xenfb)
{
	uint32_t prod, cons;
	struct xenfb_page *page = xenfb->fb.page;

	prod = page->out_prod;
	if (prod == page->out_cons)
		return;
	xen_rmb();		/* ensure we see ring contents up to prod */
	for (cons = page->out_cons; cons != prod; cons++) {
		union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, cons);
		int x, y, w, h;

		switch (event->type) {
		case XENFB_TYPE_UPDATE:
			x = MAX(event->update.x, 0);
			y = MAX(event->update.y, 0);
			w = MIN(event->update.width, xenfb->width - x);
			h = MIN(event->update.height, xenfb->height - y);
			if (w < 0 || h < 0) {
				fprintf(stderr, "%s bogus update ignored\n",
					xenfb->fb.nodename);
				break;
			}
			if (x != event->update.x || y != event->update.y
			    || w != event->update.width
			    || h != event->update.height) {
				fprintf(stderr, "%s bogus update clipped\n",
					xenfb->fb.nodename);
			}
			xenfb_guest_copy(xenfb, x, y, w, h);
			break;
		case XENFB_TYPE_RESIZE:
			if (xenfb_configure_fb(xenfb, xenfb->fb_len,
					       event->resize.width,
					       event->resize.height,
					       event->resize.depth,
					       xenfb->fb_len,
					       event->resize.offset,
					       event->resize.stride) < 0)
				break;
			if (xenfb->ds->dpy_resize_shared)
				dpy_resize_shared(xenfb->ds, xenfb->width,
						  xenfb->height, xenfb->depth,
						  xenfb->row_stride,
						  xenfb->pixels + xenfb->offset);
			else
				dpy_resize(xenfb->ds, xenfb->width, xenfb->height);
			xenfb_invalidate(xenfb);
			break;
		}
	}
	xen_mb();		/* ensure we're done with ring contents */
	page->out_cons = cons;
	xc_evtchn_notify(xenfb->evt_xch, xenfb->fb.port);
}
Example 11
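An older revision of the QEMU disk backend loop from example 7, which still supports both synchronous and AIO submission paths.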
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio) {
        blk_send_response_all(blkdev);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio) {
        blk_send_response_all(blkdev);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
Example 12
File: io.c Project: CPFL/gxen
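The libxenvchan receive path: xen_rmb() keeps the data reads after the rd_cons read, and xen_mb() orders consumption ahead of the notification, exactly as the inline comments spell out.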
static int do_recv(struct libxenvchan *ctrl, void *data, size_t size)
{
	int real_idx = rd_cons(ctrl) & (rd_ring_size(ctrl) - 1);
	int avail_contig = rd_ring_size(ctrl) - real_idx;
	if (avail_contig > size)
		avail_contig = size;
	xen_rmb(); /* data read must happen /after/ rd_cons read */
	memcpy(data, rd_ring(ctrl) + real_idx, avail_contig);
	if (avail_contig < size)
	{
		// we rolled across the end of the ring
		memcpy((char *)data + avail_contig, rd_ring(ctrl), size - avail_contig);
	}
	xen_mb(); /* consume /then/ notify */
	rd_cons(ctrl) += size;
	if (send_notify(ctrl, VCHAN_NOTIFY_READ))
		return -1;
	return size;
}
Example 13
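QEMU's Xen network backend receiving a packet from the host: one RX request is consumed per packet, with xen_rmb() issued between sampling req_prod and reading the request descriptor.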
static ssize_t net_rx_packet(VLANClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = DO_UPCAST(NICState, nc, nc)->opaque;
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected) {
        return -1;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        xen_be_printf(&netdev->xendev, 2, "no buffer, drop packet\n");
        return -1;
    }
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                   netdev->xendev.dom,
                                   rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    memcpy(page + NET_IP_ALIGN, buf, size);
    xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}
Example 14
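A cheap readiness probe on the same RX ring, using the identical index-read-then-xen_rmb() pattern before reporting whether a buffer is available.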
static int net_rx_ok(VLANClientState *vc)
{
    struct XenNetDev *netdev = vc->opaque;
    RING_IDX rc, rp;

    if (netdev->xendev.be_state != XenbusStateConnected)
	return 0;

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb();

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
	xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n",
		      __FUNCTION__, rc, rp);
	return 0;
    }
    return 1;
}
Example 15
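The completion path of QEMU's 9pfs backend: the consumer index is published under xen_wmb(), then in_prod is re-read, bumped by the PDU size, and republished before the event-channel notify.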
static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
{
    RING_IDX prod;
    Xen9pfsDev *priv = container_of(pdu->s, Xen9pfsDev, state);
    Xen9pfsRing *ring = &priv->rings[pdu->tag % priv->num_rings];

    g_free(ring->sg);
    ring->sg = NULL;

    ring->intf->out_cons = ring->out_cons;
    xen_wmb();

    prod = ring->intf->in_prod;
    xen_rmb();
    ring->intf->in_prod = prod + pdu->size;
    xen_wmb();

    ring->inprogress = false;
    xenevtchn_notify(ring->evtchndev, ring->local_port);

    qemu_bh_schedule(ring->bh);
}
Example 16
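The request path of the 9pfs backend: after xen_rmb(), the 9p header is peeked off the out ring to decide whether a complete request has arrived before a PDU is allocated and submitted.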
static int xen_9pfs_receive(Xen9pfsRing *ring)
{
    P9MsgHeader h;
    RING_IDX cons, prod, masked_prod, masked_cons, queued;
    V9fsPDU *pdu;

    if (ring->inprogress) {
        return 0;
    }

    cons = ring->intf->out_cons;
    prod = ring->intf->out_prod;
    xen_rmb();

    queued = xen_9pfs_queued(prod, cons, XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < sizeof(h)) {
        return 0;
    }
    ring->inprogress = true;

    masked_prod = xen_9pfs_mask(prod, XEN_FLEX_RING_SIZE(ring->ring_order));
    masked_cons = xen_9pfs_mask(cons, XEN_FLEX_RING_SIZE(ring->ring_order));

    xen_9pfs_read_packet((uint8_t *) &h, ring->ring.out, sizeof(h),
                         masked_prod, &masked_cons,
                         XEN_FLEX_RING_SIZE(ring->ring_order));
    if (queued < le32_to_cpu(h.size_le)) {
        return 0;
    }

    /* cannot fail, because we only handle one request per ring at a time */
    pdu = pdu_alloc(&ring->priv->state);
    ring->out_size = le32_to_cpu(h.size_le);
    ring->out_cons = cons + le32_to_cpu(h.size_le);

    pdu_submit(pdu, &h);

    return 0;
}
Example 17
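From a blktap-style block backend: fetches a batch of request descriptors, stopping at the first write-barrier request; the xen_rmb() after reading req_prod ensures the loop copies descriptors the frontend has finished writing.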
/**
 * Retrieves at most @count request descriptors from the ring, up to the
 * first barrier request, copying them to @reqs.
 *
 * @param blkif the block interface
 * @param reqs array of pointers, each pointing to memory large enough to
 * receive one request descriptor
 * @param count retrieve at most that many request descriptors
 * @returns the number of retrieved request descriptors
 *
 *  XXX only called by xenio_blkif_get_requests
 */
static inline int
__xenio_blkif_get_requests(struct td_xenblkif * const blkif,
        blkif_request_t *reqs[], const unsigned int count)
{
    blkif_common_back_ring_t * ring;
    RING_IDX rp, rc;
    unsigned int n;
    bool barrier;

    ASSERT(blkif);
    ASSERT(reqs);

    if (!count)
        return 0;

    ring = &blkif->rings.common;

    rp = ring->sring->req_prod;
    xen_rmb(); /* ensure the requests up to req_prod are visible before copying them */

    for (rc = ring->req_cons, n = 0, barrier = false;
         rc != rp && n < count && !barrier;
         rc++, n++) {

        blkif_request_t *dst = reqs[n];

        xenio_blkif_get_request(blkif, dst, rc);

        if (unlikely(dst->operation == BLKIF_OP_WRITE_BARRIER))
            barrier = true;
    }

    ring->req_cons = rc;

    return n;
}
Example 18
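The TX loop of QEMU's Xen network backend: each request is validated (minimum frame size, no page crossing, no unsupported flags), the granted page is mapped read-only, and the packet is forwarded, bouncing through a temporary buffer when the checksum must be filled in.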
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((rc != rp)) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the *
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_be_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_be_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            if (txreq.size < 14) { /* shorter than an Ethernet header */
                xen_be_printf(&netdev->xendev, 0, "bad packet size: %d\n", txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_be_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_be_printf(&netdev->xendev, 3, "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                           netdev->xendev.dom,
                                           txreq.gref, PROT_READ);
            if (page == NULL) {
                xen_be_printf(&netdev->xendev, 0, "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XC_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(&netdev->nic->nc, tmpbuf, txreq.size);
            } else {
                qemu_send_packet(&netdev->nic->nc, page + txreq.offset, txreq.size);
            }
            xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}
Example 19
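From libxc's ia64 support: copies the guest memory map, using xen_rmb() to re-read memmap_info_num_pages after the copy and retrying whenever the guest changed the map size in the meantime.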
int
xc_ia64_copy_memmap(int xc_handle, uint32_t domid, shared_info_t *live_shinfo,
                    xen_ia64_memmap_info_t **memmap_info_p,
                    unsigned long *memmap_info_num_pages_p)
{
    long gpfn_max_prev;     /* signed: xc_memory_op() returns < 0 on error */
    long gpfn_max_post;

    unsigned long num_pages;
    unsigned long num_pages_post;
    unsigned long memmap_size;
    xen_ia64_memmap_info_t *memmap_info;

    int ret;

    gpfn_max_prev = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
    if (gpfn_max_prev < 0)
        return -1;

 again:
    num_pages = live_shinfo->arch.memmap_info_num_pages;
    if (num_pages == 0) {
        ERROR("num_pages 0x%x", num_pages);
        return -1;
    }

    memmap_size = num_pages << PAGE_SHIFT;
    memmap_info = malloc(memmap_size);
    if (memmap_info == NULL)
        return -1;
    ret = xc_ia64_get_memmap(xc_handle,
                             domid, (char*)memmap_info, memmap_size);
    if (ret != 0) {
        free(memmap_info);
        return -1;
    }
    xen_rmb();
    num_pages_post = live_shinfo->arch.memmap_info_num_pages;
    if (num_pages != num_pages_post) {
        free(memmap_info);
        num_pages = num_pages_post;
        goto again;
    }

    gpfn_max_post = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
    if (gpfn_max_post < 0) {
        free(memmap_info);
        return -1;
    }
    if (gpfn_max_post > gpfn_max_prev) {
        free(memmap_info);
        gpfn_max_prev = gpfn_max_post;
        goto again;
    }

    /* reject unknown memmap */
    if (memmap_info->efi_memdesc_size != sizeof(efi_memory_desc_t) ||
        (memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size) == 0 ||
        memmap_info->efi_memmap_size >
        (num_pages << PAGE_SHIFT) - sizeof(*memmap_info) ||
        memmap_info->efi_memdesc_version != EFI_MEMORY_DESCRIPTOR_VERSION) {
        PERROR("unknown memmap header. defaulting to compat mode.");
        free(memmap_info);
        return -1;
    }

    *memmap_info_p = memmap_info;
    if (memmap_info_num_pages_p != NULL)
        *memmap_info_num_pages_p = num_pages;

    return 0;
}
Example 20
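NetBSD's console output start routine: dom0 drains the tty queue through the console hypercall, while a domU copies as much as fits into the shared output ring and publishes out_prod under write barriers.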
void
xencons_start(struct tty *tp)
{
    struct clist *cl;
    int s;

    s = spltty();
    if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
        goto out;
    tp->t_state |= TS_BUSY;
    splx(s);

    /*
     * We need to do this outside spl since it could be fairly
     * expensive and we don't want our serial ports to overflow.
     */
    cl = &tp->t_outq;
    if (xendomain_is_dom0()) {
        int len, r;
        u_char buf[XENCONS_BURST+1];

        len = q_to_b(cl, buf, XENCONS_BURST);
        while (len > 0) {
            r = HYPERVISOR_console_io(CONSOLEIO_write, len, buf);
            if (r <= 0)
                break;
            len -= r;
        }
    } else {
        XENCONS_RING_IDX cons, prod, len;

#define XNC_OUT (xencons_interface->out)
        cons = xencons_interface->out_cons;
        prod = xencons_interface->out_prod;
        xen_rmb();
        while (prod != cons + sizeof(xencons_interface->out)) {
            if (MASK_XENCONS_IDX(prod, XNC_OUT) <
                    MASK_XENCONS_IDX(cons, XNC_OUT)) {
                len = MASK_XENCONS_IDX(cons, XNC_OUT) -
                      MASK_XENCONS_IDX(prod, XNC_OUT);
            } else {
                len = sizeof(XNC_OUT) -
                      MASK_XENCONS_IDX(prod, XNC_OUT);
            }
            len = q_to_b(cl, __UNVOLATILE(
                             &XNC_OUT[MASK_XENCONS_IDX(prod, XNC_OUT)]), len);
            if (len == 0)
                break;
            prod = prod + len;
        }
        xen_wmb();
        xencons_interface->out_prod = prod;
        xen_wmb();
        hypervisor_notify_via_evtchn(xen_start_info.console.domU.evtchn);
#undef XNC_OUT
    }

    s = spltty();
    tp->t_state &= ~TS_BUSY;
    if (ttypull(tp)) {
        tp->t_state |= TS_TIMEOUT;
        callout_schedule(&tp->t_rstrt_ch, 1);
    }
out:
    splx(s);
}
Example 21
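A later QEMU xenfb event loop, structurally the same as example 10, with a scroll detector that collapses large updates into a full-screen refresh.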
static void xenfb_handle_events(struct XenFB *xenfb)
{
    uint32_t prod, cons;
    struct xenfb_page *page = xenfb->c.page;

    prod = page->out_prod;
    if (prod == page->out_cons)
	return;
    xen_rmb();		/* ensure we see ring contents up to prod */
    for (cons = page->out_cons; cons != prod; cons++) {
	union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, cons);
	int x, y, w, h;

	switch (event->type) {
	case XENFB_TYPE_UPDATE:
	    if (xenfb->up_count == UP_QUEUE)
		xenfb->up_fullscreen = 1;
	    if (xenfb->up_fullscreen)
		break;
	    x = MAX(event->update.x, 0);
	    y = MAX(event->update.y, 0);
	    w = MIN(event->update.width, xenfb->width - x);
	    h = MIN(event->update.height, xenfb->height - y);
	    if (w < 0 || h < 0) {
                xen_be_printf(&xenfb->c.xendev, 1, "bogus update ignored\n");
		break;
	    }
	    if (x != event->update.x ||
                y != event->update.y ||
		w != event->update.width ||
		h != event->update.height) {
                xen_be_printf(&xenfb->c.xendev, 1, "bogus update clipped\n");
	    }
	    if (w == xenfb->width && h > xenfb->height / 2) {
		/* scroll detector: updated more than 50% of the lines,
		 * don't bother keeping track of the rectangles then */
		xenfb->up_fullscreen = 1;
	    } else {
		xenfb->up_rects[xenfb->up_count].x = x;
		xenfb->up_rects[xenfb->up_count].y = y;
		xenfb->up_rects[xenfb->up_count].w = w;
		xenfb->up_rects[xenfb->up_count].h = h;
		xenfb->up_count++;
	    }
	    break;
#ifdef XENFB_TYPE_RESIZE
	case XENFB_TYPE_RESIZE:
	    if (xenfb_configure_fb(xenfb, xenfb->fb_len,
				   event->resize.width,
				   event->resize.height,
				   event->resize.depth,
				   xenfb->fb_len,
				   event->resize.offset,
				   event->resize.stride) < 0)
		break;
	    xenfb_invalidate(xenfb);
	    break;
#endif
	}
    }
    xen_mb();		/* ensure we're done with ring contents */
    page->out_cons = cons;
}
Example 22
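A worker thread from Xen's userspace filesystem backend: it drains the request ring with the usual read-prod/xen_rmb()/consume pattern, then uses RING_FINAL_CHECK_FOR_REQUESTS to close the race with the frontend before pushing responses and notifying.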
static void *handle_mount(void *data)
{
    int more, notify;
    struct fs_mount *mount = (struct fs_mount *)data;
    
    printf("Starting a thread for mount: %d\n", mount->mount_id);
    allocate_request_array(mount);

    for(;;)
    {
        int nr_consumed=0;
        RING_IDX cons, rp;
        struct fsif_request *req;

        handle_aio_events(mount);
moretodo:
        rp = mount->ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((cons = mount->ring.req_cons) != rp)
        {
            int i;
            struct fs_op *op;

            printf("Got a request at %d (of %d)\n", 
                    cons, RING_SIZE(&mount->ring));
            req = RING_GET_REQUEST(&mount->ring, cons);
            printf("Request type=%d\n", req->type); 
            for(i=0;;i++)
            {
                op = fsops[i];
                if(op == NULL)
                {
                    /* We've reached the end of the array, no appropriate
                     * handler found. Warn, ignore and continue. */
                    printf("WARN: Unknown request type: %d\n", req->type);
                    mount->ring.req_cons++; 
                    break;
                }
                if(op->type == req->type)
                {
                    /* There needs to be a dispatch handler */
                    assert(op->dispatch_handler != NULL);
                    op->dispatch_handler(mount, req);
                    break;
                }
             }

            nr_consumed++;
        }
        printf("Backend consumed: %d requests\n", nr_consumed);
        RING_FINAL_CHECK_FOR_REQUESTS(&mount->ring, more);
        if(more) goto moretodo;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
        printf("Pushed responces and notify=%d\n", notify);
        if(notify)
            xc_evtchn_notify(mount->evth, mount->local_evtchn);
    }
 
    printf("Destroying thread for mount: %d\n", mount->mount_id);
    xc_gnttab_munmap(mount->gnth, mount->ring.sring, 1);
    xc_gnttab_close(mount->gnth);
    xc_evtchn_unbind(mount->evth, mount->local_evtchn);
    xc_evtchn_close(mount->evth);
    free(mount->frontend);
    pthread_exit(NULL);
}
Example 23
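A trace-buffer monitor from the Xen tools (the xenmon/xentrace family): per-CPU cons and prod are sampled with xen_rmb() before records are processed in at most two contiguous runs, and xen_mb() is issued before cons is published back.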
/**
 * monitor_tbufs - monitor the contents of tbufs
 */
static int monitor_tbufs(void)
{
    int i;

    struct t_struct *tbufs;      /* Pointer to hypervisor maps */
    struct t_buf **meta;         /* pointers to the trace buffer metadata    */
    unsigned char **data;        /* pointers to the trace buffer data areas
                                  * where they are mapped into user space.   */
    unsigned long tbufs_mfn;     /* mfn of the tbufs                         */
    unsigned int  num;           /* number of trace buffers / logical CPUs   */
    unsigned long tinfo_size;    /* size of t_info metadata map              */
    unsigned long size;          /* size of a single trace buffer            */

    unsigned long data_size, rec_size;

    /* get number of logical CPUs (and therefore number of trace buffers) */
    num = get_num_cpus();

    init_current(num);
    alloc_qos_data(num);

    printf("CPU Frequency = %7.2f\n", opts.cpu_freq);
    
    /* setup access to trace buffers */
    get_tbufs(&tbufs_mfn, &tinfo_size);
    tbufs = map_tbufs(tbufs_mfn, num, tinfo_size);

    size = tbufs->t_info->tbuf_size * XC_PAGE_SIZE;

    data_size = size - sizeof(struct t_buf);

    meta = tbufs->meta;
    data = tbufs->data;

    if ( eventchn_init() < 0 )
        fprintf(stderr, "Failed to initialize event channel; "
                "Using POLL method\r\n");

    /* now, scan buffers for events */
    while ( !interrupted )
    {
        for ( i = 0; (i < num) && !interrupted; i++ )
        {
            unsigned long start_offset, end_offset, cons, prod;

            cons = meta[i]->cons;
            prod = meta[i]->prod;
            xen_rmb(); /* read prod, then read item. */

            if ( cons == prod )
                continue;

            start_offset = cons % data_size;
            end_offset = prod % data_size;

            if ( start_offset >= end_offset )
            {
                while ( start_offset != data_size )
                {
                    rec_size = process_record(
                        i, (struct t_rec *)(data[i] + start_offset));
                    start_offset += rec_size;
                }
                start_offset = 0;
            }
            while ( start_offset != end_offset )
            {
                rec_size = process_record(
                    i, (struct t_rec *)(data[i] + start_offset));
                start_offset += rec_size;
            }
            xen_mb(); /* read item, then update cons. */
            meta[i]->cons = prod;
        }

        wait_for_event();
        wakeups++;
    }

    /* cleanup */
    free(meta);
    free(data);
    /* don't need to munmap - cleanup is automatic */

    return 0;
}
Example 24
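The modern QEMU block dataplane version of the loop from examples 7 and 11, adding blk_io_plug()/blk_io_unplug() batching when many requests are already in flight.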
static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, this is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_plug(dataplane->blk);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
                /* fall through */
            default:
                break;
            }

            if (xen_block_send_response(request)) {
                Error *local_err = NULL;

                xen_device_notify_event_channel(dataplane->xendev,
                                                dataplane->event_channel,
                                                &local_err);
                if (local_err) {
                    error_report_err(local_err);
                }
            }
            xen_block_release_request(request);
            continue;
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            blk_io_unplug(dataplane->blk);
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                blk_io_plug(dataplane->blk);
                batched = 0;
            } else {
                batched++;
            }
        }
    }
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_unplug(dataplane->blk);
    }

    if (dataplane->more_work &&
        dataplane->requests_inflight < dataplane->max_requests) {
        qemu_bh_schedule(dataplane->bh);
    }
}