Example #1: free_netfront
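/* Tear down a netfront device: drain the TX semaphore so no transmits are in
 * flight, mask and later unbind the event channel, revoke the grants on the
 * shared rings and the RX buffers, and free the ring pages, buffer pages and
 * the device structure itself. */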
static void free_netfront(struct netfront_dev *dev)
{
    int i;

    for(i=0;i<NET_TX_RING_SIZE;i++)
	down(&dev->tx_sem);

    mask_evtchn(dev->evtchn);

    free(dev->mac);
    free(dev->backend);

    gnttab_end_access(dev->rx_ring_ref);
    gnttab_end_access(dev->tx_ring_ref);

    free_page(dev->rx.sring);
    free_page(dev->tx.sring);

    unbind_evtchn(dev->evtchn);

    for(i=0;i<NET_RX_RING_SIZE;i++) {
	gnttab_end_access(dev->rx_buffers[i].gref);
	free_page(dev->rx_buffers[i].page);
    }

    for(i=0;i<NET_TX_RING_SIZE;i++)
	if (dev->tx_buffers[i].page)
	    free_page(dev->tx_buffers[i].page);

    free(dev->nodename);
    free(dev);
}
Example #2: free_netfront
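/* Variant of free_netfront with netmap support: devices in netmap mode only
 * free the MAC and backend strings and return early; otherwise the TX
 * semaphore is drained, the event channel masked and unbound, and the ring
 * and buffer grants are revoked before their pages are freed. */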
static void free_netfront(struct netfront_dev *dev)
{
    int i;

    free(dev->mac);
    free(dev->backend);
#ifdef CONFIG_NETMAP
    if (dev->netmap)
        return;
#endif
    for(i=0;i<NET_TX_RING_SIZE;i++)
	down(&dev->tx_sem);

    mask_evtchn(dev->evtchn);

    gnttab_end_access(dev->rx_ring_ref);
    gnttab_end_access(dev->tx_ring_ref);

    free_page(dev->rx.sring);
    free_page(dev->tx.sring);

    unbind_evtchn(dev->evtchn);

    for(i=0;i<NET_RX_RING_SIZE;i++) {
	gnttab_end_access(dev->rx_buffers[i].gref);
	free_page(dev->rx_buffers[i].page);
    }

    for(i=0;i<NET_TX_RING_SIZE;i++) {
	if (dev->tx_buffers[i].page) {
	    gnttab_end_access(dev->tx_buffers[i].gref);
	    free_page(dev->tx_buffers[i].page);
	}
    }
}
Example #3: netfront_tx_buf_gc
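/* Garbage-collect completed TX requests: walk the new responses and, with
 * non-persistent grants, revoke each buffer's grant and free any attached
 * pbuf, then return the id to the freelist and release one TX semaphore
 * slot. Finally re-arm rsp_event and re-check for a race with the backend. */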
static void netfront_tx_buf_gc(struct netfront_dev *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	struct net_txbuffer *buf;
#endif

	do {
		prod = dev->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = dev->tx.rsp_cons; cons != prod; cons++) {
			struct netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&dev->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			if (txrsp->status == NETIF_RSP_DROPPED)
				printk("netif drop for tx\n");

			if (txrsp->status == NETIF_RSP_ERROR)
				printk("netif error for tx\n");

			id  = txrsp->id;
			BUG_ON(id >= NET_TX_RING_SIZE);

#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
			buf = &dev->tx_buffers[id];
			gnttab_end_access(buf->gref);
			buf->gref = GRANT_INVALID_REF;
#ifdef HAVE_LWIP
			if (buf->pbuf) {
				pbuf_free(buf->pbuf);
				buf->pbuf = NULL;
			}
#endif /* HAVE_LWIP */
#endif /* CONFIG_NETFRONT_PERSISTENT_GRANTS */
			add_id_to_freelist(id, dev->tx_freelist);
			up(&dev->tx_sem);
		}

		dev->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few tx_buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		dev->tx.sring->rsp_event =
			prod + ((dev->tx.sring->req_prod - prod) >> 1) + 1;
		mb();
	} while ((cons == prod) && (prod != dev->tx.sring->rsp_prod));
}
Example #4: free_blkfront
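/* Release a blkfront device: mask and unbind its event channel, revoke the
 * grant on the shared ring, free the ring page, and free the remaining
 * strings and the device structure. */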
static void free_blkfront(struct blkfront_dev *dev)
{
    mask_evtchn(dev->evtchn);

    free(dev->backend);

    gnttab_end_access(dev->ring_ref);
    free_page(dev->ring.sring);

    unbind_evtchn(dev->evtchn);

    free(dev->nodename);
    free(dev);
}
Example #5: free_pcifront
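/* Release a pcifront device: mask and unbind its event channel, revoke the
 * grant on the shared info page, free that page, and free the remaining
 * strings and the device structure. */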
static void free_pcifront(struct pcifront_dev *dev)
{
    mask_evtchn(dev->evtchn);

    free(dev->backend);

    gnttab_end_access(dev->info_ref);
    free_page(dev->info);

    unbind_evtchn(dev->evtchn);

    free(dev->nodename);
    free(dev);
}
Example #6: network_tx_buf_gc
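/* Reclaim completed TX slots: for every new response, revoke the grant on
 * the transmitted buffer, return its id to the freelist and release one TX
 * semaphore slot; then re-arm rsp_event and loop if more responses raced in. */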
void network_tx_buf_gc(struct netfront_dev *dev)
{
    RING_IDX cons, prod;
    unsigned short id;

    do {
        prod = dev->tx.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'rp'. */
        //printk("cons = %ld, prod = %ld\n", dev->tx.rsp_cons, prod); //farewellkou

        for (cons = dev->tx.rsp_cons; cons != prod; cons++) 
        {
            struct netif_tx_response *txrsp;
            struct net_buffer *buf;

            txrsp = RING_GET_RESPONSE(&dev->tx, cons);
            if (txrsp->status == NETIF_RSP_NULL){
                continue;
            }

            if (txrsp->status == NETIF_RSP_ERROR){
                printk("packet error\n");
            }

            id  = txrsp->id;
            BUG_ON(id >= NET_TX_RING_SIZE);
            buf = &dev->tx_buffers[id];
            gnttab_end_access(buf->gref);
            buf->gref=GRANT_INVALID_REF;

	    add_id_to_freelist(id,dev->tx_freelist);
	    up(&dev->tx_sem);
        }

        dev->tx.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons.
         * Note that it is essential to schedule a callback, no matter
         * how few tx_buffers are pending. Even if there is space in the
         * transmit ring, higher layers may be blocked because too much
         * data is outstanding: in such cases notification from Xen is
         * likely to be the only kick that we'll get.
         */
        dev->tx.sring->rsp_event =
            prod + ((dev->tx.sring->req_prod - prod) >> 1) + 1;
        mb();
    } while ((cons == prod) && (prod != dev->tx.sring->rsp_prod));
}
Example #7: tpmfront_connect
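/* Bring the TPM frontend up: allocate and grant the shared page, set up an
 * unbound event channel, publish the details in xenstore and move the device
 * to XenbusStateConnected; on failure, unwind in reverse order. */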
static int tpmfront_connect(struct tpmfront_dev* dev)
{
   char* err;
   /* Create shared page */
   dev->page = (vtpm_shared_page_t*) alloc_page();
   if(dev->page == NULL) {
      TPMFRONT_ERR("Unable to allocate page for shared memory\n");
      goto error;
   }
   memset(dev->page, 0, PAGE_SIZE);
   dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->page), 0);
   TPMFRONT_DEBUG("grant ref is %lu\n", (unsigned long) dev->ring_ref);

   /*Create event channel */
   if(evtchn_alloc_unbound(dev->bedomid, tpmfront_handler, dev, &dev->evtchn)) {
      TPMFRONT_ERR("Unable to allocate event channel\n");
      goto error_postmap;
   }
   unmask_evtchn(dev->evtchn);
   TPMFRONT_DEBUG("event channel is %lu\n", (unsigned long) dev->evtchn);

   /* Write the entries to xenstore */
   if(publish_xenbus(dev)) {
      goto error_postevtchn;
   }

   /* Change state to connected */
   dev->state = XenbusStateConnected;

   /* Tell the backend that we are ready */
   if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", dev->state))) {
      TPMFRONT_ERR("Unable to write to xenstore %s/state, value=%u", dev->nodename, XenbusStateConnected);
      free(err);
      goto error;
   }

   return 0;
error_postevtchn:
      mask_evtchn(dev->evtchn);
      unbind_evtchn(dev->evtchn);
error_postmap:
      gnttab_end_access(dev->ring_ref);
      free_page(dev->page);
error:
   return -1;
}
Example #8: free_consfront
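/* Shut down a console frontend: walk the xenbus state to Closing and then
 * Closed, stop watching the backend state, mask and unbind the event
 * channel, revoke the ring grant, and free the ring page and device. */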
void free_consfront(struct consfront_dev *dev)
{
    char* err = NULL;
    XenbusState state;

    char path[strlen(dev->backend) + 1 + 5 + 1];
    char nodename[strlen(dev->nodename) + 1 + 5 + 1];

    snprintf(path, sizeof(path), "%s/state", dev->backend);
    snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

    if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing)) != NULL) {
        printk("free_consfront: error changing state to %d: %s\n",
                XenbusStateClosing, err);
        goto close;
    }
    state = xenbus_read_integer(path);
    while (err == NULL && state < XenbusStateClosing)
        err = xenbus_wait_for_state_change(path, &state, &dev->events);
    if (err) free(err);

    if ((err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed)) != NULL) {
        printk("free_consfront: error changing state to %d: %s\n",
                XenbusStateClosed, err);
        goto close;
    }

close:
    if (err) free(err);
    xenbus_unwatch_path_token(XBT_NIL, path, path);

    mask_evtchn(dev->evtchn);
    unbind_evtchn(dev->evtchn);
    free(dev->backend);
    free(dev->nodename);

    gnttab_end_access(dev->ring_ref);

    free_page(dev->ring);
    free(dev);
}
Example #9: netfront_get_extras
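/* Consume the extra-info slots (e.g. GSO metadata) that follow an RX
 * response; with non-persistent grants the grant of each consumed slot's
 * buffer is revoked and the buffer is returned to the pool. */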
/*
 * Reads extra slots to check for a GSO packet
 */
static int netfront_get_extras(struct netfront_dev *dev,
			       struct netif_extra_info *extras, RING_IDX ri)
{
	struct netif_extra_info *extra;
	RING_IDX cons = dev->rx.rsp_cons;
	int err = 0;
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	struct net_rxbuffer *buf;
#endif

	do {
		extra = (struct netif_extra_info *)
			RING_GET_RESPONSE(&dev->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			printk("Invalid extra type: %d\n", extra->type);
			err = -EINVAL;
		} else {
			dprintk("rx: scan: extra %u %s\n", extra->type,
				(extra->flags & XEN_NETIF_EXTRA_FLAG_MORE
					? "(more true)": ""));
			NETIF_MEMCPY(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
		buf = dev->rx_buffers[netfront_rxidx(cons)];
		gnttab_end_access(buf->gref);
		buf->gref = GRANT_INVALID_REF;
		dev->rx_buffers[netfront_rxidx(cons)] = NULL;
		netfront_release_rxbuffer(buf, dev);
#endif
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	dev->rx.rsp_cons = cons;
	return err;
}
Example #10: network_rx
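/* Handle received packets: pass each response's data to the configured
 * receive callback, revoke the buffer's grant, then re-grant the pages,
 * re-post the RX requests and notify the backend if needed. */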
void network_rx(struct netfront_dev *dev)
{
    RING_IDX rp,cons,req_prod;
    struct netif_rx_response *rx;
    int nr_consumed, some, more, i, notify;


moretodo:
    rp = dev->rx.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->rx.rsp_cons;

    nr_consumed = 0;
    some = 0;
    while ((cons != rp) && !some)
    {
        struct net_buffer* buf;
        unsigned char* page;
        int id;

        rx = RING_GET_RESPONSE(&dev->rx, cons);

        if (rx->flags & NETRXF_extra_info)
        {
            printk("+++++++++++++++++++++ we have extras!\n");
            continue;
        }


        if (rx->status == NETIF_RSP_NULL) continue;

        id = rx->id;
        BUG_ON(id >= NET_TX_RING_SIZE);

        buf = &dev->rx_buffers[id];
        page = (unsigned char*)buf->page;
        gnttab_end_access(buf->gref);

        if(rx->status>0)
        {
#ifdef HAVE_LIBC
	    if (dev->netif_rx == NETIF_SELECT_RX) {
		int len = rx->status;
		ASSERT(current == main_thread);
		if (len > dev->len)
		    len = dev->len;
		memcpy(dev->data, page+rx->offset, len);
		dev->rlen = len;
		some = 1;
	    } else
#endif
		dev->netif_rx(page+rx->offset,rx->status);
        }

        nr_consumed++;

        ++cons;
    }
    dev->rx.rsp_cons=cons;

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx,more);
    if(more && !some) goto moretodo;

    req_prod = dev->rx.req_prod_pvt;

    for(i=0; i<nr_consumed; i++)
    {
        int id = xennet_rxidx(req_prod + i);
        netif_rx_request_t *req = RING_GET_REQUEST(&dev->rx, req_prod + i);
        struct net_buffer* buf = &dev->rx_buffers[id];
        void* page = buf->page;

        /* We are sure to have free gnttab entries since they got released above */
        buf->gref = req->gref = 
            gnttab_grant_access(dev->dom,virt_to_mfn(page),0);

        req->id = id;
    }

    wmb();

    dev->rx.req_prod_pvt = req_prod + i;
    
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);

}
Example #11: blkfront_aio_poll
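/* Poll the block ring for completed requests: report errors, revoke the
 * per-segment grants of finished reads and writes, and invoke each request's
 * completion callback (which frees the aiocb). */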
int blkfront_aio_poll(struct blkfront_dev *dev)
{
    RING_IDX rp, cons;
    struct blkif_response *rsp;
    int more;
    int nr_consumed;

moretodo:
#ifdef HAVE_LIBC
    if (dev->fd != -1) {
        files[dev->fd].read = 0;
        mb(); /* Make sure to let the handler set read to 1 before we start looking at the ring */
    }
#endif

    rp = dev->ring.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->ring.rsp_cons;

    nr_consumed = 0;
    while ((cons != rp))
    {
        struct blkfront_aiocb *aiocbp;
        int status;

	rsp = RING_GET_RESPONSE(&dev->ring, cons);
	nr_consumed++;

        aiocbp = (void*) (uintptr_t) rsp->id;
        status = rsp->status;

        switch (rsp->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        {
            int j;

            if (status != BLKIF_RSP_OKAY)
                printk("%s error %d on %s at offset %llu, num bytes %llu\n",
                        rsp->operation == BLKIF_OP_READ?"read":"write",
                        status, aiocbp->aio_dev->nodename,
                        (unsigned long long) aiocbp->aio_offset,
                        (unsigned long long) aiocbp->aio_nbytes);

            for (j = 0; j < aiocbp->n; j++)
                gnttab_end_access(aiocbp->gref[j]);

            break;
        }

        case BLKIF_OP_WRITE_BARRIER:
            if (status != BLKIF_RSP_OKAY)
                printk("write barrier error %d\n", status);
            break;
        case BLKIF_OP_FLUSH_DISKCACHE:
            if (status != BLKIF_RSP_OKAY)
                printk("flush error %d\n", status);
            break;

        default:
            printk("unrecognized block operation %d response (status %d)\n", rsp->operation, status);
            break;
        }

        dev->ring.rsp_cons = ++cons;
        /* Nota: callback frees aiocbp itself */
        if (aiocbp && aiocbp->aio_cb)
            aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
        if (dev->ring.rsp_cons != cons)
            /* We reentered, we must not continue here */
            break;
    }

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
    if (more) goto moretodo;

    return nr_consumed;
}
Example #12: netfront_get_responses
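/* Collect all RX responses belonging to one packet (which may span several
 * slots), chain them into pbufs for lwIP, and revoke each slot's grant as it
 * is consumed; on allocation failure the whole packet is dropped. */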
/*
 * Reads RX responses for a single packet
 */
static int netfront_get_responses(struct netfront_dev *dev,
				  RING_IDX rp)
{
	struct netif_rx_response *rsp = &(dev->rsp);
	int32_t realsize = rsp->status;
	int16_t size = rsp->status;
	uint16_t id = rsp->id;
	uint16_t flags = rsp->flags;
	RING_IDX cons = rp;
	uint16_t slots = 1;
	int drop = 0;
#ifdef HAVE_LWIP
	struct pbuf *p;
	struct pbuf *first_p;
#endif

	dprintk("rx: ring: len %d %s\n", size,
		(flags & NETRXF_more_data ? "(more true) ": ""));

	BUG_ON(id >= NET_RX_RING_SIZE);

	if (flags & NETRXF_extra_info) {
		memset(dev->extras, 0, sizeof(dev->extras));
		netfront_get_extras(dev, dev->extras, cons);
		cons = dev->rx.rsp_cons;
	}

	if (flags & NETRXF_more_data) {
		dprintk("rx: scan: slot 0 len %d %s\n",
			size, (flags & NETRXF_more_data ? "(more true)": ""));
		realsize = size + netfront_get_size(dev, cons);
	}

	dprintk("rx: %c%c- %"PRIi32" bytes\n",
		flags & NETRXF_extra_info ? 'S' : '-',
		flags & ((NETRXF_csum_blank) | (NETRXF_data_validated)) ? 'C' : '-',
		realsize);

#ifdef HAVE_LWIP
	if (likely(dev->netif_rx_pbuf)) {
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	  first_p = p = netfront_alloc_pbuf(dev, realsize);
	  drop = (p == NULL);
#else
	  first_p = p = &dev->rx_buffers[id]->cpbuf.pbuf;
	  drop = 0;
	  dev->pbuf_cur = p;
#endif /* CONFIG_NETFRONT_PERSISTENT_GRANTS */

#if ETH_PAD_SIZE
	  if (likely(!drop))
	    pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif /* ETH_PAD_SIZE */
	}
#endif /* HAVE_LWIP */

	for (;;) {
		if (unlikely(rsp->status < 0 ||
			     (rsp->offset + rsp->status > PAGE_SIZE))) {
			printk("rx: ring<%u>: status %d, flags %04x, offset %d\n",
			       cons + slots, size, flags, rsp->offset);
		} else if (likely(!drop)) {
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
			handle_buffer(dev, rsp, &dev->rx_buffers[id], realsize);
#else
			handle_buffer(dev, rsp, dev->rx_buffers[id], realsize);
#endif
		}

#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
		BUG_ON(dev->rx_buffers[id]->gref == GRANT_INVALID_REF);
		gnttab_end_access(dev->rx_buffers[id]->gref);
		dev->rx_buffers[id]->gref = GRANT_INVALID_REF;
		dev->rx_buffers[id] = NULL;
#endif

		if (!(flags & NETRXF_more_data))
			break;

		if (dev->rx.sring->rsp_prod <= cons + slots)
			break;

		rsp = RING_GET_RESPONSE(&dev->rx, cons + slots);
		id = rsp->id;
		BUG_ON(id >= NET_RX_RING_SIZE);
		size = rsp->status;
		flags = rsp->flags;
		slots++;
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
		if (likely(dev->netif_rx_pbuf && (!drop))) {
			/* set tot_len */
			p->tot_len = realsize;
			realsize -= p->len;
			/* ..and link it to next pbuf */
			p->next = &dev->rx_buffers[id]->cpbuf.pbuf;
			dev->pbuf_cur = p = p->next;
		} else {
			netfront_release_rxbuffer(dev->rx_buffers[id], dev);
		}
#endif

		dprintk("rx: ring: len %d %s %s\n", size,
			(flags & NETRXF_more_data ? "(more true) ": ""),
			(drop ? "DROP" : ""));
	}

	BUG_ON(slots > dev->rx.sring->rsp_prod - dev->rx.rsp_cons);
	dev->rx.rsp_cons = cons + slots;

	if (unlikely(drop))
		goto err_drop;

#ifdef HAVE_LWIP
	if (likely(dev->netif_rx_pbuf)) {
#if ETH_PAD_SIZE
		pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif /* ETH_PAD_SIZE */
		if (first_p->ref != 1)
		  printk("first_p->ref = %u\n", first_p->ref);
		dev->netif_rx_pbuf(first_p, dev->netif_rx_arg);
	}
#endif /* HAVE_LWIP */
	return 1;

 err_drop:
	dprintk("  rx: dropped\n");
#ifdef HAVE_LWIP
	if (first_p) {
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
		pbuf_free(first_p);
#else /* CONFIG_NETFRONT_PERSISTENT_GRANTS */
		struct pbuf *next;

		/* unchain pbuf and release */
		p = first_p;
		while (p != NULL) {
			next = p->next;
			p->tot_len = p->len;
			p->next = NULL;
			netfront_free_rxpbuf(p);
			p = next;
		}
#endif /* CONFIG_NETFRONT_PERSISTENT_GRANTS */
	}
	if (likely(dev->netif_rx_pbuf))
		dev->netif_rx_pbuf(NULL, dev->netif_rx_arg); /* notify drop */
#endif
	return 0;
}
Example #13: netfront_xmit_pbuf
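/* Transmit a pbuf, optionally with checksum and TCP segmentation offload:
 * reserve enough TX slots (waiting, or failing with ERR_MEM, when none are
 * free), add a GSO extra-info request when needed, build the TX requests and
 * push them to the backend. */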
/**
 * Transmit function for pbufs which can handle checksum and segmentation offloading for TCPv4 and TCPv6
 */
err_t netfront_xmit_pbuf(struct netfront_dev *dev, struct pbuf *p, int co_type, int push)
{
	struct netif_tx_request *first_tx;
	struct netif_extra_info *gso;
	int slots;
	int used = 0;
#ifdef CONFIG_NETFRONT_GSO
	int sego;
#endif /* CONFIG_NETFRONT_GSO */
#ifdef CONFIG_NETFRONT_WAITFORTX
	unsigned long flags;
	DEFINE_WAIT(w);
#endif /* CONFIG_NETFRONT_WAITFORTX */

	/* Counts how many slots we require for this buf */
	slots = netfront_count_pbuf_slots(dev, p);
#ifdef CONFIG_NETFRONT_GSO
#if TCP_GSO /* GSO flag is only available if lwIP is built with GSO support */
	sego = (p->flags & PBUF_FLAG_GSO) ? 1 : 0;
#else
	sego = 0;
#endif
	/* GSO requires checksum offloading set */
	BUG_ON(sego && !(co_type & (XEN_NETIF_GSO_TYPE_TCPV4 | XEN_NETIF_GSO_TYPE_TCPV6)));
#endif /* CONFIG_NETFRONT_GSO */

	/* Checks if there are enough requests for this many slots (gso requires one slot more) */
#ifdef CONFIG_NETFRONT_GSO
	BUG_ON(!netfront_tx_possible(dev, slots + sego));
#else
	BUG_ON(!netfront_tx_possible(dev, slots));
#endif /* CONFIG_NETFRONT_GSO */

#ifdef CONFIG_NETFRONT_WAITFORTX
	local_irq_save(flags);
#endif /* CONFIG_NETFRONT_WAITFORTX */
#ifdef CONFIG_NETFRONT_GSO
	if (unlikely(!netfront_tx_available(dev, slots + sego))) {
#else
	if (unlikely(!netfront_tx_available(dev, slots))) {
#endif /* CONFIG_NETFRONT_GSO */
		netfront_xmit_push(dev);
#ifdef CONFIG_NETFRONT_WAITFORTX
 try_again:
#ifdef CONFIG_NETFRONT_GSO
		if (!netfront_tx_available(dev, slots + sego)) {
#else
		if (!netfront_tx_available(dev, slots)) {
#endif /* CONFIG_NETFRONT_GSO */
#ifndef CONFIG_NETFRONT_WAITFORTX_BUSYLOOP
			add_waiter(w, netfront_txqueue); /* release thread until space is free'd */
			local_irq_restore(flags);
			schedule();
			local_irq_save(flags);
#endif /* !CONFIG_NETFRONT_WAITFORTX_BUSYLOOP */
			netfront_tx_buf_gc(dev);
			goto try_again;
		}
		remove_waiter(w, netfront_txqueue); /* release thread until space is free'd */
#else
		return ERR_MEM;
#endif /* CONFIG_NETFRONT_WAITFORTX */
	}
#ifdef CONFIG_NETFRONT_WAITFORTX
	local_irq_restore(flags);
#endif /* CONFIG_NETFRONT_WAITFORTX */

	/* Set extras if packet is GSO kind */
	first_tx = netfront_get_page(dev);
	ASSERT(first_tx != NULL);
#if defined CONFIG_NETFRONT_GSO && TCP_GSO
	if (sego) {
		gso = (struct netif_extra_info *) RING_GET_REQUEST(&dev->tx, dev->tx.req_prod_pvt++);

		first_tx->flags |= NETTXF_extra_info;
		gso->u.gso.size = p->gso_size; /* segmentation size */
		gso->u.gso.type = co_type; /* XEN_NETIF_GSO_TYPE_TCPV4, XEN_NETIF_GSO_TYPE_TCPV6 */
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;

		used++;
	}
#endif /* CONFIG_NETFRONT_GSO */

	/* Make TX requests for the pbuf */
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	netfront_make_txreqs_pgnt(dev, first_tx, p, &used);
#else
	netfront_make_txreqs(dev, first_tx, p, &used);
#endif
	ASSERT(slots >= used); /* we should have taken at most the number slots that we estimated before */
	ASSERT(slots <= XEN_NETIF_NR_SLOTS_MIN); /* we should never take more slots than the backend supports */

	/* partially checksummed (offload enabled), or checksummed */
	first_tx->flags |= co_type ? ((NETTXF_csum_blank) | (NETTXF_data_validated)) : (NETTXF_data_validated);

	push |= (((dev)->tx.req_prod_pvt - (dev)->tx.rsp_cons) <= NET_TX_RING_SIZE / 2);
	if (push)
		netfront_xmit_push(dev);

#ifdef CONFIG_NETFRONT_STATS
	++dev->txpkts;
	dev->txbytes += p->tot_len;
#endif
	dprintk("tx: %c%c%c %u bytes (%u slots)\n", sego ? 'S' : '-', co_type ? 'C' : '-', push ? 'P' : '-', p->tot_len, slots);
	return ERR_OK;
}

void netfront_xmit_push(struct netfront_dev *dev)
{
	int flags;

	netfront_xmit_notify(dev);

	/* Collects any outstanding responses for more requests */
	local_irq_save(flags);
	netfront_tx_buf_gc(dev);
	local_irq_restore(flags);
}

void netfront_set_rx_pbuf_handler(struct netfront_dev *dev,
				  void (*thenetif_rx)(struct pbuf *p, void *arg),
				  void *arg)
{
	if (dev->netif_rx_pbuf && dev->netif_rx_pbuf != netif_rx_pbuf)
		printk("Replacing netif_rx_pbuf handler for dev %s\n", dev->nodename);

	dev->netif_rx = NULL;
	dev->netif_rx_pbuf = thenetif_rx;
	dev->netif_rx_arg = arg;
}
#endif

static void free_netfront(struct netfront_dev *dev)
{
	int i;
	int separate_tx_rx_irq = (dev->tx_evtchn != dev->rx_evtchn);

	free(dev->mac);
	free(dev->backend);

#ifdef CONFIG_NETMAP
	if (dev->netmap)
		return;
#endif

	for(i=0; i<NET_TX_RING_SIZE; i++)
		down(&dev->tx_sem);

	mask_evtchn(dev->tx_evtchn);
	if (separate_tx_rx_irq)
		mask_evtchn(dev->rx_evtchn);

	gnttab_end_access(dev->rx_ring_ref);
	gnttab_end_access(dev->tx_ring_ref);

	free_page(dev->rx.sring);
	free_page(dev->tx.sring);

	unbind_evtchn(dev->tx_evtchn);
	if (separate_tx_rx_irq)
		unbind_evtchn(dev->rx_evtchn);

#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	for(i=0; i<NET_RX_RING_SIZE; i++) {
		if (dev->rx_buffers[i].page) {
			gnttab_end_access(dev->rx_buffers[i].gref);
			free_page(dev->rx_buffers[i].page);
		}
	}
#else
	for(i=0; i<NET_RX_BUFFERS; i++) {
		if (dev->rx_buffer_pool[i].page) {
			if (dev->rx_buffer_pool[i].gref != GRANT_INVALID_REF)
				gnttab_end_access(dev->rx_buffer_pool[i].gref);
			free_page(dev->rx_buffer_pool[i].page);
		}
	}
#endif

#if defined CONFIG_NETFRONT_PERSISTENT_GRANTS || !defined CONFIG_NETFRONT_LWIP_ONLY
	for(i=0; i<NET_TX_RING_SIZE; i++) {
		if (dev->tx_buffers[i].page) {
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
			if (dev->tx_buffers[i].gref != GRANT_INVALID_REF)
#endif
			gnttab_end_access(dev->tx_buffers[i].gref);
			free_page(dev->tx_buffers[i].page);
		}
	}
#endif
}
Example #14: blkfront_aio_poll
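/* Another blkfront_aio_poll variant: log any error status, revoke the
 * per-segment grants of completed reads and writes, and invoke each
 * request's completion callback (which frees the aiocb). */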
int blkfront_aio_poll(struct blkfront_dev *dev)
{
    RING_IDX rp, cons;
    struct blkif_response *rsp;
    int more;
    int nr_consumed;

moretodo:

    rp = dev->ring.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->ring.rsp_cons;

    nr_consumed = 0;
    while ((cons != rp))
    {
        struct blkfront_aiocb *aiocbp;
        int status;

	rsp = RING_GET_RESPONSE(&dev->ring, cons);
	nr_consumed++;

        aiocbp = (void*) (uintptr_t) rsp->id;
        status = rsp->status;

        if (status != BLKIF_RSP_OKAY)
            minios_printk("block error %d for op %d\n", status, rsp->operation);

        switch (rsp->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        {
            int j;

            for (j = 0; j < aiocbp->n; j++)
                gnttab_end_access(aiocbp->gref[j]);

            break;
        }

        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
            break;

        default:
            minios_printk("unrecognized block operation %d response\n", rsp->operation);
        }

        dev->ring.rsp_cons = ++cons;
        /* Nota: callback frees aiocbp itself */
        if (aiocbp && aiocbp->aio_cb)
            aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
        if (dev->ring.rsp_cons != cons)
            /* We reentered, we must not continue here */
            break;
    }

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
    if (more) goto moretodo;

    return nr_consumed;
}
Example #15: shutdown_tpmfront
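/* Shut down the TPM frontend: walk the xenbus state machine to Closed, wait
 * for the backend to follow, reset the state to Initialising for a later
 * reopen, then mask and unbind the event channel, revoke the ring grant,
 * free the shared page and release the remaining allocations. */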
void shutdown_tpmfront(struct tpmfront_dev* dev)
{
   char* err;
   char path[512];
   if(dev == NULL) {
      return;
   }
   TPMFRONT_LOG("Shutting down tpmfront\n");
   /* disconnect */
   if(dev->state == XenbusStateConnected) {
      /* Tell backend we are closing */
      dev->state = XenbusStateClosing;
      if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
	 TPMFRONT_ERR("Unable to write to %s, error was %s", dev->nodename, err);
	 free(err);
      }

      /* Clean up xenstore entries */
      snprintf(path, 512, "%s/event-channel", dev->nodename);
      if((err = xenbus_rm(XBT_NIL, path))) {
	 free(err);
      }
      snprintf(path, 512, "%s/ring-ref", dev->nodename);
      if((err = xenbus_rm(XBT_NIL, path))) {
	 free(err);
      }

      /* Tell backend we are closed */
      dev->state = XenbusStateClosed;
      if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
	 TPMFRONT_ERR("Unable to write to %s, error was %s", dev->nodename, err);
	 free(err);
      }

      /* Wait for the backend to close and unmap shared pages, ignore any errors */
      wait_for_backend_state_changed(dev, XenbusStateClosed);

      /* Prepare for a later reopen (possibly by a kexec'd kernel) */
      dev->state = XenbusStateInitialising;
      if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
	 TPMFRONT_ERR("Unable to write to %s, error was %s", dev->nodename, err);
	 free(err);
      }

      /* Close event channel and unmap shared page */
      mask_evtchn(dev->evtchn);
      unbind_evtchn(dev->evtchn);
      gnttab_end_access(dev->ring_ref);

      free_page(dev->page);
   }

   /* Cleanup memory usage */
   if(dev->respbuf) {
      free(dev->respbuf);
   }
   if(dev->bepath) {
      free(dev->bepath);
   }
   if(dev->nodename) {
      free(dev->nodename);
   }
   free(dev);
}