Example #1
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_be_send_notify(&netdev->xendev);
    }

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}
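Every producer-side example in this list (this one, and e.g. Examples #2, #3, #7 and #21) follows the same sequence from Xen's public ring macros in xen/io/ring.h: claim the slot at rsp_prod_pvt with RING_GET_RESPONSE, fill it in, advance the private producer index, then call RING_PUSH_RESPONSES_AND_CHECK_NOTIFY and send an event only if the macro says the peer needs one. Below is a minimal sketch of just that shared pattern, assuming the standard netif TX ring types from the Xen public headers; send_notify_to_frontend() is a hypothetical placeholder for the backend's event-channel kick.

/*
 * Minimal producer-side sketch (not taken from any single example above).
 * Assumes <xen/io/ring.h> and the netif TX ring types; the notification
 * call at the end is a hypothetical placeholder.
 */
static void push_tx_response(netif_tx_back_ring_t *ring,
                             const netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = ring->rsp_prod_pvt;
    netif_tx_response_t *resp = RING_GET_RESPONSE(ring, i);
    int notify;

    resp->id     = txp->id;         /* echo the request id back */
    resp->status = st;

    ring->rsp_prod_pvt = i + 1;     /* advance the private producer index */

    /* Publish rsp_prod and check whether the frontend wants an event. */
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
    if (notify)
        send_notify_to_frontend();  /* hypothetical event-channel kick */
}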
Example #2
static int xen_block_send_response(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.native,
            dataplane->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_32_part,
            dataplane->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_64_part,
            dataplane->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = request->req.id;
    resp->operation = request->req.operation;
    resp->status = request->status;

    dataplane->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
                                         send_notify);
    if (dataplane->rings.common.rsp_prod_pvt ==
        dataplane->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
                                      have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        dataplane->more_work++;
    }
    return send_notify;
}
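The tail check after the push (also in Examples #3, #21 and #29) covers two cases: when the backend has caught up with the frontend (rsp_prod_pvt == req_cons), RING_FINAL_CHECK_FOR_REQUESTS re-arms sring->req_event and checks once more, so a request posted at exactly that moment still raises an event; otherwise a plain RING_HAS_UNCONSUMED_REQUESTS test is enough to keep have_requests set. Either way the frontend can keep queuing requests without sending a notification for each one, which is what the comment means by lower overheads and batching.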
Example #3
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}
Example #4
static int
xpvtap_user_response_get(xpvtap_state_t *state, blkif_response_t *resp,
    uint_t *uid)
{
	blkif_front_ring_t *uring;
	blkif_response_t *target;


	uring = &state->bt_user_ring.ur_ring;

	if (!RING_HAS_UNCONSUMED_RESPONSES(uring)) {
		return (B_FALSE);
	}

	target = NULL;
	target = RING_GET_RESPONSE(uring, uring->rsp_cons);
	if (target == NULL) {
		return (B_FALSE);
	}

	/* copy out the user app response */
	bcopy(target, resp, sizeof (*resp));
	uring->rsp_cons++;

	/* restore the guest's request id from the original request */
	*uid = (uint_t)resp->id;
	resp->id = state->bt_map.um_outstanding_reqs[*uid].id;

	return (B_TRUE);
}
Example #5
// What if someone calls waiton the same desc several times?
int waiton_syscall(syscall_desc_t* desc)
{
	int retval = 0;
	if (desc == NULL || desc->channel == NULL){
		errno = EFAIL;
		return -1;
	}
	// Make sure we were given a desc with a non-NULL frontring.  This could
	// happen if someone forgot to check the error code on the paired syscall.
	syscall_front_ring_t *fr =  &desc->channel->sysfr;
	
	if (!fr){
		errno = EFAIL;
		return -1;
	}
	printf("waiting %d\n", vcore_id());
	syscall_rsp_t* rsp = RING_GET_RESPONSE(fr, desc->idx);

	// ignoring the ring push response from the kernel side now
	while (atomic_read(&rsp->sc->flags) != SC_DONE)
		cpu_relax();
	// memcpy(rsp, rsp_inring, sizeof(*rsp));
	
	// run a cleanup function for this desc, if available
	if (rsp->cleanup)
		rsp->cleanup(rsp->data);
	if (RSP_ERRNO(rsp)){
		errno = RSP_ERRNO(rsp);
		retval = -1;
	} else
		retval = RSP_RESULT(rsp);
	atomic_inc((atomic_t*) &(fr->rsp_cons));
	return retval;
}
Example #6
void netfront_rx(struct netfront_dev *dev)
{
	RING_IDX rp, cons;
	struct netif_rx_response *rsp = &(dev->rsp);
	int more, flags;

	local_irq_save(flags);
	netfront_tx_buf_gc(dev);
	local_irq_restore(flags);
#ifdef CONFIG_NETMAP
	if (dev->netmap) {
		netmap_netfront_rx(dev);
		return;
	}
#endif
moretodo:
	rp = dev->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	cons = dev->rx.rsp_cons;

	while (cons != rp) {
		NETIF_MEMCPY(rsp, RING_GET_RESPONSE(&dev->rx, cons), sizeof(*rsp));
		netfront_get_responses(dev, cons);
		cons = dev->rx.rsp_cons;
	}

	dev->rx.rsp_cons = cons;
	RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx, more);
	if(more)
		goto moretodo;

	netfront_fillup_rx_buffers(dev);
}
Example #7
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset     = offset;
    resp->flags      = flags;
    resp->id         = req->id;
    resp->status     = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;
    }

    xen_be_printf(&netdev->xendev, 3, "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_be_send_notify(&netdev->xendev);
    }
}
Example #8
static void
blktap_read_ring(struct blktap *tap)
{
	struct blktap_ring *ring = &tap->ring;
	struct blktap_ring_response rsp;
	RING_IDX rc, rp;

	down_read(&current->mm->mmap_sem);
	if (!ring->vma) {
		up_read(&current->mm->mmap_sem);
		return;
	}

	/* for each outstanding message on the ring  */
	rp = ring->ring.sring->rsp_prod;
	rmb();

	for (rc = ring->ring.rsp_cons; rc != rp; rc++) {
		memcpy(&rsp, RING_GET_RESPONSE(&ring->ring, rc), sizeof(rsp));
		blktap_ring_read_response(tap, &rsp);
	}

	ring->ring.rsp_cons = rc;

	up_read(&current->mm->mmap_sem);
}
Example #9
/**
 * Poll for completed packets
 *
 * @v netdev		Network device
 */
static void netfront_poll_tx ( struct net_device *netdev ) {
	struct netfront_nic *netfront = netdev->priv;
	struct xen_device *xendev = netfront->xendev;
	struct netif_tx_response *response;
	struct io_buffer *iobuf;
	unsigned int status;
	int rc;

	/* Consume any unconsumed responses */
	while ( RING_HAS_UNCONSUMED_RESPONSES ( &netfront->tx_fring ) ) {

		/* Get next response */
		response = RING_GET_RESPONSE ( &netfront->tx_fring,
					       netfront->tx_fring.rsp_cons++ );

		/* Retrieve from descriptor ring */
		iobuf = netfront_pull ( netfront, &netfront->tx, response->id );
		status = response->status;
		if ( status == NETIF_RSP_OKAY ) {
			DBGC2 ( netfront, "NETFRONT %s TX id %d complete\n",
				xendev->key, response->id );
			netdev_tx_complete ( netdev, iobuf );
		} else {
			rc = -EIO_NETIF_RSP ( status );
			DBGC2 ( netfront, "NETFRONT %s TX id %d error %d: %s\n",
				xendev->key, response->id, status,
				strerror ( rc ) );
			netdev_tx_complete_err ( netdev, iobuf, rc );
		}
	}
}
Example #10
File: mem_event.c Project: CPFL/xen
int mem_event_get_response(struct domain *d, struct mem_event_domain *med, mem_event_response_t *rsp)
{
    mem_event_front_ring_t *front_ring;
    RING_IDX rsp_cons;

    mem_event_ring_lock(med);

    front_ring = &med->front_ring;
    rsp_cons = front_ring->rsp_cons;

    if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
    {
        mem_event_ring_unlock(med);
        return 0;
    }

    /* Copy response */
    memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
    rsp_cons++;

    /* Update ring */
    front_ring->rsp_cons = rsp_cons;
    front_ring->sring->rsp_event = rsp_cons + 1;

    /* Kick any waiters -- since we've just consumed an event,
     * there may be additional space available in the ring. */
    mem_event_wake(d, med);

    mem_event_ring_unlock(med);

    return 1;
}
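The consumer side mirrors the producer pattern: check RING_HAS_UNCONSUMED_RESPONSES, copy the slot at rsp_cons, advance rsp_cons, and then either set sring->rsp_event by hand as this example and Example #22 do, or let RING_FINAL_CHECK_FOR_RESPONSES re-arm the event and report whether more responses arrived in the meantime (as the interrupt handlers further down do). A minimal drain-loop sketch, assuming the mem_event ring types used above; handle_response() is a hypothetical per-response callback and rmb() is the usual read barrier.

/*
 * Minimal consumer-side sketch, assuming <xen/io/ring.h> and the mem_event
 * ring types from Example #10; handle_response() is hypothetical.
 */
static void drain_responses(mem_event_front_ring_t *front_ring)
{
    mem_event_response_t rsp;
    int more;

    do {
        RING_IDX cons = front_ring->rsp_cons;
        RING_IDX prod = front_ring->sring->rsp_prod;

        rmb();                         /* see responses up to 'prod' */

        while (cons != prod) {
            memcpy(&rsp, RING_GET_RESPONSE(front_ring, cons), sizeof(rsp));
            handle_response(&rsp);     /* hypothetical */
            cons++;
        }
        front_ring->rsp_cons = cons;

        /* Re-arm rsp_event, then check again to close the race with the producer. */
        RING_FINAL_CHECK_FOR_RESPONSES(front_ring, more);
    } while (more);
}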
Example #11
static irqreturn_t as_int (int irq, void *dev_id)
{
	struct as_response *ring_resp;
	RING_IDX i, rp;

	printk("\nxen:DomU: as_int called");
again:
	rp = info.ring.sring->rsp_prod;
	printk(KERN_DEBUG "\nxen:DomU:ring pointers %d to %d", info.ring.rsp_cons, rp);
	for (i = info.ring.rsp_cons; i != rp; i++) {
		unsigned long id;
		ring_resp = RING_GET_RESPONSE(&(info.ring), i);
		printk(KERN_DEBUG "\nxen:domU: Recvd in IDX-%d, with id=%d, op=%d, st=%d", i, ring_resp->id, ring_resp->operation, ring_resp->status);
		id = ring_resp->id;
		switch(ring_resp->operation) {
		case 0:
			printk(KERN_DEBUG "\nxen:DomU: operation: 0");
			break;
		default:
			break;
		}
	}

	info.ring.rsp_cons = i;
	if (i != info.ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info.ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info.ring.sring->rsp_event = i + 1;
	return IRQ_HANDLED;
}
Example #12
static void netfront_tx_buf_gc(struct netfront_dev *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	struct net_txbuffer *buf;
#endif

	do {
		prod = dev->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = dev->tx.rsp_cons; cons != prod; cons++) {
			struct netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&dev->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			if (txrsp->status == NETIF_RSP_DROPPED)
				printk("netif drop for tx\n");

			if (txrsp->status == NETIF_RSP_ERROR)
				printk("netif error for tx\n");

			id  = txrsp->id;
			BUG_ON(id >= NET_TX_RING_SIZE);

#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
			buf = &dev->tx_buffers[id];
			gnttab_end_access(buf->gref);
			buf->gref = GRANT_INVALID_REF;
#ifdef HAVE_LWIP
			if (buf->pbuf) {
				pbuf_free(buf->pbuf);
				buf->pbuf = NULL;
			}
#endif /* HAVE_LWIP */
#endif /* CONFIG_NETFRONT_PERSISTENT_GRANTS */
			add_id_to_freelist(id, dev->tx_freelist);
			up(&dev->tx_sem);
		}

		dev->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few tx_buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		dev->tx.sring->rsp_event =
			prod + ((dev->tx.sring->req_prod - prod) >> 1) + 1;
		mb();
	} while ((cons == prod) && (prod != dev->tx.sring->rsp_prod));
}
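The rsp_event arithmetic at the end of this loop (repeated in Examples #14, #17, #18 and #27) asks for the next interrupt only after roughly half of the still-outstanding requests have completed: with prod (the snapshot of rsp_prod) at 32 and req_prod at 40 there are 8 requests in flight, so rsp_event = 32 + (8 >> 1) + 1 = 37 and the backend will not raise another event until rsp_prod reaches 37. The closing while condition then repeats the whole pass if the loop consumed everything it saw (cons == prod) but new responses were published while rsp_event was being updated.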
Example #13
static inline void write_rsp_to_ring(struct td_state *s, blkif_response_t *rsp)
{
	tapdev_info_t *info = s->ring_info;
	blkif_response_t *rsp_d;
	
	rsp_d = RING_GET_RESPONSE(&info->fe_ring, info->fe_ring.rsp_prod_pvt);
	memcpy(rsp_d, rsp, sizeof(blkif_response_t));
	info->fe_ring.rsp_prod_pvt++;
}
Example #14
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}
Example #15
static irqreturn_t ixp_interrupt(int irq, void *dev_id)
{
	struct ixp_response *bret;
	RING_IDX i, rp;
	struct ixpfront_info *info = (struct ixpfront_info *)dev_id;
	int error;


	if (unlikely(info->connected != IXP_STATE_CONNECTED)) {
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		
		ixp_install_response(info, bret);
		ixp_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		error = (bret->status == IXPIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case IXP_OP_3DES_ENCRYPT:
			if (unlikely(bret->status != IXPIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	return IRQ_HANDLED;
}
Example #16
static int write_block(FILE *p, blkif_sector_t sector, size_t amt)
{
  static uint64_t next_reqid = 1;
  blkif_response_t *rsp;
  blkif_request_t *req;
  int notify, work_to_do;
  uint64_t reqid;
  RING_IDX i;

  /* wait until we can write something */
  while(RING_FULL(&p->ring)) runtime_block(1);

  /* write out the request */
  i = p->ring.req_prod_pvt++;
  req = RING_GET_REQUEST(&p->ring, i);
  memset(req, 0, sizeof(blkif_request_t));
  req->operation         = BLKIF_OP_WRITE;
  req->nr_segments       = 1;
  req->handle            = p->disk_handle;
  req->id                = reqid = next_reqid++;
  req->sector_number     = sector;
  req->seg[0].gref       = p->block_grant;
  req->seg[0].first_sect = 0;
  req->seg[0].last_sect  = (amt - 1) / 512;
  wmb();
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&p->ring, notify);
  if(notify) channel_send(p->chan);

  /* wait for it to be satisfied */
  do {
    while(!RING_HAS_UNCONSUMED_RESPONSES(&p->ring))
      runtime_block(1);
    i = p->ring.rsp_cons++;
    rsp = RING_GET_RESPONSE(&p->ring, i);
  } while(rsp->id != reqid);

  /* was it successful? */
  if(rsp->status != BLKIF_RSP_OKAY) {
    printf("PROFILING: Block write failed!\n");
    return 0;
  }

  /* we do writes one at a time, synchronously, so work_to_do should always
     be false */
  RING_FINAL_CHECK_FOR_RESPONSES(&p->ring, work_to_do);
  assert(!work_to_do);

  return 1;
}
Example #17
void network_tx_buf_gc(struct netfront_dev *dev)
{
    RING_IDX cons, prod;
    unsigned short id;

    do {
        prod = dev->tx.sring->rsp_prod;
        rmb(); /* Ensure we see responses up to 'rp'. */
        //printk("cons = %ld, prod = %ld\n", dev->tx.rsp_cons, prod); //farewellkou

        for (cons = dev->tx.rsp_cons; cons != prod; cons++) 
        {
            struct netif_tx_response *txrsp;
            struct net_buffer *buf;

            txrsp = RING_GET_RESPONSE(&dev->tx, cons);
            if (txrsp->status == NETIF_RSP_NULL){
                continue;
            }

            if (txrsp->status == NETIF_RSP_ERROR){
                printk("packet error\n");
            }

            id  = txrsp->id;
            BUG_ON(id >= NET_TX_RING_SIZE);
            buf = &dev->tx_buffers[id];
            gnttab_end_access(buf->gref);
            buf->gref = GRANT_INVALID_REF;

            add_id_to_freelist(id, dev->tx_freelist);
            up(&dev->tx_sem);
        }

        dev->tx.rsp_cons = prod;

        /*
         * Set a new event, then check for race with update of tx_cons.
         * Note that it is essential to schedule a callback, no matter
         * how few tx_buffers are pending. Even if there is space in the
         * transmit ring, higher layers may be blocked because too much
         * data is outstanding: in such cases notification from Xen is
         * likely to be the only kick that we'll get.
         */
        dev->tx.sring->rsp_event =
            prod + ((dev->tx.sring->req_prod - prod) >> 1) + 1;
        mb();
    } while ((cons == prod) && (prod != dev->tx.sring->rsp_prod));
}
Example #18
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); 

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}
Example #19
/*
 * Computes the size of the pbuf to allocate based
 * on how many slots the (possible GSO) frame requires.
 */
static int netfront_get_size(struct netfront_dev *dev, RING_IDX ri)
{
	struct netif_rx_response *rx;
	int32_t len = 0;
	int slots = 1;

	do {
		rx = RING_GET_RESPONSE(&dev->rx, ++ri);
		dprintk("rx: scan: slot %d len %d (more %s)\n",
			slots, rx->status,
			(rx->flags & NETRXF_more_data
				? "true": "false"));
		len += rx->status;
		slots++;
	} while (rx->flags & NETRXF_more_data);

	return len;
}
Example #20
static int writelog_dequeue_responses(struct writelog* wl)
{
  RING_IDX rstart, rend;
  log_response_t rsp;

  rstart = wl->fring.rsp_cons;
  rend = wl->sring->rsp_prod;

  BDPRINTF("ring kicked (start = %u, end = %u)", rstart, rend);

  while (rstart != rend) {
    memcpy(&rsp, RING_GET_RESPONSE(&wl->fring, rstart), sizeof(rsp));
    BDPRINTF("ctl: read response %"PRIu64":%u", rsp.sector, rsp.count);
    wl->fring.rsp_cons = ++rstart;
    wl->inflight--;
  }

  return 0;
}
Example #21
static irqreturn_t as_int(int irq, void *dev_id)
{
    RING_IDX rc, rp;
    as_request_t req;
    as_response_t resp;
    int more_to_do, notify;
    printk(KERN_DEBUG "\nxen:Dom0: as_int called with dev_id=%x info=%x", (unsigned int)dev_id, (unsigned int) &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG " rc = %d rp = %d", rc, rp);
    while (rc != rp) {
       if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
           break;
       memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
       resp.id = req.id;
       resp.operation = req.operation;
       resp.status = req.status + 1;
       printk(KERN_DEBUG "\nxen:Dom0:Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
       info.ring.req_cons = ++rc;
       barrier();
       switch(req.operation) {
          case 0:
              printk(KERN_DEBUG "\nxen:dom0:req.operation = 0");
              break;
          default:
              printk(KERN_DEBUG "\nxen:dom0:req.operation = %d", req.operation);
              break;
       }
      memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
      info.ring.rsp_prod_pvt++;
      RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
      if (info.ring.rsp_prod_pvt == info.ring.req_cons) {
          RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
       } else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring)) {
          more_to_do = 1;
       }
       if (notify) {
          printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
          notify_remote_via_irq(info.irq);
       }
    }
    return IRQ_HANDLED;
}
Example #22
LOCAL void mcd_event_get_response(struct domain *d, mcd_event_response_t *rsp)
{
    mcd_event_front_ring_t *front_ring;
    RING_IDX rsp_cons;

    mcd_event_ring_lock(d);

    front_ring = &d->mcd_event.front_ring;
    rsp_cons = front_ring->rsp_cons;

    /* Copy response */
    memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
    rsp_cons++;

    /* Update ring */
    front_ring->rsp_cons = rsp_cons;
    front_ring->sring->rsp_event = rsp_cons + 1;

    mcd_event_ring_unlock(d);
}
Example #23
static FORCEINLINE VOID
FrontendCompleteResponses(
    IN  PXENVBD_FRONTEND        Frontend
    )
{
    for (;;) {
        ULONG   rsp_prod;
        ULONG   rsp_cons;

        rsp_prod = Frontend->SharedRing->rsp_prod;
        rsp_cons = Frontend->FrontRing.rsp_cons;

        __MemoryBarrier();

        while (rsp_cons != rsp_prod) {
            blkif_response_t*   Response;
            PXENVBD_REQUEST     Request;
            SHORT               Status;

            Response = RING_GET_RESPONSE(&Frontend->FrontRing, rsp_cons);
            Status = Response->status;
            Request = (PXENVBD_REQUEST)(ULONG_PTR)(Response->id);

            ++rsp_cons;

            if (Request) {
                PdoCompleteSubmittedRequest(Frontend->Pdo, Request, Status);
            }

            // zero the request slot now that it has been read
            RtlZeroMemory(Response, sizeof(blkif_response_t));
        }

        Frontend->FrontRing.rsp_cons = rsp_cons;

        __MemoryBarrier();

        if (!__FinalCheckForResponses(Frontend))
            break;
    }
}
Example #24
static int scsifront_ring_drain(struct vscsifrnt_info *info)
{
	struct vscsiif_response *ring_rsp;
	RING_IDX i, rp;
	int more_to_do = 0;

	rp = info->ring.sring->rsp_prod;
	rmb();	/* ordering required respective to dom0 */
	for (i = info->ring.rsp_cons; i != rp; i++) {
		ring_rsp = RING_GET_RESPONSE(&info->ring, i);
		scsifront_do_response(info, ring_rsp);
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt)
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
	else
		info->ring.sring->rsp_event = i + 1;

	return more_to_do;
}
Example #25
/*
 * Reads extra slots to check for a GSO packet
 */
static int netfront_get_extras(struct netfront_dev *dev,
			       struct netif_extra_info *extras, RING_IDX ri)
{
	struct netif_extra_info *extra;
	RING_IDX cons = dev->rx.rsp_cons;
	int err = 0;
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	struct net_rxbuffer *buf;
#endif

	do {
		extra = (struct netif_extra_info *)
			RING_GET_RESPONSE(&dev->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			printk("Invalid extra type: %d\n", extra->type);
			err = -EINVAL;
		} else {
			dprintk("rx: scan: extra %u %s\n", extra->type,
				(extra->flags & XEN_NETIF_EXTRA_FLAG_MORE
					? "(more true)": ""));
			NETIF_MEMCPY(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
		buf = dev->rx_buffers[netfront_rxidx(cons)];
		gnttab_end_access(buf->gref);
		buf->gref = GRANT_INVALID_REF;
		dev->rx_buffers[netfront_rxidx(cons)] = NULL;
		netfront_release_rxbuffer(buf, dev);
#endif
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	dev->rx.rsp_cons = cons;
	return err;
}
Example #26
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void netfront_poll_rx ( struct net_device *netdev ) {
	struct netfront_nic *netfront = netdev->priv;
	struct xen_device *xendev = netfront->xendev;
	struct netif_rx_response *response;
	struct io_buffer *iobuf;
	int status;
	size_t len;
	int rc;

	/* Consume any unconsumed responses */
	while ( RING_HAS_UNCONSUMED_RESPONSES ( &netfront->rx_fring ) ) {

		/* Get next response */
		response = RING_GET_RESPONSE ( &netfront->rx_fring,
					       netfront->rx_fring.rsp_cons++ );

		/* Retrieve from descriptor ring */
		iobuf = netfront_pull ( netfront, &netfront->rx, response->id );
		status = response->status;
		if ( status >= 0 ) {
			len = status;
			iob_reserve ( iobuf, response->offset );
			iob_put ( iobuf, len );
			DBGC2 ( netfront, "NETFRONT %s RX id %d complete "
				"%#08lx+%zx\n", xendev->key, response->id,
				virt_to_phys ( iobuf->data ), len );
			netdev_rx ( netdev, iobuf );
		} else {
			rc = -EIO_NETIF_RSP ( status );
			DBGC2 ( netfront, "NETFRONT %s RX id %d error %d: %s\n",
				xendev->key, response->id, status,
				strerror ( rc ) );
			netdev_rx_err ( netdev, iobuf, rc );
		}
	}
}
Example #27
File: netfe.c Project: bkearns/ling
static void netfe_tx_buf_gc(netfe_t *fe)
{
	RING_IDX prod, cons;

	do {
		prod = fe->tx_ring.sring->rsp_prod;
		rmb(); // dark

		for (cons = fe->tx_ring.rsp_cons; cons != prod; cons++)
		{
			netif_tx_response_t *rsp = RING_GET_RESPONSE(&fe->tx_ring, cons);
			fe->free_tx_bufs[rsp->id] = fe->free_tx_head;
			fe->free_tx_head = rsp->id;
		}

		fe->tx_ring.rsp_cons = prod;

		// mindlessly copied from netfront.c
		fe->tx_ring.sring->rsp_event =
			prod + ((fe->tx_ring.sring->req_prod - prod) >> 1) +1;
		mb();

	} while ((cons == prod) && (prod != fe->tx_ring.sring->rsp_prod));
}
Example #28
void network_rx(struct netfront_dev *dev)
{
    RING_IDX rp,cons,req_prod;
    struct netif_rx_response *rx;
    int nr_consumed, some, more, i, notify;


moretodo:
    rp = dev->rx.sring->rsp_prod;
    rmb(); /* Ensure we see queued responses up to 'rp'. */
    cons = dev->rx.rsp_cons;

    nr_consumed = 0;
    some = 0;
    while ((cons != rp) && !some)
    {
        struct net_buffer* buf;
        unsigned char* page;
        int id;

        rx = RING_GET_RESPONSE(&dev->rx, cons);

        if (rx->flags & NETRXF_extra_info)
        {
            printk("+++++++++++++++++++++ we have extras!\n");
            continue;
        }


        if (rx->status == NETIF_RSP_NULL) continue;

        id = rx->id;
        BUG_ON(id >= NET_TX_RING_SIZE);

        buf = &dev->rx_buffers[id];
        page = (unsigned char*)buf->page;
        gnttab_end_access(buf->gref);

        if(rx->status>0)
        {
#ifdef HAVE_LIBC
	    if (dev->netif_rx == NETIF_SELECT_RX) {
		int len = rx->status;
		ASSERT(current == main_thread);
		if (len > dev->len)
		    len = dev->len;
		memcpy(dev->data, page+rx->offset, len);
		dev->rlen = len;
		some = 1;
	    } else
#endif
		dev->netif_rx(page+rx->offset,rx->status);
        }

        nr_consumed++;

        ++cons;
    }
    dev->rx.rsp_cons=cons;

    RING_FINAL_CHECK_FOR_RESPONSES(&dev->rx,more);
    if(more && !some) goto moretodo;

    req_prod = dev->rx.req_prod_pvt;

    for(i=0; i<nr_consumed; i++)
    {
        int id = xennet_rxidx(req_prod + i);
        netif_rx_request_t *req = RING_GET_REQUEST(&dev->rx, req_prod + i);
        struct net_buffer* buf = &dev->rx_buffers[id];
        void* page = buf->page;

        /* We are sure to have free gnttab entries since they got released above */
        buf->gref = req->gref = 
            gnttab_grant_access(dev->dom,virt_to_mfn(page),0);

        req->id = id;
    }

    wmb();

    dev->rx.req_prod_pvt = req_prod + i;
    
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
    if (notify)
        notify_remote_via_evtchn(dev->evtchn);

}
Example #29
static irqreturn_t chrif_int(int irq, void *dev_id)
{
    int err;
    RING_IDX rc, rp;
    int more_to_do, notify;
    chrif_request_t req;
    chrif_response_t resp;
    printk(KERN_INFO "\n------------------------------start response-------------------------------------");
    printk(KERN_DEBUG "\nxen: Dom0: chrif_int called with dev_id=%x info=%x", (unsigned int)dev_id, (unsigned int) &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG "\nxen: Dom0: rc = %d rp = %d", rc, rp);

    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen: Dom0: Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();

        printk(KERN_DEBUG "\nxen: Dom0: operation:  %s", op_name(resp.operation));
        switch(resp.operation) {
        case CHRIF_OP_OPEN:
            info.chrif_filp = filp_open(DEVICE_PATH, O_RDWR, 0);
            printk(KERN_DEBUG "\nxen: dom0: response open");
            break;
        case CHRIF_OP_READ: {
            resp.rdwr.len = req.rdwr.len;
            //struct pdma_info pdma_info;
            //memset(op_page->addr, 0, resp.rdwr.len);
            old_fs = get_fs();
            set_fs(get_ds());
            //get read size of block
            //err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
            //read data from device to page
            //err =info.chrif_filp->f_op->read(info.chrif_filp, op_page->addr, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
            if(err < 0)
                printk(KERN_DEBUG "\nxen: Dom0: read %u bytes error", resp.rdwr.len);
            printk(KERN_DEBUG "\nxen: dom0: response read");
            break;
        }
        case CHRIF_OP_WRITE: {
            int i = 0, count, ret;
            struct vm_struct *op_page;
            struct gnttab_map_grant_ref op_page_ops;
            struct gnttab_unmap_grant_ref op_page_unmap_ops;
            resp.rdwr.len = req.rdwr.len;

            count = resp.rdwr.len/4096;
            printk(KERN_DEBUG "\nxen: Dom0: write %u bytes %d times", resp.rdwr.len, count);

            block_buf = (char *)kmalloc(resp.rdwr.len, GFP_KERNEL);
            memset(block_buf, 0, resp.rdwr.len);

            while(i < count) {
                resp.op_gref[i] = req.op_gref[i];
                printk(KERN_DEBUG "\nxen: dom0: req.op_gref[0]: %d", resp.op_gref[i]);

                op_page = alloc_vm_area(PAGE_SIZE, NULL);
                if(op_page == 0) {
                    free_vm_area(op_page);
                    printk("\nxen: dom0: could not allocate shared_page");
                    return -EFAULT;
                }
                /*gnttab_set_map_op(&op_page_ops, (unsigned long)op_page->addr, GNTMAP_host_map, resp.op_gref[i], info.remoteDomain);

                 op_page_unmap_ops.host_addr = (unsigned long)(op_page->addr);
                 unmap_ops.handle = op_page_ops.handle;
                 if(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op_page_ops, 1)){
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
                     return -EFAULT;
                 }
                 if (op_page_ops.status) {
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", op_page_ops.status);
                     return -EFAULT;
                 }
                 printk(KERN_DEBUG "\nxen: dom0: map shared page success, shared_page=%x, handle = %x, status = %x", (unsigned int)op_page->addr, op_page_ops.handle, op_page_ops.status);

                 memcpy(block_buf+i*4096, op_page->addr, 4096);
                 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op_page_unmap_ops, 1);
                 if (ret == 0) {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame");
                 } else {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame failed");
                 }
                 free_vm_area(op_page);*/
                i++;
            }

            /*  old_fs = get_fs();
            set_fs(get_ds());
            //write data from page to device
            //err = info.chrif_filp->f_op->write(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
              if(err < 0)
            	printk(KERN_DEBUG "\nxen: Dom0: write %u bytes error", resp.rdwr.len);

              */ //kfree(block_buf);
            printk(KERN_DEBUG "\nxen: dom0: response write");
            break;
        }
        case CHRIF_OP_IOCTL: {
            resp.ioc_parm.cmd = req.ioc_parm.cmd;
            switch(resp.ioc_parm.cmd) {
            case PDMA_IOC_START_DMA: {
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl success");
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                break;
            }
            case PDMA_IOC_STOP_DMA: {
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl success");
                break;
            }
            case PDMA_IOC_INFO: {
                struct pdma_info pdma_info;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: info ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: info ioctl success");
                resp.ioc_parm.info = pdma_info;
                break;
            }
            case PDMA_IOC_STAT: {
                struct pdma_stat pdma_stat;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stat ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stat ioctl success");
                resp.ioc_parm.stat = pdma_stat;
                break;
            }
            case PDMA_IOC_RW_REG: {
                struct pdma_rw_reg ctrl = req.ioc_parm.ctrl;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl success");
                resp.ioc_parm.ctrl = ctrl;
                break;
            }
            default:
                printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
                break;
            }
            printk(KERN_INFO "\nxen: Dom0: response ioctl");
            break;
        }
        case CHRIF_OP_CLOSE:
            filp_close(info.chrif_filp, NULL);
            printk(KERN_INFO "\nxen: Dom0: response close");
            break;
        default:
            printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
            break;
        }

        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        //put response and check whether or not notify domU
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons)
        {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        }
        else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring))
        {
            more_to_do = 1;
        }
        if (notify)
        {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}
Example #30
File: netback.c Project: 7799/linux
static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = vif->grant_copy_op,
		.meta  = vif->meta,
	};

	skb_queue_head_init(&rxq);

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		RING_IDX max_slots_needed;
		RING_IDX old_req_cons;
		RING_IDX ring_slots_used;
		int i;

		/* We need a cheap worst-case estimate for the number of
		 * slots we'll use.
		 */

		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			unsigned int offset;

			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			offset = skb_shinfo(skb)->frags[i].page_offset;

			/* For a worst-case estimate we need to factor in
			 * the fragment page offset as this will affect the
			 * number of times xenvif_gop_frag_copy() will
			 * call start_new_rx_buffer().
			 */
			max_slots_needed += DIV_ROUND_UP(offset + size,
							 PAGE_SIZE);
		}

		/* To avoid the estimate becoming too pessimal for some
		 * frontends that limit posted rx requests, cap the estimate
		 * at MAX_SKB_FRAGS.
		 */
		if (max_slots_needed > MAX_SKB_FRAGS)
			max_slots_needed = MAX_SKB_FRAGS;

		/* We may need one more slot for GSO metadata */
		if (skb_is_gso(skb) &&
		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
			max_slots_needed++;

		/* If the skb may not fit then bail out now */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			vif->rx_last_skb_slots = max_slots_needed;
			break;
		} else
			vif->rx_last_skb_slots = 0;

		old_req_cons = vif->rx.req_cons;
		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
		ring_slots_used = vif->rx.req_cons - old_req_cons;

		BUG_ON(ring_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}


		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}

void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}