Example #1
static int xen_block_get_request(XenBlockDataPlane *dataplane,
                                 XenBlockRequest *request, RING_IDX rc)
{
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE: {
        blkif_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.native, rc);

        memcpy(&request->req, req, sizeof(request->req));
        break;
    }
    case BLKIF_PROTOCOL_X86_32: {
        blkif_x86_32_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);

        blkif_get_x86_32_req(&request->req, req);
        break;
    }
    case BLKIF_PROTOCOL_X86_64: {
        blkif_x86_64_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);

        blkif_get_x86_64_req(&request->req, req);
        break;
    }
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}
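For context, here is a minimal sketch of the backend consumer loop that typically drives a helper like the one above: the consumer index is read from the back ring, each request is copied off the shared page, and req_cons is only advanced after the copy. Only the RING_* macros and the req_cons/sring fields come from the standard Xen io/ring.h; the myif_* names and the handler are hypothetical placeholders, not taken from the example.

/*
 * Hypothetical consumer loop (sketch only).  Assumes ring types generated by
 *     DEFINE_RING_TYPES(myif, struct myif_request, struct myif_response);
 * from Xen's io/ring.h.
 */
static void myif_consume_requests(struct myif_back_ring *ring)
{
    RING_IDX rc = ring->req_cons;
    RING_IDX rp = ring->sring->req_prod;
    struct myif_request req;

    rmb();    /* read req_prod before reading the requests themselves */

    while (rc != rp) {
        /* Copy the request off the shared page before validating it, for
         * the same reason the examples here memcpy() and then barrier(). */
        memcpy(&req, RING_GET_REQUEST(ring, rc), sizeof(req));
        ring->req_cons = ++rc;

        myif_handle_request(&req);    /* hypothetical handler */
    }
}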
Example #2
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct netbk_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}
Example #3
File: asynccall.c  Project: 7perl/akaros
// TODO: right now there is one channel (remote), in the future, the caller
// may specify local which will cause it to give up the core to do the work.
// creation of additional remote channel also allows the caller to prioritize
// work, because the default policy for the kernel is to roundrobin between them.
int async_syscall(arsc_channel_t* chan, syscall_req_t* req, syscall_desc_t** desc_ptr2)
{
	// Note that this assumes one global frontring (TODO)
	// abort if there is no room for our request.  ring size is currently 64.
	// we could spin til it's free, but that could deadlock if this same thread
	// is supposed to consume the requests it is waiting on later.
	syscall_desc_t* desc = malloc(sizeof (syscall_desc_t));
	desc->channel = chan;
	syscall_front_ring_t *fr = &(desc->channel->sysfr);
	//TODO: can do it locklessly using CAS, but could change with local async calls
	struct mcs_lock_qnode local_qn = {0};
	mcs_lock_lock(&(chan->aclock), &local_qn);
	if (RING_FULL(fr)) {
		/* release the lock and the descriptor we just allocated */
		mcs_lock_unlock(&chan->aclock, &local_qn);
		free(desc);
		errno = EBUSY;
		return -1;
	}
	// req_prod_pvt comes in as the previously produced item.  need to
	// increment to the next available spot, which is the one we'll work on.
	// at some point, we need to listen for the responses.
	desc->idx = ++(fr->req_prod_pvt);
	syscall_req_t* r = RING_GET_REQUEST(fr, desc->idx);
	// CAS on the req->status perhaps
	req->status = REQ_alloc;

	memcpy(r, req, sizeof(syscall_req_t));
	r->status = REQ_ready;
	// push our updates to syscallfrontring.req_prod_pvt
	// note: it is ok to push without protection since it is atomic and kernel
	// won't process any requests until they are marked REQ_ready (also atomic)
	RING_PUSH_REQUESTS(fr);
	//cprintf("DEBUG: sring->req_prod: %d, sring->rsp_prod: %d\n", 
	mcs_lock_unlock(&desc->channel->aclock, &local_qn);
	*desc_ptr2 = desc;
	return 0;
}
Example #4
LOCAL void mcd_event_put_request(struct domain *d, mcd_event_request_t *req)
{
    mcd_event_front_ring_t *front_ring;
    RING_IDX req_prod;

    my_trace();

    mcd_event_ring_lock(d);

    front_ring = &d->mcd_event.front_ring;
    req_prod = front_ring->req_prod_pvt;

    /* Copy request */
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    mcd_event_ring_unlock(d);

    my_trace();

    // TODO check whether I have to use notifying through channel or just ring.. ???
    notify_via_xen_event_channel(d, d->mcd_event.xen_port);
}
Example #5
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_32, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.x86_64, rc));
        break;
    }
    return 0;
}
Example #6
File: netfront.c  Project: cnplab/mini-os
/*
 * Gets a free TX request for copying data to backend
 */
static inline struct netif_tx_request *netfront_get_page(struct netfront_dev *dev)
{
	struct netif_tx_request *tx;
	unsigned short id;
	struct net_txbuffer* buf;
	int flags;

	local_irq_save(flags);
	if (unlikely(!trydown(&dev->tx_sem))) {
		local_irq_restore(flags);
		return NULL; /* we run out of available pages */
	}
	id = get_id_from_freelist(dev->tx_freelist);
	buf = &dev->tx_buffers[id];
	local_irq_restore(flags);

	tx = RING_GET_REQUEST(&dev->tx, dev->tx.req_prod_pvt++);
	tx->offset = 0;
	tx->size = 0;
	tx->id = id;
	tx->flags = 0;
#ifdef CONFIG_NETFRONT_PERSISTENT_GRANTS
	tx->gref = buf->gref;
#else
	tx->gref = buf->gref = GRANT_INVALID_REF;
#endif
#ifdef HAVE_LWIP
	buf->pbuf = NULL;
#endif
	return tx;
}
Example #7
void init_rx_buffers(struct netfront_dev *dev)
{
    int i, requeue_idx;
    netif_rx_request_t *req;
    int notify;

    /* Rebuild the RX buffer freelist and the RX ring itself. */
    for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) 
    {
        struct net_buffer* buf = &dev->rx_buffers[requeue_idx];
        req = RING_GET_REQUEST(&dev->rx, requeue_idx);

        buf->gref = req->gref = 
            gnttab_grant_access(dev->dom,virt_to_mfn(buf->page),0);

        req->id = requeue_idx;

        requeue_idx++;
    }

    dev->rx.req_prod_pvt = requeue_idx;

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);

    if (notify) 
        notify_remote_via_evtchn(dev->evtchn);

    dev->rx.sring->rsp_event = dev->rx.rsp_cons + 1;
}
Example #8
int send_request_to_dom0(void) 
{
	struct as_request *ring_req;
	int notify;
	static int reqid = 9;
	
	ring_req = RING_GET_REQUEST(&(info.ring), info.ring.req_prod_pvt);
	ring_req->id = reqid;
	ring_req->operation = reqid;
	ring_req->status = reqid;
	
	printk(KERN_DEBUG "\nxen:DomU: Fill in IDX-%d, with id=%d, op=%d, st=%d",
		info.ring.req_prod_pvt, ring_req->id, ring_req->operation, ring_req->status);
	reqid++;
	info.ring.req_prod_pvt += 1;
	
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&(info.ring), notify);
	if (notify) {
		printk(KERN_DEBUG "\nxen: DomU: sent a req to Dom0");
		notify_remote_via_irq(info.irq);
	} else {
		printk(KERN_DEBUG "\nxen:DomU: No notify req to Dom0");
		/* not strictly needed when notify == 0, but harmless */
		notify_remote_via_irq(info.irq);
	}
	
	printk("...\n");
	return 0;
}
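The producer pattern above assumes info.ring was already initialised as a front ring over a page shared with Dom0. Below is a hedged sketch of that setup using the standard SHARED_RING_INIT/FRONT_RING_INIT macros, assuming the ring types were generated with DEFINE_RING_TYPES(as, struct as_request, struct as_response); the grant-table and event-channel plumbing is deliberately omitted.

/* Sketch only: allocate the shared page and initialise the front ring that
 * the example above produces into.  Granting the page to Dom0 and binding
 * the event channel behind info.irq are omitted. */
static int setup_front_ring(void)
{
	struct as_sring *sring;

	sring = (struct as_sring *)__get_free_page(GFP_KERNEL);
	if (!sring)
		return -ENOMEM;

	SHARED_RING_INIT(sring);                       /* reset producer/consumer indices */
	FRONT_RING_INIT(&info.ring, sring, PAGE_SIZE); /* wrap the shared page as a front ring */

	return 0;
}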
Example #9
/*
 * xpvtap_user_request_push()
 */
static int
xpvtap_user_request_push(xpvtap_state_t *state, blkif_request_t *req,
    uint_t uid)
{
	blkif_request_t *outstanding_req;
	blkif_front_ring_t *uring;
	blkif_request_t *target;
	xpvtap_user_map_t *map;


	uring = &state->bt_user_ring.ur_ring;
	map = &state->bt_map;

	target = RING_GET_REQUEST(uring, uring->req_prod_pvt);

	/*
	 * Save request from the frontend. Used for ID mapping and unmap
	 * on response/cleanup
	 */
	outstanding_req = &map->um_outstanding_reqs[uid];
	bcopy(req, outstanding_req, sizeof (*outstanding_req));

	/* put the request on the user ring */
	bcopy(req, target, sizeof (*req));
	target->id = (uint64_t)uid;
	uring->req_prod_pvt++;

	pollwakeup(&state->bt_pollhead, POLLIN | POLLRDNORM);

	return (DDI_SUCCESS);
}
Example #10
/* called with urb ring lock held */
static VOID
PutRequestsOnRing(PXENUSB_DEVICE_DATA xudd) {
  partial_pvurb_t *partial_pvurb;
  uint16_t id;
  int notify;

  FUNCTION_ENTER();
  FUNCTION_MSG("IRQL = %d\n", KeGetCurrentIrql());

  while ((partial_pvurb = (partial_pvurb_t *)RemoveHeadList((PLIST_ENTRY)&xudd->partial_pvurb_queue)) != (partial_pvurb_t *)&xudd->partial_pvurb_queue) {
    FUNCTION_MSG("partial_pvurb = %p\n", partial_pvurb);
    /* if this partial_pvurb is cancelling another we don't need to check if the cancelled partial_pvurb is on the ring - that is taken care of in HandleEvent */
    id = get_id_from_freelist(xudd->req_id_ss);
    if (id == (uint16_t)-1) {
      FUNCTION_MSG("no free ring slots\n");
      InsertHeadList(&xudd->partial_pvurb_queue, &partial_pvurb->entry);
      break;
    }
    InsertTailList(&xudd->partial_pvurb_ring, &partial_pvurb->entry);
    xudd->partial_pvurbs[id] = partial_pvurb;
    partial_pvurb->req.id = id;    
    *RING_GET_REQUEST(&xudd->urb_ring, xudd->urb_ring.req_prod_pvt) = partial_pvurb->req;
    xudd->urb_ring.req_prod_pvt++;
  }
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&xudd->urb_ring, notify);
  if (notify) {
    FUNCTION_MSG("Notifying\n");
    XnNotify(xudd->handle, xudd->event_channel);
  }
  
  FUNCTION_EXIT();
}
Example #11
File: netback.c  Project: 7799/linux
static int xenvif_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
Example #12
static int ixp_recover(struct ixpfront_info *info)
{
	int i;
	struct ixp_request *req;
	struct ixp_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < IXP_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[IXP_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < IXP_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].req_page == NULL)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				0);
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Now safe for us to use the shared ring */
	info->connected = IXP_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	return 0;
}
Example #13
/* Issue an aio */
void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
    struct blkfront_dev *dev = aiocbp->aio_dev;
    struct blkif_request *req;
    RING_IDX i;
    int notify;
    int n, j;
    uintptr_t start, end;

    // Can't io at non-sector-aligned location
    ASSERT(!(aiocbp->aio_offset & (dev->info.sector_size-1)));
    // Can't io non-sector-sized amounts
    ASSERT(!(aiocbp->aio_nbytes & (dev->info.sector_size-1)));
    // Can't io non-sector-aligned buffer
    ASSERT(!((uintptr_t) aiocbp->aio_buf & (dev->info.sector_size-1)));

    start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
    end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes + PAGE_SIZE - 1) & PAGE_MASK;
    aiocbp->n = n = (end - start) / PAGE_SIZE;

    /* qemu's IDE max multsect is 16 (8KB) and SCSI max DMA was set to 32KB,
     * so max 44KB can't happen */
    ASSERT(n <= BLKIF_MAX_SEGMENTS_PER_REQUEST);

    blkfront_wait_slot(dev);
    i = dev->ring.req_prod_pvt;
    req = RING_GET_REQUEST(&dev->ring, i);

    req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
    req->nr_segments = n;
    req->handle = dev->handle;
    req->id = (uintptr_t) aiocbp;
    req->sector_number = aiocbp->aio_offset / 512;

    for (j = 0; j < n; j++) {
        req->seg[j].first_sect = 0;
        req->seg[j].last_sect = PAGE_SIZE / 512 - 1;
    }
    req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) / 512;
    req->seg[n-1].last_sect = (((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / 512;
    for (j = 0; j < n; j++) {
	uintptr_t data = start + j * PAGE_SIZE;
        if (!write) {
            /* Trigger CoW if needed */
            *(char*)(data + (req->seg[j].first_sect << 9)) = 0;
            barrier();
        }
	aiocbp->gref[j] = req->seg[j].gref =
            gnttab_grant_access(dev->dom, virtual_to_mfn(data), write);
    }

    dev->ring.req_prod_pvt = i + 1;

    wmb();
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

    if(notify) notify_remote_via_evtchn(dev->evtchn);
}
Example #14
File: td-ctx.c  Project: andyhhp/blktap
/**
 * Utility function that retrieves a request using @idx as the ring index,
 * copying it to the @dst in a H/W independent way.
 *
 * @param blkif the block interface
 * @param dst address that receives the request
 * @param idx the index of the request in the ring
 */
static inline void
xenio_blkif_get_request(struct td_xenblkif * const blkif,
        blkif_request_t *const dst, const RING_IDX idx)
{
    blkif_back_rings_t * rings;

    ASSERT(blkif);
    ASSERT(dst);

    rings = &blkif->rings;

    switch (blkif->proto) {
        case BLKIF_PROTOCOL_NATIVE:
            {
                blkif_request_t *src;
                src = RING_GET_REQUEST(&rings->native, idx);
                memcpy(dst, src, sizeof(blkif_request_t));
                break;
            }

        case BLKIF_PROTOCOL_X86_32:
            {
                blkif_x86_32_request_t *src;
                src = RING_GET_REQUEST(&rings->x86_32, idx);
                blkif_get_req(dst, src);
                break;
            }

        case BLKIF_PROTOCOL_X86_64:
            {
                blkif_x86_64_request_t *src;
                src = RING_GET_REQUEST(&rings->x86_64, idx);
                blkif_get_req(dst, src);
                break;
            }

        default:
            /*
             * TODO log error
             */
            ASSERT(0);
    }
}
Example #15
File: xen_disk.c  Project: Mellanox/qemu
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}
Example #16
static void xenvif_tx_err(struct xenvif *vif,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
}
Example #17
File: netfront.c  Project: cnplab/mini-os
static void netfront_fillup_rx_buffers(struct netfront_dev *dev)
{
	RING_IDX prod;
	struct netif_rx_request *req;
	grant_ref_t ref;
	unsigned short id;
	int notify;
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	struct net_rxbuffer* buf;
	int flags;
#endif

#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	local_irq_save(flags);
#endif
	/* fill-up slots again */
	for (prod = dev->rx.req_prod_pvt;
	     prod - dev->rx.rsp_cons < NET_RX_RING_SIZE;
	     prod++) {
		id = netfront_rxidx(prod);
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
		buf = netfront_get_rxbuffer(dev);
		if (buf == NULL)
			break; /* out of rx buffers */
		BUG_ON(buf->page == NULL);
		ref = gnttab_grant_access(dev->dom,virt_to_mfn(buf->page),0);
		buf->gref = ref;
		BUG_ON(ref == GRANT_INVALID_REF);
		dev->rx_buffers[id] = buf;
#else
		ref = dev->rx_buffers[id].gref;
#endif
		req = RING_GET_REQUEST(&dev->rx, prod);
		req->id = id;
		req->gref = ref;
	}
#ifndef CONFIG_NETFRONT_PERSISTENT_GRANTS
	local_irq_restore(flags);
#endif

	if (dev->rx.req_prod_pvt != prod) {
		dev->rx.req_prod_pvt = prod;
		wmb();
		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->rx, notify);
#ifdef CONFIG_SELECT_POLL
		files[dev->fd].read = 0;
#endif
		if (notify)
			notify_remote_via_evtchn(dev->rx_evtchn);
	}
}
Example #18
File: vm_event.c  Project: prosig/xen
/*
 * This must be preceded by a call to claim_slot(), and is guaranteed to
 * succeed.  As a side-effect however, the vCPU may be paused if the ring is
 * overly full and its continued execution would cause stalling and excessive
 * waiting.  The vCPU will be automatically unpaused when the ring clears.
 */
void vm_event_put_request(struct domain *d,
                          struct vm_event_domain *ved,
                          vm_event_request_t *req)
{
    vm_event_front_ring_t *front_ring;
    int free_req;
    unsigned int avail_req;
    RING_IDX req_prod;

    if ( current->domain != d )
    {
        req->flags |= VM_EVENT_FLAG_FOREIGN;
#ifndef NDEBUG
        if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
            gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n",
                     d->domain_id, req->vcpu_id);
#endif
    }

    req->version = VM_EVENT_INTERFACE_VERSION;

    vm_event_ring_lock(ved);

    /* Due to the reservations, this step must succeed. */
    front_ring = &ved->front_ring;
    free_req = RING_FREE_REQUESTS(front_ring);
    ASSERT(free_req > 0);

    /* Copy request */
    req_prod = front_ring->req_prod_pvt;
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    /* We've actually *used* our reservation, so release the slot. */
    vm_event_release_slot(d, ved);

    /* Give this vCPU a black eye if necessary, on the way out.
     * See the comments above wake_blocked() for more information
     * on how this mechanism works to avoid waiting. */
    avail_req = vm_event_ring_available(ved);
    if( current->domain == d && avail_req < d->max_vcpus )
        vm_event_mark_and_pause(current, ved);

    vm_event_ring_unlock(ved);

    notify_via_xen_event_channel(d, ved->xen_port);
}
Example #19
void netfront_xmit(struct netfront_dev *dev, unsigned char* data,int len)
{
    int flags;
    struct netif_tx_request *tx;
    RING_IDX i;
    int notify;
    unsigned short id;
    struct net_buffer* buf;
    void* page;
#ifdef CONFIG_NETMAP
    if (dev->netmap) {
        netmap_netfront_xmit(dev->na, data, len);
        return;
    }
#endif

    BUG_ON(len > PAGE_SIZE);

    down(&dev->tx_sem);

    local_irq_save(flags);
    id = get_id_from_freelist(dev->tx_freelist);
    local_irq_restore(flags);

    buf = &dev->tx_buffers[id];
    page = buf->page;

    i = dev->tx.req_prod_pvt;
    tx = RING_GET_REQUEST(&dev->tx, i);

    memcpy(page,data,len);

    tx->gref = buf->gref;

    tx->offset=0;
    tx->size = len;
    tx->flags=0;
    tx->id = id;
    dev->tx.req_prod_pvt = i + 1;

    wmb();

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->tx, notify);

    if(notify) notify_remote_via_evtchn(dev->evtchn);

    local_irq_save(flags);
    network_tx_buf_gc(dev);
    local_irq_restore(flags);
}
Example #20
File: ring.c  Project: relip/blktap-dkms
void
blktap_ring_submit_request(struct blktap *tap,
			   struct blktap_request *request)
{
	struct blktap_ring *ring = &tap->ring;
	struct blktap_ring_request *breq;
	int nsecs;

	dev_dbg(ring->dev,
		"request %d [%p] submit\n", request->usr_idx, request);

	breq = RING_GET_REQUEST(&ring->ring, ring->ring.req_prod_pvt);

	breq->id            = request->usr_idx;
	breq->__pad         = 0;
	breq->operation     = request->operation;
	breq->nr_segments   = request->nr_pages;

	switch (breq->operation) {
	case BLKTAP_OP_READ:
		nsecs = blktap_ring_make_rw_request(tap, request, breq);

		tap->stats.st_rd_sect += nsecs;
		tap->stats.st_rd_req++;
		break;

	case BLKTAP_OP_WRITE:
		nsecs = blktap_ring_make_rw_request(tap, request, breq);

		tap->stats.st_wr_sect += nsecs;
		tap->stats.st_wr_req++;
		break;

	case BLKTAP_OP_FLUSH:
		breq->u.rw.sector_number = 0;
		tap->stats.st_fl_req++;
		break;

	case BLKTAP_OP_TRIM:
		nsecs = blktap_ring_make_tr_request(tap, request, breq);

		tap->stats.st_tr_sect += nsecs;
		tap->stats.st_tr_req++;
		break;
	default:
		BUG();
	}

	ring->ring.req_prod_pvt++;
}
Example #21
File: profiling.c  Project: dmjio/ghc
static int write_block(FILE *p, blkif_sector_t sector, size_t amt)
{
  static uint64_t next_reqid = 1;
  blkif_response_t *rsp;
  blkif_request_t *req;
  int notify, work_to_do;
  uint64_t reqid;
  RING_IDX i;

  /* wait until we can write something */
  while(RING_FULL(&p->ring)) runtime_block(1);

  /* write out the request */
  i = p->ring.req_prod_pvt++;
  req = RING_GET_REQUEST(&p->ring, i);
  memset(req, 0, sizeof(blkif_request_t));
  req->operation         = BLKIF_OP_WRITE;
  req->nr_segments       = 1;
  req->handle            = p->disk_handle;
  req->id                = reqid = next_reqid++;
  req->sector_number     = sector;
  req->seg[0].gref       = p->block_grant;
  req->seg[0].first_sect = 0;
  req->seg[0].last_sect  = (amt - 1) / 512;
  wmb();
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&p->ring, notify);
  if(notify) channel_send(p->chan);

  /* wait for it to be satisfied */
  do {
    while(!RING_HAS_UNCONSUMED_RESPONSES(&p->ring))
      runtime_block(1);
    i = p->ring.rsp_cons++;
    rsp = RING_GET_RESPONSE(&p->ring, i);
  } while(rsp->id != reqid);

  /* was it successful? */
  if(rsp->status != BLKIF_RSP_OKAY) {
    printf("PROFILING: Block write failed!\n");
    return 0;
  }

  /* we do writes one at a time, synchronously, so work_to_do should always
     be false */
  RING_FINAL_CHECK_FOR_RESPONSES(&p->ring, work_to_do);
  assert(!work_to_do);

  return 1;
}
示例#22
0
void netfront_xmit(struct netfront_dev *dev, unsigned char* data,int len)
{
    int flags;
    struct netif_tx_request *tx;
    RING_IDX i;
    int notify;
    unsigned short id;
    struct net_buffer* buf;
    void* page;

    //printf("netfront_xmit\n"); //farewellkou

    BUG_ON(len > PAGE_SIZE);

    down(&dev->tx_sem);

    local_irq_save(flags);
    id = get_id_from_freelist(dev->tx_freelist);
    local_irq_restore(flags);

    buf = &dev->tx_buffers[id];
    page = buf->page;
    if (!page)
	page = buf->page = (char*) alloc_page();

    i = dev->tx.req_prod_pvt;
    tx = RING_GET_REQUEST(&dev->tx, i);

    memcpy(page,data,len);

    buf->gref = 
        tx->gref = gnttab_grant_access(dev->dom,virt_to_mfn(page),1);

    tx->offset=0;
    tx->size = len;
    tx->flags=0;
    tx->id = id;
    dev->tx.req_prod_pvt = i + 1;

    wmb();

    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->tx, notify);

    if(notify) notify_remote_via_evtchn(dev->evtchn);

    local_irq_save(flags);
    network_tx_buf_gc(dev);
    local_irq_restore(flags);
}
Example #23
static int scsifront_do_request(struct vscsifrnt_info *info,
				struct vscsifrnt_shadow *shadow)
{
	struct vscsiif_front_ring *ring = &(info->ring);
	struct vscsiif_request *ring_req;
	struct scsi_cmnd *sc = shadow->sc;
	uint32_t id;
	int i, notify;

	if (RING_FULL(&info->ring))
		return -EBUSY;

	id = scsifront_get_rqid(info);	/* use id in response */
	if (id >= VSCSIIF_MAX_REQS)
		return -EBUSY;

	info->shadow[id] = shadow;
	shadow->rqid = id;

	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
	ring->req_prod_pvt++;

	ring_req->rqid        = id;
	ring_req->act         = shadow->act;
	ring_req->ref_rqid    = shadow->ref_rqid;
	ring_req->nr_segments = shadow->nr_segments;

	ring_req->id      = sc->device->id;
	ring_req->lun     = sc->device->lun;
	ring_req->channel = sc->device->channel;
	ring_req->cmd_len = sc->cmd_len;

	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);

	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
	ring_req->timeout_per_command = sc->request->timeout / HZ;

	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
		ring_req->seg[i] = shadow->seg[i];

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);

	return 0;
}
Example #24
File: netback.c  Project: 7799/linux
static void xenvif_tx_err(struct xenvif *vif,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&vif->response_lock, flags);
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		spin_unlock_irqrestore(&vif->response_lock, flags);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
}
Example #25
VOID
FrontendInsertRequestOnRing(
    IN  PXENVBD_FRONTEND        Frontend,
    IN  PXENVBD_REQUEST         Request
    )
{
    ULONG               Index;
    blkif_request_t*    RingReq;
    //blkif_request_discard_t*    Discard;

    RingReq = RING_GET_REQUEST(&Frontend->FrontRing, Frontend->FrontRing.req_prod_pvt);
    Frontend->FrontRing.req_prod_pvt++;

    switch (Request->Operation) {
    //case BLKIF_OP_DISCARD:
    //    Discard = (blkif_request_discard_t*)Request;
    //    Discard->operation       = BLKIF_OP_DISCARD;
    //    Discard->handle          = (USHORT)Frontend->DeviceId;
    //    Discard->id              = (ULONG64)Request;
    //    Discard->sector_number   = Request->FirstSector;
    //    Discard->nr_sectors      = Request->NrSectors;
    //    break;
    case BLKIF_OP_READ:
    case BLKIF_OP_WRITE:
        RingReq->operation          = Request->Operation;
        RingReq->nr_segments        = Request->NrSegments;
        RingReq->handle             = (USHORT)Frontend->DeviceId;
        RingReq->id                 = (ULONG64)Request;
        RingReq->sector_number      = Request->FirstSector;
        for (Index = 0; Index < Request->NrSegments; ++Index) {
            RingReq->seg[Index].gref       = Request->Segments[Index].GrantRef;
            RingReq->seg[Index].first_sect = Request->Segments[Index].FirstSector;
            RingReq->seg[Index].last_sect  = Request->Segments[Index].LastSector;
        }
        break;
    case BLKIF_OP_WRITE_BARRIER:
        RingReq->operation          = Request->Operation;
        RingReq->nr_segments        = 0;
        RingReq->handle             = (USHORT)Frontend->DeviceId;
        RingReq->id                 = (ULONG64)Request;
        RingReq->sector_number      = Request->FirstSector;
        break;
    default:
        ASSERT(FALSE);
        break;
    }
}
Example #26
/*
 * This must be preceded by a call to claim_slot(), and is guaranteed to
 * succeed.  As a side-effect however, the vCPU may be paused if the ring is
 * overly full and its continued execution would cause stalling and excessive
 * waiting.  The vCPU will be automatically unpaused when the ring clears.
 */
void mem_event_put_request(struct domain *d,
                           struct mem_event_domain *med,
                           mem_event_request_t *req)
{
    mem_event_front_ring_t *front_ring;
    int free_req;
    unsigned int avail_req;
    RING_IDX req_prod;

    if ( current->domain != d )
    {
        req->flags |= MEM_EVENT_FLAG_FOREIGN;
        ASSERT( !(req->flags & MEM_EVENT_FLAG_VCPU_PAUSED) );
    }

    mem_event_ring_lock(med);

    /* Due to the reservations, this step must succeed. */
    front_ring = &med->front_ring;
    free_req = RING_FREE_REQUESTS(front_ring);
    ASSERT(free_req > 0);

    /* Copy request */
    req_prod = front_ring->req_prod_pvt;
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    /* We've actually *used* our reservation, so release the slot. */
    mem_event_release_slot(d, med);

    /* Give this vCPU a black eye if necessary, on the way out.
     * See the comments above wake_blocked() for more information
     * on how this mechanism works to avoid waiting. */
    avail_req = mem_event_ring_available(med);
    if( current->domain == d && avail_req < d->max_vcpus )
        mem_event_mark_and_pause(current, med);

    mem_event_ring_unlock(med);

    notify_via_xen_event_channel(d, med->xen_port);
}
Example #27
File: netfront.c  Project: pipcet/ipxe
/**
 * Refill receive descriptor ring
 *
 * @v netdev		Network device
 */
static void netfront_refill_rx ( struct net_device *netdev ) {
	struct netfront_nic *netfront = netdev->priv;
	struct xen_device *xendev = netfront->xendev;
	struct io_buffer *iobuf;
	struct netif_rx_request *request;
	int notify;
	int rc;

	/* Do nothing if ring is already full */
	if ( netfront_ring_is_full ( &netfront->rx ) )
		return;

	/* Refill ring */
	do {

		/* Allocate I/O buffer */
		iobuf = alloc_iob ( PAGE_SIZE );
		if ( ! iobuf ) {
			/* Wait for next refill */
			break;
		}

		/* Add to descriptor ring */
		request = RING_GET_REQUEST ( &netfront->rx_fring,
					     netfront->rx_fring.req_prod_pvt );
		if ( ( rc = netfront_push ( netfront, &netfront->rx,
					    iobuf, &request->id,
					    &request->gref ) ) != 0 ) {
			netdev_rx_err ( netdev, iobuf, rc );
			break;
		}
		DBGC2 ( netfront, "NETFRONT %s RX id %d ref %d is %#08lx+%zx\n",
			xendev->key, request->id, request->gref,
			virt_to_phys ( iobuf->data ), iob_tailroom ( iobuf ) );

		/* Move to next descriptor */
		netfront->rx_fring.req_prod_pvt++;

	} while ( ! netfront_ring_is_full ( &netfront->rx ) );

	/* Push new descriptors and notify backend if applicable */
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY ( &netfront->rx_fring, notify );
	if ( notify )
		netfront_send_event ( netfront );
}
Example #28
static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
{
	struct vscsiif_front_ring *ring = &(info->ring);
	struct vscsiif_request *ring_req;
	uint32_t id;

	id = scsifront_get_rqid(info);	/* use id in response */
	if (id >= VSCSIIF_MAX_REQS)
		return NULL;

	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);

	ring->req_prod_pvt++;

	ring_req->rqid = (uint16_t)id;

	return ring_req;
}
Example #29
static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						struct netrx_pending_operations *npo)
{
	struct netbk_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}
Example #30
File: netback.c  Project: 7799/linux
static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}