Code example #1
File: xen_nic.c Project: D3cedut0/QEMU
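/* Fill the next free RX response slot on the shared ring and push it,
 * kicking the frontend if it asked to be notified. */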
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset     = offset;
    resp->flags      = flags;
    resp->id         = req->id;
    resp->status     = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;
    }

    xen_be_printf(&netdev->xendev, 3, "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_be_send_notify(&netdev->xendev);
    }
}
Code example #2
File: xen_nic.c Project: D3cedut0/QEMU
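/* Complete one TX request: write the response at rsp_prod_pvt, push it,
 * and finally re-check the ring for more requests to process. */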
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_be_send_notify(&netdev->xendev);
    }

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}
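Note: the two helpers above follow the sequence every backend in these examples uses to complete a request: claim the slot at rsp_prod_pvt with RING_GET_RESPONSE, fill it in, advance rsp_prod_pvt, and let RING_PUSH_RESPONSES_AND_CHECK_NOTIFY publish the responses and decide whether the frontend asked for an event. A minimal sketch of that pattern, assuming a hypothetical ring declared with DEFINE_RING_TYPES(myif, myif_request_t, myif_response_t) and a hypothetical send_event() notifier:

/* Complete one request on a generic Xen shared ring (sketch).
 * myif_back_ring_t, myif_response_t and send_event() are hypothetical;
 * the RING_* macros are the standard ones from xen/interface/io/ring.h. */
static void myif_send_response(myif_back_ring_t *ring, uint64_t id, int16_t status)
{
    myif_response_t *resp;
    int notify;

    /* Claim the next private producer slot and fill it in. */
    resp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
    resp->id     = id;
    resp->status = status;

    /* Advance the private producer index ... */
    ring->rsp_prod_pvt++;

    /* ... then publish to the shared ring (this macro issues the write
     * barrier) and check whether the frontend wants an event. */
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
    if (notify)
        send_event();   /* e.g. xen_be_send_notify() or notify_remote_via_irq() */
}

Examples #4-#7 extend this with RING_FINAL_CHECK_FOR_REQUESTS, which re-checks for requests that raced in while the responses were being pushed.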
Code example #3
File: fs-backend.c Project: tumf/xen-3.3.1
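/* Block until an in-flight AIO request completes (dispatch its response)
 * or the event channel file descriptor becomes readable (read the port
 * and return to the caller). */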
static void handle_aio_events(struct fs_mount *mount)
{
    int fd, count, i, notify;
    evtchn_port_t port;
    /* AIO control block for the evtchn file descriptor */
    struct aiocb evtchn_cb;
    const struct aiocb * cb_list[mount->nr_entries];
    int request_ids[mount->nr_entries];

    /* Prepare the AIO control block for evtchn */ 
    fd = xc_evtchn_fd(mount->evth); 
    bzero(&evtchn_cb, sizeof(struct aiocb));
    evtchn_cb.aio_fildes = fd;
    evtchn_cb.aio_nbytes = sizeof(port);
    evtchn_cb.aio_buf = &port;
    assert(aio_read(&evtchn_cb) == 0);

wait_again:   
    /* Create list of active AIO requests */
    count = 0;
    for(i=0; i<mount->nr_entries; i++)
        if(mount->requests[i].active)
        {
            cb_list[count] = &mount->requests[i].aiocb;
            request_ids[count] = i;
            count++;
        }
    /* Add the event channel at the end of the list. Event channel needs to be
     * handled last as it exits this function. */
    cb_list[count] = &evtchn_cb;
    request_ids[count] = -1;
    count++;

    /* Block until an AIO request finishes, or we get an event */
    while (1) {
        int ret = aio_suspend(cb_list, count, NULL);
        if (!ret)
            break;
        assert(errno == EINTR);
    }
    for(i=0; i<count; i++)
        if(aio_error(cb_list[i]) != EINPROGRESS)
        {
            if(request_ids[i] >= 0)
                dispatch_response(mount, request_ids[i]);
            else
                goto read_event_channel;
        }
 
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
    printf("Pushed responces and notify=%d\n", notify);
    if(notify)
        xc_evtchn_notify(mount->evth, mount->local_evtchn);
    
    goto wait_again;

read_event_channel:    
    assert(aio_return(&evtchn_cb) == sizeof(evtchn_port_t)); 
    assert(xc_evtchn_unmask(mount->evth, mount->local_evtchn) >= 0);
}
Code example #4
File: xen-block.c Project: MaddTheSane/qemu
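/* Write one response onto whichever ring layout the negotiated protocol
 * uses; returns nonzero if the frontend needs an event. */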
static int xen_block_send_response(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.native,
            dataplane->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_32_part,
            dataplane->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_64_part,
            dataplane->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = request->req.id;
    resp->operation = request->req.operation;
    resp->status = request->status;

    dataplane->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
                                         send_notify);
    if (dataplane->rings.common.rsp_prod_pvt ==
        dataplane->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
                                      have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        dataplane->more_work++;
    }
    return send_notify;
}
Code example #5
File: xen_disk.c Project: MrPavel3243/limbo-android
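/* Older xen_disk counterpart of xen_block_send_response above: the
 * response is built locally and memcpy'd into the ring slot. */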
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        /* Unknown protocol: return instead of writing through a NULL
         * destination pointer below. */
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}
Code example #6
File: dom0_20140922.c Project: huier103/virtual
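/* Interrupt handler: consume every pending request on the shared ring,
 * echo it back as a response with status incremented, and notify domU
 * when the ring macros say so. */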
static irqreturn_t as_int(int irq, void *dev_id)
{
    RING_IDX rc, rp;
    as_request_t req;
    as_response_t resp;
    int more_to_do, notify;
    printk(KERN_DEBUG "\nxen:Dom0: as_int called with dev_id=%x info=%x", (unsigned int)dev_id, (unsigned int) &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG " rc = %d rp = %d", rc, rp);
    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen:Dom0:Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();
        switch (req.operation) {
        case 0:
            printk(KERN_DEBUG "\nxen:dom0:req.operation = 0");
            break;
        default:
            printk(KERN_DEBUG "\nxen:dom0:req.operation = %d", req.operation);
            break;
        }
        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons) {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        } else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring)) {
            more_to_do = 1;
        }
        if (notify) {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}
Code example #7
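/* Interrupt handler: dispatch each character-device request
 * (open/read/write/ioctl/close) to the backing device and queue a
 * response on the shared ring. */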
static irqreturn_t chrif_int(int irq, void *dev_id)
{
    int err = 0;    /* pre-set: the device read below is currently commented out */
    RING_IDX rc, rp;
    int more_to_do, notify;
    chrif_request_t req;
    chrif_response_t resp;
    printk(KERN_INFO "\n------------------------------start response-------------------------------------");
    printk(KERN_DEBUG "\nxen: Dom0: chrif_int called with dev_id=%x info=%x", (unsigned int)dev_id, (unsigned int) &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG "\nxen: Dom0: rc = %d rp = %d", rc, rp);

    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen: Dom0: Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();

        printk(KERN_DEBUG "\nxen: Dom0: operation:  %s", op_name(resp.operation));
        switch(resp.operation) {
        case CHRIF_OP_OPEN:
            info.chrif_filp = filp_open(DEVICE_PATH, O_RDWR, 0);
            printk(KERN_DEBUG "\nxen: dom0: response open");
            break;
        case CHRIF_OP_READ: {
            resp.rdwr.len = req.rdwr.len;
            //struct pdma_info pdma_info;
            //memset(op_page->addr, 0, resp.rdwr.len);
            old_fs = get_fs();
            set_fs(get_ds());
            //get read size of block
            //err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
            //read data from device to page
            //err =info.chrif_filp->f_op->read(info.chrif_filp, op_page->addr, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
            if(err < 0)
                printk(KERN_DEBUG "\nxen: Dom0: read %u bytes error", resp.rdwr.len);
            printk(KERN_DEBUG "\nxen: dom0: response read");
            break;
        }
        case CHRIF_OP_WRITE: {
            int i = 0, count, ret;
            struct vm_struct *op_page;
            struct gnttab_map_grant_ref op_page_ops;
            struct gnttab_unmap_grant_ref op_page_unmap_ops;
            resp.rdwr.len = req.rdwr.len;

            count = resp.rdwr.len/4096;
            printk(KERN_DEBUG "\nxen: Dom0: write %u bytes %d times", resp.rdwr.len, count);

            block_buf = (char *)kmalloc(resp.rdwr.len, GFP_KERNEL);
            memset(block_buf, 0, resp.rdwr.len);

            while(i < count) {
                resp.op_gref[i] = req.op_gref[i];
                printk(KERN_DEBUG "\nxen: dom0: req.op_gref[0]: %d", resp.op_gref[i]);

                op_page = alloc_vm_area(PAGE_SIZE, NULL);
                if (op_page == NULL) {
                    /* Allocation failed: nothing to free, bail out. */
                    printk("\nxen: dom0: could not allocate shared_page");
                    return IRQ_HANDLED;
                }
                /*gnttab_set_map_op(&op_page_ops, (unsigned long)op_page->addr, GNTMAP_host_map, resp.op_gref[i], info.remoteDomain);

                 op_page_unmap_ops.host_addr = (unsigned long)(op_page->addr);
                 unmap_ops.handle = op_page_ops.handle;
                 if(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op_page_ops, 1)){
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
                     return -EFAULT;
                 }
                 if (op_page_ops.status) {
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", op_page_ops.status);
                     return -EFAULT;
                 }
                 printk(KERN_DEBUG "\nxen: dom0: map shared page success, shared_page=%x, handle = %x, status = %x", (unsigned int)op_page->addr, op_page_ops.handle, op_page_ops.status);

                 memcpy(block_buf+i*4096, op_page->addr, 4096);
                 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op_page_unmap_ops, 1);
                 if (ret == 0) {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame");
                 } else {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame failed");
                 }
                 free_vm_area(op_page);*/
                i++;
            }

            /*  old_fs = get_fs();
            set_fs(get_ds());
            //write data from page to device
            //err = info.chrif_filp->f_op->write(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
              if(err < 0)
            	printk(KERN_DEBUG "\nxen: Dom0: write %u bytes error", resp.rdwr.len);

              */ //kfree(block_buf);
            printk(KERN_DEBUG "\nxen: dom0: response write");
            break;
        }
        case CHRIF_OP_IOCTL: {
            resp.ioc_parm.cmd = req.ioc_parm.cmd;
            switch(resp.ioc_parm.cmd) {
            case PDMA_IOC_START_DMA: {
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl success");
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                break;
            }
            case PDMA_IOC_STOP_DMA: {
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl success");
                break;
            }
            case PDMA_IOC_INFO: {
                struct pdma_info pdma_info;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: info ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: info ioctl success");
                resp.ioc_parm.info = pdma_info;
                break;
            }
            case PDMA_IOC_STAT: {
                struct pdma_stat pdma_stat;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stat ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stat ioctl success");
                resp.ioc_parm.stat = pdma_stat;
                break;
            }
            case PDMA_IOC_RW_REG: {
                struct pdma_rw_reg ctrl = req.ioc_parm.ctrl;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl success");
                resp.ioc_parm.ctrl = ctrl;
                break;
            }
            default:
                printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
                break;
            }
            printk(KERN_INFO "\nxen: Dom0: response ioctl");
            break;
        }
        case CHRIF_OP_CLOSE:
            filp_close(info.chrif_filp, NULL);
            printk(KERN_INFO "\nxen: Dom0: response close");
            break;
        default:
            printk(KERN_DEBUG "\nxen: Dom0: unknow the operation");
            break;
        }

        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        //put response and check whether or not to notify domU
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons)
        {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        }
        else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring))
        {
            more_to_do = 1;
        }
        if (notify)
        {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}
Code example #8
File: netback.c Project: 7799/linux
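/* Drain vif->rx_queue: batch grant copies for each skb, then write the
 * RX responses (plus any GSO extra slots) and notify the frontend once
 * at the end. */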
static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = vif->grant_copy_op,
		.meta  = vif->meta,
	};

	skb_queue_head_init(&rxq);

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		RING_IDX max_slots_needed;
		RING_IDX old_req_cons;
		RING_IDX ring_slots_used;
		int i;

		/* We need a cheap worst-case estimate for the number of
		 * slots we'll use.
		 */

		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			unsigned int offset;

			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			offset = skb_shinfo(skb)->frags[i].page_offset;

			/* For a worst-case estimate we need to factor in
			 * the fragment page offset as this will affect the
			 * number of times xenvif_gop_frag_copy() will
			 * call start_new_rx_buffer().
			 */
			max_slots_needed += DIV_ROUND_UP(offset + size,
							 PAGE_SIZE);
		}

		/* To avoid the estimate becoming too pessimal for some
		 * frontends that limit posted rx requests, cap the estimate
		 * at MAX_SKB_FRAGS.
		 */
		if (max_slots_needed > MAX_SKB_FRAGS)
			max_slots_needed = MAX_SKB_FRAGS;

		/* We may need one more slot for GSO metadata */
		if (skb_is_gso(skb) &&
		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
			max_slots_needed++;

		/* If the skb may not fit then bail out now */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			vif->rx_last_skb_slots = max_slots_needed;
			break;
		} else
			vif->rx_last_skb_slots = 0;

		old_req_cons = vif->rx.req_cons;
		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
		ring_slots_used = vif->rx.req_cons - old_req_cons;

		BUG_ON(ring_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}


		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}

void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}
Code example #9
File: fs-backend.c Project: tumf/xen-3.3.1
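/* Per-mount worker thread: alternate between handling AIO completions
 * and dispatching newly queued frontend requests. */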
static void *handle_mount(void *data)
{
    int more, notify;
    struct fs_mount *mount = (struct fs_mount *)data;
    
    printf("Starting a thread for mount: %d\n", mount->mount_id);
    allocate_request_array(mount);

    for(;;)
    {
        int nr_consumed=0;
        RING_IDX cons, rp;
        struct fsif_request *req;

        handle_aio_events(mount);
moretodo:
        rp = mount->ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((cons = mount->ring.req_cons) != rp)
        {
            int i;
            struct fs_op *op;

            printf("Got a request at %d (of %d)\n", 
                    cons, RING_SIZE(&mount->ring));
            req = RING_GET_REQUEST(&mount->ring, cons);
            printf("Request type=%d\n", req->type); 
            for(i=0;;i++)
            {
                op = fsops[i];
                if(op == NULL)
                {
                    /* We've reached the end of the array, no appropriate
                     * handler found. Warn, ignore and continue. */
                    printf("WARN: Unknown request type: %d\n", req->type);
                    mount->ring.req_cons++; 
                    break;
                }
                if(op->type == req->type)
                {
                    /* There needs to be a dispatch handler */
                    assert(op->dispatch_handler != NULL);
                    op->dispatch_handler(mount, req);
                    break;
                }
             }

            nr_consumed++;
        }
        printf("Backend consumed: %d requests\n", nr_consumed);
        RING_FINAL_CHECK_FOR_REQUESTS(&mount->ring, more);
        if(more) goto moretodo;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&mount->ring, notify);
        printf("Pushed responces and notify=%d\n", notify);
        if(notify)
            xc_evtchn_notify(mount->evth, mount->local_evtchn);
    }
 
    printf("Destroying thread for mount: %d\n", mount->mount_id);
    xc_gnttab_munmap(mount->gnth, mount->ring.sring, 1);
    xc_gnttab_close(mount->gnth);
    xc_evtchn_unbind(mount->evth, mount->local_evtchn);
    xc_evtchn_close(mount->evth);
    free(mount->frontend);
    pthread_exit(NULL);
}
Code example #10
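/* Variant of chrif_int from example #7: stages multi-page reads and
 * writes through block_buf and a single shared op_page, 4 KiB at a time. */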
static irqreturn_t chrif_int(int irq, void *dev_id)
{
    int err;
    RING_IDX rc, rp;
    int more_to_do, notify;
    chrif_request_t req;
    chrif_response_t resp;
    printk(KERN_INFO "\n------------------------------start response-------------------------------------");
    printk(KERN_DEBUG "\nxen: Dom0: chrif_int called with dev_id=%p info=%p", dev_id, &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    printk(KERN_DEBUG "\nxen: Dom0: rc = %d rp = %d", rc, rp);

    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen: Dom0: Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();

        printk(KERN_DEBUG "\nxen: Dom0: operation:  %s", op_name(resp.operation));
        switch (resp.operation) {
        case CHRIF_OP_OPEN:
            info.chrif_filp = filp_open(DEVICE_PATH, O_RDWR, 0);
            printk(KERN_DEBUG "\nxen: dom0: response open");
            break;
        case CHRIF_OP_READ: {
            int cnt;
            resp.rdwr.len = req.rdwr.len;
            cnt = resp.rdwr.len / 4096;
            printk(KERN_DEBUG "\nxen: dom0: read %d times", cnt);
            memset(op_page->addr, 0, 4096);
            if (rd_time == 0) {
                old_fs = get_fs();
                set_fs(get_ds());
                /* read data from device to page */
                err = info.chrif_filp->f_op->read(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
                set_fs(old_fs);
                if (err < 0)
                    printk(KERN_DEBUG "\nxen: Dom0: read %u bytes error", resp.rdwr.len);
            }
            memcpy(op_page->addr, block_buf + rd_time*4096, 4096);
            rd_time++;
            if (rd_time == cnt) {
                rd_time = 0;
                memset(block_buf, 0, resp.rdwr.len);
            }
            printk(KERN_DEBUG "\nxen: dom0: response read");
            break;
        }
        case CHRIF_OP_WRITE: {
            int count;
            resp.rdwr.len = req.rdwr.len;
            count = resp.rdwr.len / 4096;
            printk(KERN_DEBUG "\nxen: dom0: write %d times", count);
            //if(count == 0){ block_buf = (char *)kmalloc(resp.rdwr.len, GFP_KERNEL);}
            memcpy(block_buf + wr_time*4096, op_page->addr, 4096);
            wr_time++;
            if (wr_time == count) {
                old_fs = get_fs();
                set_fs(get_ds());
                /* write data from page to device */
                err = info.chrif_filp->f_op->write(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
                set_fs(old_fs);
                wr_time = 0;
                if (err < 0)
                    printk(KERN_DEBUG "\nxen: Dom0: write %u bytes error", resp.rdwr.len);
                memset(block_buf, 0, resp.rdwr.len);
            }
            //kfree(block_buf);
            printk(KERN_DEBUG "\nxen: dom0: response write");
            break;
        }
        case CHRIF_OP_IOCTL: {
            resp.ioc_parm.cmd = req.ioc_parm.cmd;
            switch (resp.ioc_parm.cmd) {
            case PDMA_IOC_START_DMA: {
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                set_fs(old_fs);
                if (err) {
                    printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl failed");
                    resp.status = 0;
                } else
                    printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl success");
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                get_block_info();
                break;
            }
            case PDMA_IOC_STOP_DMA: {
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                set_fs(old_fs);
                if (err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl failed");
                    resp.status = 0;
                } else
                    printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl success");
                break;
            }
            case PDMA_IOC_INFO: {
                struct pdma_info pdma_info;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                set_fs(old_fs);
                if (err) {
                    printk(KERN_DEBUG "\nxen: Dom0: info ioctl failed");
                    resp.status = 0;
                } else
                    printk(KERN_DEBUG "\nxen: Dom0: info ioctl success");
                resp.ioc_parm.info = pdma_info;
                break;
            }
            case PDMA_IOC_STAT: {
                struct pdma_stat pdma_stat;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                set_fs(old_fs);
                if (err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stat ioctl failed");
                    resp.status = 0;
                } else
                    printk(KERN_DEBUG "\nxen: Dom0: stat ioctl success");
                resp.ioc_parm.stat = pdma_stat;
                break;
            }
            case PDMA_IOC_RW_REG: {
                struct pdma_rw_reg ctrl = req.ioc_parm.ctrl;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                set_fs(old_fs);
                if (err) {
                    printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl failed");
                    resp.status = 0;
                } else
                    printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl success");
                resp.ioc_parm.ctrl = ctrl;
                break;
            }
            default:
                printk(KERN_DEBUG "\nxen: Dom0: unknown ioctl command");
                break;
            }
            printk(KERN_INFO "\nxen: Dom0: response ioctl");
            break;
        }
        case CHRIF_OP_CLOSE:
            filp_close(info.chrif_filp, NULL);
            printk(KERN_INFO "\nxen: Dom0: response close");
            break;
        default:
            printk(KERN_DEBUG "\nxen: Dom0: unknown operation");
            break;
        }

        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        //put response and check whether or not to notify domU
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons) {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        } else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring)) {
            more_to_do = 1;
        }
        if (notify) {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}