Example #1
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

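    /* Push responses for any already-completed requests before pulling new ones. */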
    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

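    /* If we stopped early for lack of request slots, kick the bottom half to resume. */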
    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
Example #2
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

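    /* In AIO mode, push responses for completed requests before pulling new ones. */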
    if (use_aio) {
        blk_send_response_all(blkdev);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
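    /* In sync mode every request has completed by now, so respond in one sweep. */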
    if (!use_aio) {
        blk_send_response_all(blkdev);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
Example #3
static ssize_t net_rx_packet(VLANClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = DO_UPCAST(NICState, nc, nc)->opaque;
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected) {
        return -1;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        xen_be_printf(&netdev->xendev, 2, "no buffer, drop packet\n");
        return -1;
    }
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)\n",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

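    /* Copy the request off the shared ring before the frontend can modify it. */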
    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                   netdev->xendev.dom,
                                   rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
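    /* Copy the packet into the granted page, leaving NET_IP_ALIGN bytes of headroom. */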
    memcpy(page + NET_IP_ALIGN, buf, size);
    xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}
Example #4
static int net_rx_ok(VLANClientState *vc)
{
    struct XenNetDev *netdev = vc->opaque;
    RING_IDX rc, rp;

    if (netdev->xendev.be_state != XenbusStateConnected) {
        return 0;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n",
                      __func__, rc, rp);
        return 0;
    }
    return 1;
}
Example #5
static irqreturn_t as_int(int irq, void *dev_id)
{
    RING_IDX rc, rp;
    as_request_t req;
    as_response_t resp;
    int more_to_do, notify;
    printk(KERN_DEBUG "\nxen:Dom0: as_int called with dev_id=%p info=%p", dev_id, &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    rmb(); /* Ensure we see queued requests up to 'rp'. */
    printk(KERN_DEBUG " rc = %d rp = %d", rc, rp);
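    /* Consume every pending request and queue a response for each. */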
    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen:Dom0:Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();
        switch(req.operation) {
        case 0:
            printk(KERN_DEBUG "\nxen:dom0:req.operation = 0");
            break;
        default:
            printk(KERN_DEBUG "\nxen:dom0:req.operation = %d", req.operation);
            break;
        }
        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        /* Check whether the frontend queued more requests while we worked. */
        if (info.ring.rsp_prod_pvt == info.ring.req_cons) {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        } else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring)) {
            more_to_do = 1;
        }
        if (notify) {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}
Example #6
static irqreturn_t chrif_int(int irq, void *dev_id)
{
    int err = 0; /* initialized: the read path below is commented out but still checks err */
    RING_IDX rc, rp;
    int more_to_do, notify;
    chrif_request_t req;
    chrif_response_t resp;
    printk(KERN_INFO "\n------------------------------start response-------------------------------------");
    printk(KERN_DEBUG "\nxen: Dom0: chrif_int called with dev_id=%p info=%p", dev_id, &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    rmb(); /* Ensure we see queued requests up to 'rp'. */
    printk(KERN_DEBUG "\nxen: Dom0: rc = %d rp = %d", rc, rp);

    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen: Dom0: Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();

        printk(KERN_DEBUG "\nxen: Dom0: operation:  %s", op_name(resp.operation));
        switch(resp.operation) {
        case CHRIF_OP_OPEN:
            info.chrif_filp = filp_open(DEVICE_PATH, O_RDWR, 0);
            printk(KERN_DEBUG "\nxen: dom0: response open");
            break;
        case CHRIF_OP_READ: {
            resp.rdwr.len = req.rdwr.len;
            //struct pdma_info pdma_info;
            //memset(op_page->addr, 0, resp.rdwr.len);
            old_fs = get_fs();
            set_fs(get_ds());
            //get read size of block
            //err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
            //read data from device to page
            //err =info.chrif_filp->f_op->read(info.chrif_filp, op_page->addr, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
            if(err < 0)
                printk(KERN_DEBUG "\nxen: Dom0: read %u bytes error", resp.rdwr.len);
            printk(KERN_DEBUG "\nxen: dom0: response read");
            break;
        }
        case CHRIF_OP_WRITE: {
            int i = 0, count, ret;
            struct vm_struct *op_page;
            struct gnttab_map_grant_ref op_page_ops;
            struct gnttab_unmap_grant_ref op_page_unmap_ops;
            resp.rdwr.len = req.rdwr.len;

            count = resp.rdwr.len/4096;
            printk(KERN_DEBUG "\nxen: Dom0: write %u bytes %d times", resp.rdwr.len, count);

            block_buf = (char *)kmalloc(resp.rdwr.len, GFP_KERNEL);
            memset(block_buf, 0, resp.rdwr.len);
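            /* NOTE: block_buf is reallocated on every write request and never freed (the kfree below is commented out). */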

            while(i < count) {
                resp.op_gref[i] = req.op_gref[i];
                printk(KERN_DEBUG "\nxen: dom0: req.op_gref[0]: %d", resp.op_gref[i]);

                op_page = alloc_vm_area(PAGE_SIZE, NULL);
                if(op_page == NULL) {
                    printk("\nxen: dom0: could not allocate shared_page");
                    return IRQ_HANDLED; /* an irq handler must return irqreturn_t, not -EFAULT */
                }
                /*gnttab_set_map_op(&op_page_ops, (unsigned long)op_page->addr, GNTMAP_host_map, resp.op_gref[i], info.remoteDomain);

                 op_page_unmap_ops.host_addr = (unsigned long)(op_page->addr);
                 unmap_ops.handle = op_page_ops.handle;
                 if(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op_page_ops, 1)){
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed");
                     return -EFAULT;
                 }
                 if (op_page_ops.status) {
                     printk(KERN_DEBUG "\nxen: dom0: HYPERVISOR map grant ref failed status = %d", op_page_ops.status);
                     return -EFAULT;
                 }
                 printk(KERN_DEBUG "\nxen: dom0: map shared page success, shared_page=%x, handle = %x, status = %x", (unsigned int)op_page->addr, op_page_ops.handle, op_page_ops.status);

                 memcpy(block_buf+i*4096, op_page->addr, 4096);
                 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op_page_unmap_ops, 1);
                 if (ret == 0) {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame");
                 } else {
                     printk(KERN_DEBUG "\nxen: dom0: dom0_exit: unmapped shared frame failed");
                 }
                 free_vm_area(op_page);*/
                i++;
            }

            /*  old_fs = get_fs();
            set_fs(get_ds());
            //write data from page to device
            //err = info.chrif_filp->f_op->write(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
            set_fs(old_fs);
              if(err < 0)
            	printk(KERN_DEBUG "\nxen: Dom0: write %u bytes error", resp.rdwr.len);

              */ //kfree(block_buf);
            printk(KERN_DEBUG "\nxen: dom0: response write");
            break;
        }
        case CHRIF_OP_IOCTL: {
            resp.ioc_parm.cmd = req.ioc_parm.cmd;
            switch(resp.ioc_parm.cmd) {
            case PDMA_IOC_START_DMA: {
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl success");
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                break;
            }
            case PDMA_IOC_STOP_DMA: {
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl success");
                break;
            }
            case PDMA_IOC_INFO: {
                struct pdma_info pdma_info;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: info ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: info ioctl success");
                resp.ioc_parm.info = pdma_info;
                break;
            }
            case PDMA_IOC_STAT: {
                struct pdma_stat pdma_stat;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stat ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stat ioctl success");
                resp.ioc_parm.stat = pdma_stat;
                break;
            }
            case PDMA_IOC_RW_REG: {
                struct pdma_rw_reg ctrl = req.ioc_parm.ctrl;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl success");
                resp.ioc_parm.ctrl = ctrl;
                break;
            }
            default:
                printk(KERN_DEBUG "\nxen: Dom0: unknown operation");
                break;
            }
            printk(KERN_INFO "\nxen: Dom0: response ioctl");
            break;
        }
        case CHRIF_OP_CLOSE:
            filp_close(info.chrif_filp, NULL);
            printk(KERN_INFO "\nxen: Dom0: response close");
            break;
        default:
            printk(KERN_DEBUG "\nxen: Dom0: unknown operation");
            break;
        }

        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        //put response and check whether or not notify domU
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons) {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        } else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring)) {
            more_to_do = 1;
        }
        if (notify) {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}
Example #7
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the *
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_be_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_be_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

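            /* 14 bytes is the minimal Ethernet header. */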
            if (txreq.size < 14) {
                xen_be_printf(&netdev->xendev, 0, "bad packet size: %d\n", txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_be_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_be_printf(&netdev->xendev, 3, "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                           netdev->xendev.dom,
                                           txreq.gref, PROT_READ);
            if (page == NULL) {
                xen_be_printf(&netdev->xendev, 0, "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XC_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(&netdev->nic->nc, tmpbuf, txreq.size);
            } else {
                qemu_send_packet(&netdev->nic->nc, page + txreq.offset, txreq.size);
            }
            xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
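        /* If more tx work was signalled while we were sending, rescan the ring. */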
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}
Example #8
static irqreturn_t chrif_int(int irq, void *dev_id)
{
    int err;
    RING_IDX rc, rp;
    int more_to_do, notify;
    chrif_request_t req;
    chrif_response_t resp;
    printk(KERN_INFO "\n------------------------------start response-------------------------------------");
    printk(KERN_DEBUG "\nxen: Dom0: chrif_int called with dev_id=%p info=%p", dev_id, &info);
    rc = info.ring.req_cons;
    rp = info.ring.sring->req_prod;
    rmb(); /* Ensure we see queued requests up to 'rp'. */
    printk(KERN_DEBUG "\nxen: Dom0: rc = %d rp = %d", rc, rp);

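    /* Consume each pending request, perform the file operation it names, and queue a response. */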
    while (rc != rp) {
        if (RING_REQUEST_CONS_OVERFLOW(&info.ring, rc))
            break;
        memcpy(&req, RING_GET_REQUEST(&info.ring, rc), sizeof(req));
        resp.id = req.id;
        resp.operation = req.operation;
        resp.status = req.status + 1;
        printk(KERN_DEBUG "\nxen: Dom0: Recvd at IDX-%d: id = %d, op=%d, status=%d", rc, req.id, req.operation, req.status);
        info.ring.req_cons = ++rc;
        barrier();

        printk(KERN_DEBUG "\nxen: Dom0: operation:  %s", op_name(resp.operation));
        switch(resp.operation) {
        case CHRIF_OP_OPEN:
            info.chrif_filp = filp_open(DEVICE_PATH, O_RDWR, 0);
            printk(KERN_DEBUG "\nxen: dom0: response open");
            break;
        case CHRIF_OP_READ: {
            int cnt;
            resp.rdwr.len = req.rdwr.len;
            cnt = resp.rdwr.len/4096;
            printk(KERN_DEBUG "\nxen: dom0: read %d times", cnt);
            memset(op_page->addr, 0, 4096);
            if(rd_time == 0) {
                old_fs = get_fs();
                set_fs(get_ds());
                //read data from device to page
                err = info.chrif_filp->f_op->read(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
                set_fs(old_fs);
                if(err < 0)
                    printk(KERN_DEBUG "\nxen: Dom0: read %u bytes error", resp.rdwr.len);
            }
            memcpy(op_page->addr, block_buf+rd_time*4096, 4096);
            rd_time++;
            if(rd_time == cnt) {
                rd_time = 0;
                memset(block_buf, 0, resp.rdwr.len);
            }
            printk(KERN_DEBUG "\nxen: dom0: response read");
            break;
        }
        case CHRIF_OP_WRITE: {
            int count;
            resp.rdwr.len = req.rdwr.len;
            count = resp.rdwr.len/4096;
            printk(KERN_DEBUG "\nxen: dom0: write %d times", count);
            //if(count == 0){ block_buf = (char *)kmalloc(resp.rdwr.len, GFP_KERNEL);}
            memcpy(block_buf+wr_time*4096, op_page->addr, 4096);
            wr_time++;
            if(wr_time == count) {
                old_fs = get_fs();
                set_fs(get_ds());
                //write data from page to device
                err = info.chrif_filp->f_op->write(info.chrif_filp, block_buf, resp.rdwr.len, &info.chrif_filp->f_pos);
                set_fs(old_fs);
                wr_time = 0;
                if(err < 0)
                    printk(KERN_DEBUG "\nxen: Dom0: write %u bytes error", resp.rdwr.len);
                memset(block_buf, 0, resp.rdwr.len);
            }
            //kfree(block_buf);
            printk(KERN_DEBUG "\nxen: dom0: response write");
            break;
        }
        case CHRIF_OP_IOCTL: {
            resp.ioc_parm.cmd = req.ioc_parm.cmd;
            switch(resp.ioc_parm.cmd) {
            case PDMA_IOC_START_DMA: {
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: start-dma ioctl success");
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_START_DMA, NULL);
                get_block_info();
                break;
            }
            case PDMA_IOC_STOP_DMA: {
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STOP_DMA, NULL);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stop-dma ioctl success");
                break;
            }
            case PDMA_IOC_INFO: {
                struct pdma_info pdma_info;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_INFO, (unsigned long)&pdma_info);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: info ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: info ioctl success");
                resp.ioc_parm.info = pdma_info;
                break;
            }
            case PDMA_IOC_STAT: {
                struct pdma_stat pdma_stat;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_STAT, (unsigned long)&pdma_stat);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: stat ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: stat ioctl success");
                resp.ioc_parm.stat = pdma_stat;
                break;
            }
            case PDMA_IOC_RW_REG: {
                struct pdma_rw_reg ctrl = req.ioc_parm.ctrl;
                //err = call_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                old_fs = get_fs();
                set_fs(get_ds());
                err = info.chrif_filp->f_op->unlocked_ioctl(info.chrif_filp, PDMA_IOC_RW_REG, (unsigned long)&ctrl);
                set_fs(old_fs);
                if(err) {
                    printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl failed");
                    resp.status = 0;
                } else  printk(KERN_DEBUG "\nxen: Dom0: rw-reg ioctl success");
                resp.ioc_parm.ctrl = ctrl;
                break;
            }
            default:
                printk(KERN_DEBUG "\nxen: Dom0: unknown operation");
                break;
            }
            printk(KERN_INFO "\nxen: Dom0: response ioctl");
            break;
        }
        case CHRIF_OP_CLOSE:
            filp_close(info.chrif_filp, NULL);
            printk(KERN_INFO "\nxen: Dom0: response close");
            break;
        default:
            printk(KERN_DEBUG "\nxen: Dom0: unknown operation");
            break;
        }

        memcpy(RING_GET_RESPONSE(&info.ring, info.ring.rsp_prod_pvt), &resp, sizeof(resp));
        info.ring.rsp_prod_pvt++;
        //put response and check whether or not notify domU
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info.ring, notify);
        if (info.ring.rsp_prod_pvt == info.ring.req_cons) {
            RING_FINAL_CHECK_FOR_REQUESTS(&info.ring, more_to_do);
        } else if (RING_HAS_UNCONSUMED_REQUESTS(&info.ring)) {
            more_to_do = 1;
        }
        if (notify) {
            printk(KERN_DEBUG "\nxen:dom0:send notify to domu");
            notify_remote_via_irq(info.irq);
        }
    }
    return IRQ_HANDLED;
}
Example #9
static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, this is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_plug(dataplane->blk);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (xen_block_send_response(request)) {
                Error *local_err = NULL;

                xen_device_notify_event_channel(dataplane->xendev,
                                                dataplane->event_channel,
                                                &local_err);
                if (local_err) {
                    error_report_err(local_err);
                }
            }
            xen_block_release_request(request);
            continue;
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            blk_io_unplug(dataplane->blk);
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                blk_io_plug(dataplane->blk);
                batched = 0;
            } else {
                batched++;
            }
        }
    }
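    /* Submit any requests still batched up in the plug queue. */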
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_unplug(dataplane->blk);
    }

    if (dataplane->more_work &&
        dataplane->requests_inflight < dataplane->max_requests) {
        qemu_bh_schedule(dataplane->bh);
    }
}