Example 1
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

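    /* First, push out responses for any requests that have already completed. */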
    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
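        /* We own this slot now: advance the shared-ring consumer index. */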
        blkdev->rings.common.req_cons = ++rc;

        /* parse the request */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

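        /* Hand the validated request to the block layer as asynchronous I/O. */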
        ioreq_runio_qemu_aio(ioreq);
    }

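    /*
     * If we bailed out above for lack of a free request slot, reschedule
     * the bottom half to pick up where we left off.
     */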
    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
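
The loop above follows the standard Xen shared-ring consumer pattern: snapshot the producer index, fence, then drain entries up to that snapshot. Below is a minimal, self-contained sketch of that pattern; the demo_ring layout, RING_SIZE, and the acquire load standing in for xen_rmb() are illustrative assumptions, not the Xen or QEMU ring API.

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 8 /* illustrative; real Xen rings are sized to fit a page */

struct demo_ring {
    _Atomic unsigned prod; /* advanced by the producer */
    unsigned cons;         /* private to the consumer */
    int slots[RING_SIZE];
};

static void consume(struct demo_ring *r)
{
    unsigned rc = r->cons;
    /*
     * Acquire-load the producer index so the slot contents read below are
     * at least as fresh as the index itself; this is the role xen_rmb()
     * plays after req_prod is read in the example above.
     */
    unsigned rp = atomic_load_explicit(&r->prod, memory_order_acquire);

    while (rc != rp) {
        printf("handling request %d\n", r->slots[rc % RING_SIZE]);
        rc++; /* consume the slot */
    }
    r->cons = rc;
}

int main(void)
{
    struct demo_ring r = { .cons = 0 };

    for (int i = 0; i < 3; i++) {
        r.slots[i] = i; /* fill the slots first... */
    }
    atomic_store_explicit(&r.prod, 3, memory_order_release); /* ...then publish */

    consume(&r);
    return 0;
}
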
Example 2
static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, this is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_plug(dataplane->blk);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
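        /* We own this slot now: advance the shared-ring consumer index. */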
        dataplane->rings.common.req_cons = ++rc;

        /* parse the request */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (xen_block_send_response(request)) {
                Error *local_err = NULL;

                xen_device_notify_event_channel(dataplane->xendev,
                                                dataplane->event_channel,
                                                &local_err);
                if (local_err) {
                    error_report_err(local_err);
                }
            }
            xen_block_release_request(request);
            continue;
        }

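        /* The batch is full: submit it before queueing the next request. */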
        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            blk_io_unplug(dataplane->blk);
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                blk_io_plug(dataplane->blk);
                batched = 0;
            } else {
                batched++;
            }
        }
    }
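    /* Submit any requests still sitting in the batch. */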
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_unplug(dataplane->blk);
    }

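    /*
     * If we bailed out above for lack of a free request slot, reschedule
     * the bottom half to pick up where we left off.
     */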
    if (dataplane->more_work &&
        dataplane->requests_inflight < dataplane->max_requests) {
        qemu_bh_schedule(dataplane->bh);
    }
}
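
A minimal sketch isolating the plug/unplug batching heuristic from the ring logic. queue_request() and flush_batch() are hypothetical stand-ins for submitting one request behind blk_io_plug() and for blk_io_unplug() respectively; the IO_PLUG_THRESHOLD value and the in-flight count are illustrative, not taken from QEMU.

#include <stdio.h>

#define IO_PLUG_THRESHOLD 1

/* Hypothetical stand-in for blk_io_unplug(): submit everything batched so far. */
static void flush_batch(void)
{
    printf("-- flush batched requests --\n");
}

/* Hypothetical stand-in for queueing one request behind blk_io_plug(). */
static void queue_request(int req)
{
    printf("queue request %d\n", req);
}

int main(void)
{
    int inflight_atstart = 3; /* pretend three requests were already in flight */
    int batched = 0;

    for (int req = 0; req < 8; req++) {
        /* The batch is full: submit it before queueing the next request. */
        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            flush_batch();
        }
        queue_request(req);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                batched = 0; /* the flush above started a fresh batch */
            } else {
                batched++;
            }
        }
    }
    flush_batch(); /* submit whatever is still batched, as the example does on exit */
    return 0;
}

Running it queues requests 0-2, flushes, queues 3-6, flushes, queues 7, and flushes once more on exit, mirroring how the example lets each batch grow to roughly the number of requests that were in flight on entry.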