Code example #1
File: xen_disk.c  Project: Mellanox/qemu
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (blk_send_response_one(ioreq)) {
                xen_pv_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < blkdev->max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}
Code example #2
File: virtio-blk.c  Project: MollyZhang/esesc
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;

    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_merged_requests
             * to be able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
        virtio_blk_free_request(req);
    }
}
Code example #3
File: virtio-blk.c  Project: qhjindev/qemu-vsock
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
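    /* A flush transfers no payload, so zero bytes are accounted here. */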
    block_acct_start(blk_get_stats(req->dev->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(req->dev->blk, mrb);
    }
    blk_aio_flush(req->dev->blk, virtio_blk_flush_complete, req);
}
Code example #4
File: virtio-blk.c  Project: qhjindev/qemu-vsock
static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
    virtio_blk_free_request(req);
}
Code example #5
File: virtio-blk.c  Project: qhjindev/qemu-vsock
static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    int nb_sectors = mrb->reqs[start]->qiov.size / BDRV_SECTOR_SIZE;
    bool is_write = mrb->is_write;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from an external iovec, so
         * we can't modify it here. We need to initialize it locally and then
         * add the external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
            nb_sectors += mrb->reqs[i]->qiov.size / BDRV_SECTOR_SIZE;
        }
        assert(nb_sectors == qiov->size / BDRV_SECTOR_SIZE);

        trace_virtio_blk_submit_multireq(mrb, start, num_reqs, sector_num,
                                         nb_sectors, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (is_write) {
        blk_aio_writev(blk, sector_num, qiov, nb_sectors,
                       virtio_blk_rw_complete, mrb->reqs[start]);
    } else {
        blk_aio_readv(blk, sector_num, qiov, nb_sectors,
                      virtio_blk_rw_complete, mrb->reqs[start]);
    }
}
Code example #6
File: virtio-blk.c  Project: qhjindev/qemu-vsock
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
    bool is_read)
{
    BlockErrorAction action = blk_get_error_action(req->dev->blk,
                                                   is_read, error);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERROR_ACTION_STOP) {
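        /* Queue the request so it can be restarted when the VM resumes. */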
        req->next = s->rq;
        s->rq = req;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        block_acct_done(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
Code example #7
File: virtio-blk.c  Project: Dovgalyuk/qemu
static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
            goto out;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
Code example #8
File: virtio-blk.c  Project: chelseaqwang/qemu-tpm
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;

    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_merged_requests
             * to be able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
                /* Break the link in case the next request is added to the
                 * restart queue and is going to be parsed from the ring again.
                 */
                req->mr_next = NULL;
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
        virtio_blk_free_request(req);
    }
}
Code example #9
File: virtio-blk.c  Project: cminyard/qemu
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read, true)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
        virtio_blk_free_request(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
Code example #10
File: virtio-blk.c  Project: gkurz/qemu
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
    bool is_read)
{
    VirtIOBlock *s = req->dev;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* Break the link as the next request is going to be parsed from the
         * ring again. Otherwise we may end up doing a double completion! */
        req->mr_next = NULL;
        req->next = s->rq;
        s->rq = req;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        block_acct_failed(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
Code example #11
File: virtio-blk.c  Project: cminyard/qemu
static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
            goto out;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
Code example #12
File: xen-block.c  Project: MaddTheSane/qemu
static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, this is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_plug(dataplane->blk);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            if (xen_block_send_response(request)) {
                Error *local_err = NULL;

                xen_device_notify_event_channel(dataplane->xendev,
                                                dataplane->event_channel,
                                                &local_err);
                if (local_err) {
                    error_report_err(local_err);
                }
            }
            xen_block_release_request(request);
            continue;
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            blk_io_unplug(dataplane->blk);
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                blk_io_plug(dataplane->blk);
                batched = 0;
            } else {
                batched++;
            }
        }
    }
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_unplug(dataplane->blk);
    }

    if (dataplane->more_work &&
        dataplane->requests_inflight < dataplane->max_requests) {
        qemu_bh_schedule(dataplane->bh);
    }
}
Code example #13
File: qapi.c  Project: Biamp-Systems/mb-qemu
static void bdrv_query_blk_stats(BlockDeviceStats *ds, BlockBackend *blk)
{
    BlockAcctStats *stats = blk_get_stats(blk);
    BlockAcctTimedStats *ts = NULL;

    ds->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
    ds->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
    ds->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
    ds->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];

    ds->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
    ds->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
    ds->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];

    ds->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
    ds->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
    ds->invalid_flush_operations =
        stats->invalid_ops[BLOCK_ACCT_FLUSH];

    ds->rd_merged = stats->merged[BLOCK_ACCT_READ];
    ds->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
    ds->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
    ds->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
    ds->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
    ds->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];

    ds->has_idle_time_ns = stats->last_access_time_ns > 0;
    if (ds->has_idle_time_ns) {
        ds->idle_time_ns = block_acct_idle_time_ns(stats);
    }

    ds->account_invalid = stats->account_invalid;
    ds->account_failed = stats->account_failed;

    while ((ts = block_acct_interval_next(stats, ts))) {
        BlockDeviceTimedStatsList *timed_stats =
            g_malloc0(sizeof(*timed_stats));
        BlockDeviceTimedStats *dev_stats = g_malloc0(sizeof(*dev_stats));
        timed_stats->next = ds->timed_stats;
        timed_stats->value = dev_stats;
        ds->timed_stats = timed_stats;

        TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ];
        TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE];
        TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH];

        dev_stats->interval_length = ts->interval_length;

        dev_stats->min_rd_latency_ns = timed_average_min(rd);
        dev_stats->max_rd_latency_ns = timed_average_max(rd);
        dev_stats->avg_rd_latency_ns = timed_average_avg(rd);

        dev_stats->min_wr_latency_ns = timed_average_min(wr);
        dev_stats->max_wr_latency_ns = timed_average_max(wr);
        dev_stats->avg_wr_latency_ns = timed_average_avg(wr);

        dev_stats->min_flush_latency_ns = timed_average_min(fl);
        dev_stats->max_flush_latency_ns = timed_average_max(fl);
        dev_stats->avg_flush_latency_ns = timed_average_avg(fl);

        dev_stats->avg_rd_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_READ);
        dev_stats->avg_wr_queue_depth =
            block_acct_queue_depth(ts, BLOCK_ACCT_WRITE);
    }
}
Code example #14
File: dma-helpers.c  Project: 32bitmicro/riscv-qemu
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
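    /* Account the whole scatter-gather list size against the backend's stats. */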
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}
Code example #15
File: xen-block.c  Project: MaddTheSane/qemu
static int xen_block_do_aio(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (request->req.nr_segments &&
        (request->req.operation == BLKIF_OP_WRITE ||
         request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        xen_block_copy_request(request)) {
        goto err;
    }

    request->aio_inflight++;
    if (request->presync) {
        blk_aio_flush(request->dataplane->blk, xen_block_complete_aio,
                      request);
        return 0;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size, BLOCK_ACCT_READ);
        request->aio_inflight++;
        blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
                       xen_block_complete_aio, request);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size,
                         request->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        request->aio_inflight++;
        blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
                        xen_block_complete_aio, request);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&request->req;
        if (!xen_block_split_discard(request, req->sector_number,
                                     req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    xen_block_complete_aio(request, 0);

    return 0;

err:
    xen_block_finish_request(request);
    request->status = BLKIF_RSP_ERROR;
    return -1;
}
Code example #16
File: xen_disk.c  Project: Mellanox/qemu
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
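    /* For writes and flushes that carry data, copy the guest's grant pages
     * into the locally allocated buffer before submitting the I/O. */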
    if (ioreq->req.nr_segments &&
        (ioreq->req.operation == BLKIF_OP_WRITE ||
         ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        ioreq_grant_copy(ioreq)) {
        qemu_vfree(ioreq->buf);
        goto err;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_preadv(blkdev->blk, ioreq->start, &ioreq->v, 0,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_pwritev(blkdev->blk, ioreq->start, &ioreq->v, 0,
                        qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&ioreq->req;
        if (!blk_split_discard(ioreq, req->sector_number, req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
Code example #17
File: xen_disk.c  Project: Mellanox/qemu
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;
    struct XenBlkDev *blkdev = ioreq->blkdev;
    struct XenDevice *xendev = &blkdev->xendev;

    aio_context_acquire(blkdev->ctx);

    if (ret != 0) {
        xen_pv_printf(xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        goto done;
    }
    if (ioreq->aio_inflight > 0) {
        goto done;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure ioreq->aio_errors is increased */
        if (ret == 0) {
            ioreq_grant_copy(ioreq);
        }
        qemu_vfree(ioreq->buf);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        qemu_vfree(ioreq->buf);
        break;
    default:
        break;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_finish(ioreq);

    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
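        /* fall through */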
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(blkdev->bh);

done:
    aio_context_release(blkdev->ctx);
}
Code example #18
File: qapi.c  Project: dsandersimgtec/qemu
static BlockStats *bdrv_query_stats(const BlockDriverState *bs,
                                    bool query_backing)
{
    BlockStats *s;

    s = g_malloc0(sizeof(*s));

    if (bdrv_get_device_name(bs)[0]) {
        s->has_device = true;
        s->device = g_strdup(bdrv_get_device_name(bs));
    }

    if (bdrv_get_node_name(bs)[0]) {
        s->has_node_name = true;
        s->node_name = g_strdup(bdrv_get_node_name(bs));
    }

    s->stats = g_malloc0(sizeof(*s->stats));
    if (bs->blk) {
        BlockAcctStats *stats = blk_get_stats(bs->blk);
        BlockAcctTimedStats *ts = NULL;

        s->stats->rd_bytes = stats->nr_bytes[BLOCK_ACCT_READ];
        s->stats->wr_bytes = stats->nr_bytes[BLOCK_ACCT_WRITE];
        s->stats->rd_operations = stats->nr_ops[BLOCK_ACCT_READ];
        s->stats->wr_operations = stats->nr_ops[BLOCK_ACCT_WRITE];

        s->stats->failed_rd_operations = stats->failed_ops[BLOCK_ACCT_READ];
        s->stats->failed_wr_operations = stats->failed_ops[BLOCK_ACCT_WRITE];
        s->stats->failed_flush_operations = stats->failed_ops[BLOCK_ACCT_FLUSH];

        s->stats->invalid_rd_operations = stats->invalid_ops[BLOCK_ACCT_READ];
        s->stats->invalid_wr_operations = stats->invalid_ops[BLOCK_ACCT_WRITE];
        s->stats->invalid_flush_operations =
            stats->invalid_ops[BLOCK_ACCT_FLUSH];

        s->stats->rd_merged = stats->merged[BLOCK_ACCT_READ];
        s->stats->wr_merged = stats->merged[BLOCK_ACCT_WRITE];
        s->stats->flush_operations = stats->nr_ops[BLOCK_ACCT_FLUSH];
        s->stats->wr_total_time_ns = stats->total_time_ns[BLOCK_ACCT_WRITE];
        s->stats->rd_total_time_ns = stats->total_time_ns[BLOCK_ACCT_READ];
        s->stats->flush_total_time_ns = stats->total_time_ns[BLOCK_ACCT_FLUSH];

        s->stats->has_idle_time_ns = stats->last_access_time_ns > 0;
        if (s->stats->has_idle_time_ns) {
            s->stats->idle_time_ns = block_acct_idle_time_ns(stats);
        }

        s->stats->account_invalid = stats->account_invalid;
        s->stats->account_failed = stats->account_failed;

        while ((ts = block_acct_interval_next(stats, ts))) {
            BlockDeviceTimedStatsList *timed_stats =
                g_malloc0(sizeof(*timed_stats));
            BlockDeviceTimedStats *dev_stats = g_malloc0(sizeof(*dev_stats));
            timed_stats->next = s->stats->timed_stats;
            timed_stats->value = dev_stats;
            s->stats->timed_stats = timed_stats;

            TimedAverage *rd = &ts->latency[BLOCK_ACCT_READ];
            TimedAverage *wr = &ts->latency[BLOCK_ACCT_WRITE];
            TimedAverage *fl = &ts->latency[BLOCK_ACCT_FLUSH];

            dev_stats->interval_length = ts->interval_length;

            dev_stats->min_rd_latency_ns = timed_average_min(rd);
            dev_stats->max_rd_latency_ns = timed_average_max(rd);
            dev_stats->avg_rd_latency_ns = timed_average_avg(rd);

            dev_stats->min_wr_latency_ns = timed_average_min(wr);
            dev_stats->max_wr_latency_ns = timed_average_max(wr);
            dev_stats->avg_wr_latency_ns = timed_average_avg(wr);

            dev_stats->min_flush_latency_ns = timed_average_min(fl);
            dev_stats->max_flush_latency_ns = timed_average_max(fl);
            dev_stats->avg_flush_latency_ns = timed_average_avg(fl);

            dev_stats->avg_rd_queue_depth =
                block_acct_queue_depth(ts, BLOCK_ACCT_READ);
            dev_stats->avg_wr_queue_depth =
                block_acct_queue_depth(ts, BLOCK_ACCT_WRITE);
        }
    }

    s->stats->wr_highest_offset = bs->wr_highest_offset;

    if (bs->file) {
        s->has_parent = true;
        s->parent = bdrv_query_stats(bs->file->bs, query_backing);
    }

    if (query_backing && bs->backing) {
        s->has_backing = true;
        s->backing = bdrv_query_stats(bs->backing->bs, query_backing);
    }

    return s;
}
Code example #19
File: xen-block.c  Project: MaddTheSane/qemu
static void xen_block_complete_aio(void *opaque, int ret)
{
    XenBlockRequest *request = opaque;
    XenBlockDataPlane *dataplane = request->dataplane;

    aio_context_acquire(dataplane->ctx);

    if (ret != 0) {
        error_report("%s I/O error",
                     request->req.operation == BLKIF_OP_READ ?
                     "read" : "write");
        request->aio_errors++;
    }

    request->aio_inflight--;
    if (request->presync) {
        request->presync = 0;
        xen_block_do_aio(request);
        goto done;
    }
    if (request->aio_inflight > 0) {
        goto done;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure request->aio_errors is increased */
        if (ret == 0) {
            xen_block_copy_request(request);
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
    default:
        break;
    }

    request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    xen_block_finish_request(request);

    switch (request->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (request->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(dataplane->blk), &request->acct);
        } else {
            block_acct_failed(blk_get_stats(dataplane->blk), &request->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    if (xen_block_send_response(request)) {
        Error *local_err = NULL;

        xen_device_notify_event_channel(dataplane->xendev,
                                        dataplane->event_channel,
                                        &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    xen_block_release_request(request);

    qemu_bh_schedule(dataplane->bh);

done:
    aio_context_release(dataplane->ctx);
}
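
The examples above share one accounting pattern: fetch the backend's statistics with blk_get_stats(), open an accounting record with block_acct_start() before submitting the I/O, and close it from the completion callback with block_acct_done() or block_acct_failed(); requests that fail validation are instead counted with block_acct_invalid(). The sketch below illustrates that lifecycle for a read request. It is not taken from any of the projects listed above; the ExampleRequest structure, function names, and header paths are assumptions chosen for illustration.

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "sysemu/block-backend.h"
#include "block/accounting.h"

/* Hypothetical per-request state; only the fields the sketch needs. */
typedef struct ExampleRequest {
    BlockBackend *blk;     /* backend the request is issued against */
    QEMUIOVector qiov;     /* guest buffers for the read */
    BlockAcctCookie acct;  /* accounting cookie opened at submission */
} ExampleRequest;

/* Completion callback: close the accounting record opened at submission. */
static void example_read_complete(void *opaque, int ret)
{
    ExampleRequest *request = opaque;

    if (ret == 0) {
        block_acct_done(blk_get_stats(request->blk), &request->acct);
    } else {
        block_acct_failed(blk_get_stats(request->blk), &request->acct);
    }
    g_free(request);
}

/* Submission path: start accounting, then issue the asynchronous read. */
static void example_submit_read(ExampleRequest *request, int64_t offset)
{
    block_acct_start(blk_get_stats(request->blk), &request->acct,
                     request->qiov.size, BLOCK_ACCT_READ);
    blk_aio_preadv(request->blk, offset, &request->qiov, 0,
                   example_read_complete, request);
}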