Example No. 1
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    DPRINTF("scsi_write_complete() ret = %d\n", ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        DPRINTF("block size %d\n", s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
Example No. 2
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
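
Example No. 2 is the minimal form of the pattern shared by most of the examples on this page: the completion callback looks up the AioContext that owns the request's BlockBackend, acquires it around the completion work, and releases it afterwards. The sketch below restates that shape with hypothetical names (MyRequest and my_complete_request are placeholders, not QEMU APIs); only blk_get_aio_context(), aio_context_acquire() and aio_context_release() come from QEMU.

#include "qemu/osdep.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"

typedef struct MyRequest {
    BlockBackend *blk;          /* backend the request was submitted to */
    /* ... per-request state ... */
} MyRequest;

static void my_complete_request(MyRequest *req, int ret);

static void my_aio_complete(void *opaque, int ret)
{
    MyRequest *req = opaque;
    AioContext *ctx = blk_get_aio_context(req->blk);

    /* The callback may run outside the context that owns req->blk,
     * so take that context before touching shared state. */
    aio_context_acquire(ctx);
    my_complete_request(req, ret);
    aio_context_release(ctx);
}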
Example No. 3
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;

    /* Just to be safe if query_nodes is not always initialized */
    query_nodes = has_query_nodes && query_nodes;

    while (next_query_bds(&blk, &bs, query_nodes)) {
        BlockStatsList *info = g_malloc0(sizeof(*info));
        AioContext *ctx = blk ? blk_get_aio_context(blk)
                              : bdrv_get_aio_context(bs);

        aio_context_acquire(ctx);
        info->value = bdrv_query_stats(blk, bs, !query_nodes);
        aio_context_release(ctx);

        *p_next = info;
        p_next = &info->next;
    }

    return head;
}
Example No. 4
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}
Example No. 5
static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred.  However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
    g_free(ioctl_req);
}
Example No. 6
static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
            goto out;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
Example No. 7
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;
    VirtIOBlock *s = next->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(vdev, req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1, req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_merged_requests
             * to be able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
        virtio_blk_free_request(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
Example No. 8
static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;
    VirtIOBlock *s = req->dev;
    bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) &
                            ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES;

    aio_context_acquire(blk_get_aio_context(s->conf.conf.blk));
    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) {
            goto out;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    if (is_write_zeroes) {
        block_acct_done(blk_get_stats(s->blk), &req->acct);
    }
    virtio_blk_free_request(req);

out:
    aio_context_release(blk_get_aio_context(s->conf.conf.blk));
}
Example No. 9
static void release_drive(Object *obj, const char *name, void *opaque)
{
    DeviceState *dev = DEVICE(obj);
    Property *prop = opaque;
    BlockBackend **ptr = qdev_get_prop_ptr(dev, prop);

    if (*ptr) {
        AioContext *ctx = blk_get_aio_context(*ptr);

        aio_context_acquire(ctx);
        blockdev_auto_del(*ptr);
        blk_detach_dev(*ptr, dev);
        aio_context_release(ctx);
    }
}
Example No. 10
static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(blk_get_aio_context(dbs->blk),
                             reschedule_dma, dbs);
        cpu_register_map_client(dbs->bh);
        return;
    }

    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
    }

    dbs->acb = dbs->io_func(dbs->blk, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_blk_cb, dbs);
    assert(dbs->acb);
}
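
Example No. 10 differs from the others: when dma_memory_map() cannot map the next scatter-gather element, dma_blk_cb() does not complete the request; it creates a bottom half in the BlockBackend's AioContext and hands it to cpu_register_map_client() so the transfer resumes once mapping resources free up. The sketch below shows only the generic plumbing for deferring work to a backend's AioContext with a bottom half; RetryState and retry_bh() are hypothetical, and it schedules the BH directly rather than going through cpu_register_map_client().

#include "qemu/osdep.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"

typedef struct RetryState {
    BlockBackend *blk;
    QEMUBH *bh;
} RetryState;

static void retry_bh(void *opaque)
{
    RetryState *st = opaque;

    qemu_bh_delete(st->bh);
    st->bh = NULL;
    /* ... resubmit the deferred request here ... */
}

static void defer_to_blk_context(RetryState *st)
{
    /* Create the bottom half in the AioContext that owns st->blk so the
     * retry runs in that context's event loop. */
    st->bh = aio_bh_new(blk_get_aio_context(st->blk), retry_bh, st);
    qemu_bh_schedule(st->bh);
}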
Example No. 11
BlockStatsList *qmp_query_blockstats(bool has_query_nodes,
                                     bool query_nodes,
                                     Error **errp)
{
    BlockStatsList *head = NULL, **p_next = &head;
    BlockBackend *blk;
    BlockDriverState *bs;

    /* Just to be safe if query_nodes is not always initialized */
    if (has_query_nodes && query_nodes) {
        for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) {
            BlockStatsList *info = g_malloc0(sizeof(*info));
            AioContext *ctx = bdrv_get_aio_context(bs);

            aio_context_acquire(ctx);
            info->value = bdrv_query_bds_stats(bs, false);
            aio_context_release(ctx);

            *p_next = info;
            p_next = &info->next;
        }
    } else {
        for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
            BlockStatsList *info = g_malloc0(sizeof(*info));
            AioContext *ctx = blk_get_aio_context(blk);
            BlockStats *s;

            aio_context_acquire(ctx);
            s = bdrv_query_bds_stats(blk_bs(blk), true);
            s->has_device = true;
            s->device = g_strdup(blk_name(blk));
            bdrv_query_blk_stats(s->stats, blk);
            aio_context_release(ctx);

            info->value = s;
            *p_next = info;
            p_next = &info->next;
        }
    }

    return head;
}
Example No. 12
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    DPRINTF("Data ready tag=0x%x len=%d\n", r->req.tag, len);

    r->len = -1;
    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize.  */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else  {
            r->buf[3] |= 0x80;
        }
    }
    if (s->type == TYPE_DISK &&
        r->req.cmd.buf[0] == INQUIRY &&
        r->req.cmd.buf[2] == 0xb0) {
        uint32_t max_transfer =
            blk_get_max_transfer(s->conf.blk) / s->blocksize;

        assert(max_transfer);
        stl_be_p(&r->buf[8], max_transfer);
        /* Also take care of the opt xfer len. */
        stl_be_p(&r->buf[12],
                 MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
    }
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}