Example 1
static void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    uint32_t max_transfer;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        /* Fast path: a single request needs no sorting or merging. */
        submit_requests(blk, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    /* A merged request must not exceed the backend's transfer limit. */
    max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);

    /* Sort by starting sector so that sequential requests become adjacent. */
    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            /*
             * NOTE: We cannot merge the requests in the following situations:
             * 1. the requests are not sequential
             * 2. merging would exceed the maximum number of IOVs
             * 3. merging would exceed the backend device's maximum transfer length
             */
            if (sector_num + nb_sectors != req->sector_num ||
                niov > blk_get_max_iov(blk) - req->qiov.niov ||
                req->qiov.size > max_transfer ||
                nb_sectors > (max_transfer -
                              req->qiov.size) / BDRV_SECTOR_SIZE) {
                /* Flush the current batch; the next one starts at this request. */
                submit_requests(blk, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    /* Submit the final batch accumulated by the loop. */
    submit_requests(blk, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}
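
The qsort() call above relies on multireq_compare to order the queued requests by starting sector, so that sequential requests end up adjacent before the merge loop runs. The comparator itself is not part of this listing; the following is a minimal sketch of what it must do. Note that it compares the sector numbers explicitly rather than returning their difference, since an int64_t difference can overflow the comparator's int return value.

static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a;
    const VirtIOBlockReq *req2 = *(VirtIOBlockReq **)b;

    /* Order by starting sector; avoid the overflow-prone subtraction. */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}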
Example 2
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    /* Serialize against other users of the backend's AioContext. */
    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* resid is the number of requested bytes that were not transferred. */
    len = r->io_header.dxfer_len - r->io_header.resid;
    DPRINTF("Data ready tag=0x%x len=%d\n", r->req.tag, len);

    r->len = -1;
    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize.  */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch the MODE SENSE device-specific parameters if the BDS is opened
     * read-only.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            /* Set the WP bit in the device-specific parameter byte. */
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    /* Patch the Block Limits VPD page (0xb0) with the host's transfer limit. */
    if (s->type == TYPE_DISK &&
        r->req.cmd.buf[0] == INQUIRY &&
        r->req.cmd.buf[2] == 0xb0) {
        uint32_t max_transfer =
            blk_get_max_transfer(s->conf.blk) / s->blocksize;

        assert(max_transfer);
        stl_be_p(&r->buf[8], max_transfer);
        /* Also clamp the optimal transfer length. */
        stl_be_p(&r->buf[12],
                 MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
    }
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
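
The snooping logic above depends on the wire format of the READ CAPACITY payloads: READ CAPACITY(10) returns a 4-byte big-endian last LBA followed by a 4-byte block size, while READ CAPACITY(16) returns an 8-byte last LBA followed by a 4-byte block size, which is exactly what the ldl_be_p()/ldq_be_p() offsets in the listing encode. Below is a self-contained sketch of the same parsing; the helper names be32_load(), be64_load(), and parse_read_capacity_*() are illustrative, not part of QEMU.

#include <stdint.h>

/* Illustrative big-endian loads, mirroring QEMU's ldl_be_p()/ldq_be_p(). */
static uint32_t be32_load(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static uint64_t be64_load(const uint8_t *p)
{
    return ((uint64_t)be32_load(p) << 32) | be32_load(p + 4);
}

/* READ CAPACITY(10) payload: bytes 0-3 last LBA, bytes 4-7 block size. */
static void parse_read_capacity_10(const uint8_t *buf,
                                   uint64_t *max_lba, uint32_t *blocksize)
{
    *max_lba = be32_load(&buf[0]);
    *blocksize = be32_load(&buf[4]);
}

/* READ CAPACITY(16) payload: bytes 0-7 last LBA, bytes 8-11 block size. */
static void parse_read_capacity_16(const uint8_t *buf,
                                   uint64_t *max_lba, uint32_t *blocksize)
{
    *max_lba = be64_load(&buf[0]);
    *blocksize = be32_load(&buf[8]);
}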