Example #1: tar_aio_readv() in the tar block driver
/* This is where we get a request from a caller to read something */
static BlockDriverAIOCB *tar_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVTarState *s = bs->opaque;
    SparseCache *sparse;
    int64_t sec_file = sector_num + s->file_sec;
    int64_t start = sector_num * SECTOR_SIZE;
    int64_t end = start + (nb_sectors * SECTOR_SIZE);
    int i;
    TarAIOCB *acb;

    for (i = 0; i < s->sparse_num; i++) {
        sparse = &s->sparse[i];
        if (sparse->start > end) {
            /* The cache is sorted by increasing start offset, so we can stop */
            break;
        } else if ((sparse->start < start) && (sparse->end <= start)) {
            /* sparse before our offset */
            sec_file -= (sparse->end - sparse->start) / SECTOR_SIZE;
        } else if ((sparse->start <= start) && (sparse->end >= end)) {
            /* all our sectors are sparse */
            char *buf = g_malloc0(nb_sectors * SECTOR_SIZE);

            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
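            /* Fill the caller's iovec with zeroes from the bounce buffer */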
            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            g_free(buf);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);

            return &acb->common;
        } else if (((sparse->start >= start) && (sparse->start < end)) ||
                   ((sparse->end >= start) && (sparse->end < end))) {
            /* we're semi-sparse (worst case) */
            /* let's go synchronous and read all sectors individually */
            char *buf = g_malloc(nb_sectors * SECTOR_SIZE);
            uint64_t offs;

            for (offs = 0; offs < (nb_sectors * SECTOR_SIZE);
                 offs += SECTOR_SIZE) {
                bdrv_pread(bs, (sector_num * SECTOR_SIZE) + offs,
                           buf + offs, SECTOR_SIZE);
            }

            qemu_iovec_from_buf(qiov, 0, buf, nb_sectors * SECTOR_SIZE);
            acb = qemu_aio_get(&tar_aiocb_info, bs, cb, opaque);
            acb->bh = qemu_bh_new(tar_sparse_cb, acb);
            qemu_bh_schedule(acb->bh);

            return &acb->common;
        }
    }

    return bdrv_aio_readv(s->hd, sec_file, qiov, nb_sectors,
                          cb, opaque);
}
Example #2: cloop_co_preadv() in the cloop block driver
static int coroutine_fn
cloop_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
                QEMUIOVector *qiov, int flags)
{
    BDRVCloopState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        void *data;
        uint32_t sector_offset_in_block =
            ((sector_num + i) % s->sectors_per_block),
            block_num = (sector_num + i) / s->sectors_per_block;
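        /* Load and decompress the block containing this sector into
         * s->uncompressed_block */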
        if (cloop_read_block(bs, block_num) != 0) {
            ret = -EIO;
            goto fail;
        }

        data = s->uncompressed_block + sector_offset_in_block * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}
Example #3: curl_read_cb() in the curl block driver
static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
{
    CURLState *s = (CURLState *)opaque;
    size_t realsize = size * nmemb;
    int i;

    DPRINTF("CURL: Just reading %zd bytes\n", realsize);

    if (!s || !s->orig_buf)
        goto read_end;

    memcpy(s->orig_buf + s->buf_off, ptr, realsize);
    s->buf_off += realsize;

    for (i = 0; i < CURL_NUM_ACB; i++) {
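        /* Check every AIO request outstanding on this CURL state */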
        CURLAIOCB *acb = s->acb[i];

        if (!acb)
            continue;

        /* The download has progressed far enough to satisfy this request */
        if (s->buf_off >= acb->end) {
            qemu_iovec_from_buf(acb->qiov, 0, s->orig_buf + acb->start,
                                acb->end - acb->start);
            acb->common.cb(acb->common.opaque, 0);
            qemu_aio_release(acb);
            s->acb[i] = NULL;
        }
    }

read_end:
    return realsize;
}
Example #4: complete_request() in the virtio-blk dataplane
static void complete_request(struct iocb *iocb, ssize_t ret, void *opaque)
{
    VirtIOBlockDataPlane *s = opaque;
    VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
    struct virtio_blk_inhdr hdr;
    int len;

    if (likely(ret >= 0)) {
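        /* On success ret holds the number of bytes transferred */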
        hdr.status = VIRTIO_BLK_S_OK;
        len = ret;
    } else {
        hdr.status = VIRTIO_BLK_S_IOERR;
        len = 0;
    }

    trace_virtio_blk_data_plane_complete_request(s, req->head, ret);

    /* Reads used a bounce buffer; copy the data back into the caller's
     * iovec before tearing it down */
    if (req->read_qiov) {
        assert(req->bounce_iov);
        qemu_iovec_from_buf(req->read_qiov, 0, req->bounce_iov->iov_base, len);
        qemu_iovec_destroy(req->read_qiov);
        g_slice_free(QEMUIOVector, req->read_qiov);
    }

    if (req->bounce_iov) {
        qemu_vfree(req->bounce_iov->iov_base);
        g_slice_free(struct iovec, req->bounce_iov);
    }

    /* ... remainder of the completion path elided in this excerpt ... */
}
Example #5: curl_find_buf() in the curl block driver
static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
                         CURLAIOCB *acb)
{
    int i;
    size_t end = start + len;

    for (i = 0; i < CURL_NUM_STATES; i++) {
        CURLState *state = &s->states[i];
        size_t buf_end = (state->buf_start + state->buf_off);
        size_t buf_fend = (state->buf_start + state->buf_len);

        if (!state->orig_buf)
            continue;
        if (!state->buf_off)
            continue;

        /* Does the existing buffer cover our section? */
        if ((start >= state->buf_start) &&
            (start <= buf_end) &&
            (end >= state->buf_start) &&
            (end <= buf_end))
        {
            char *buf = state->orig_buf + (start - state->buf_start);

            qemu_iovec_from_buf(acb->qiov, 0, buf, len);
            acb->common.cb(acb->common.opaque, 0);

            return FIND_RET_OK;
        }

        /* Wait for unfinished chunks */
        if (state->in_use &&
            (start >= state->buf_start) &&
            (start <= buf_fend) &&
            (end >= state->buf_start) &&
            (end <= buf_fend))
        {
            int j;

            acb->start = start - state->buf_start;
            acb->end = acb->start + len;

            for (j = 0; j < CURL_NUM_ACB; j++) {
                if (!state->acb[j]) {
                    state->acb[j] = acb;
                    return FIND_RET_WAIT;
                }
            }
        }
    }

    return FIND_RET_NONE;
}
Example #6: rbd_aio_bh_cb() in the rbd block driver
static void rbd_aio_bh_cb(void *opaque)
{
    RBDAIOCB *acb = opaque;

    if (acb->cmd == RBD_AIO_READ) {
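        /* The read landed in acb->bounce; copy it into the caller's iovec */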
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;

    qemu_aio_release(acb);
}
Example #7: qemu_rbd_complete_aio() in the rbd block driver
/*
 * This aio completion is being called from rbd_finish_bh() and runs in qemu
 * BH context.
 */
static void qemu_rbd_complete_aio(RADOSCB *rcb)
{
    RBDAIOCB *acb = rcb->acb;
    int64_t r;

    r = rcb->ret;

    if (acb->cmd != RBD_AIO_READ) {
        if (r < 0) {
            acb->ret = r;
            acb->error = 1;
        } else if (!acb->error) {
            acb->ret = rcb->size;
        }
    } else {
        if (r < 0) {
            qemu_rbd_memset(rcb, 0);
            acb->ret = r;
            acb->error = 1;
        } else if (r < rcb->size) {
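            /* Short read: zero the unread tail of the buffer */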
            qemu_rbd_memset(rcb, r);
            if (!acb->error) {
                acb->ret = rcb->size;
            }
        } else if (!acb->error) {
            acb->ret = r;
        }
    }

    g_free(rcb);

    if (!LIBRBD_USE_IOVEC) {
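        /* Without iovec support in librbd, reads go through a bounce buffer */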
        if (acb->cmd == RBD_AIO_READ) {
            qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
        }
        qemu_vfree(acb->bounce);
    }

    acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));

    qemu_aio_unref(acb);
}
Example #8: qemu_rbd_complete_aio() in the rbd block driver (earlier revision)
/*
 * This aio completion is being called from rbd_finish_bh() and runs in qemu
 * BH context.
 */
static void qemu_rbd_complete_aio(RADOSCB *rcb)
{
    RBDAIOCB *acb = rcb->acb;
    int64_t r;

    r = rcb->ret;

    if (acb->cmd != RBD_AIO_READ) {
        if (r < 0) {
            acb->ret = r;
            acb->error = 1;
        } else if (!acb->error) {
            acb->ret = rcb->size;
        }
    } else {
        if (r < 0) {
            memset(rcb->buf, 0, rcb->size);
            acb->ret = r;
            acb->error = 1;
        } else if (r < rcb->size) {
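            /* Short read: zero-fill the bytes the backend did not return */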
            memset(rcb->buf + r, 0, rcb->size - r);
            if (!acb->error) {
                acb->ret = rcb->size;
            }
        } else if (!acb->error) {
            acb->ret = r;
        }
    }

    g_free(rcb);

    if (acb->cmd == RBD_AIO_READ) {
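        /* Copy the read data from the bounce buffer into the caller's iovec */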
        qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
    }
    qemu_vfree(acb->bounce);
    acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
    acb->status = 0;

    if (!acb->cancelled) {
        qemu_aio_release(acb);
    }
}
Example #9: dmg_co_preadv() in the dmg block driver
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
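        /* Copy this sector out of the decompressed chunk */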
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
Example #10: xseg_request_handler() in the Archipelago block driver
static void xseg_request_handler(void *state)
{
    BDRVArchipelagoState *s = (BDRVArchipelagoState *) state;
    void *psd = xseg_get_signal_desc(s->xseg, s->port);
    qemu_mutex_lock(&s->request_mutex);

    while (!s->stopping) {
        struct xseg_request *req;
        void *data;
        xseg_prepare_wait(s->xseg, s->srcport);
        req = xseg_receive(s->xseg, s->srcport, X_NONBLOCK);
        if (req) {
            AIORequestData *reqdata;
            ArchipelagoSegmentedRequest *segreq;
            xseg_get_req_data(s->xseg, req, (void **)&reqdata);

            switch (reqdata->op) {
            case ARCHIP_OP_READ:
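                /* A read segment completed: scatter its payload into the
                 * caller's iovec at this segment's offset */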
                data = xseg_get_data(s->xseg, req);
                segreq = reqdata->segreq;
                segreq->count += req->serviced;

                qemu_iovec_from_buf(reqdata->aio_cb->qiov, reqdata->bufidx,
                                    data,
                                    req->serviced);

                xseg_put_request(s->xseg, req, s->srcport);

                if ((__sync_add_and_fetch(&segreq->ref, -1)) == 0) {
                    if (!segreq->failed) {
                        reqdata->aio_cb->ret = segreq->count;
                        archipelago_finish_aiocb(reqdata);
                        g_free(segreq);
                    } else {
                        g_free(segreq);
                        g_free(reqdata);
                    }
                } else {
                    g_free(reqdata);
                }
                break;
            case ARCHIP_OP_WRITE:
            case ARCHIP_OP_FLUSH:
                segreq = reqdata->segreq;
                segreq->count += req->serviced;
                xseg_put_request(s->xseg, req, s->srcport);

                if ((__sync_add_and_fetch(&segreq->ref, -1)) == 0) {
                    if (!segreq->failed) {
                        reqdata->aio_cb->ret = segreq->count;
                        archipelago_finish_aiocb(reqdata);
                        g_free(segreq);
                    } else {
                        g_free(segreq);
                        g_free(reqdata);
                    }
                } else {
                    g_free(reqdata);
                }
                break;
            case ARCHIP_OP_VOLINFO:
                s->is_signaled = true;
                qemu_cond_signal(&s->archip_cond);
                break;
            }
        } else {
            xseg_wait_signal(s->xseg, psd, 100000UL);
        }
        xseg_cancel_wait(s->xseg, s->srcport);
    }

    s->th_is_signaled = true;
    qemu_cond_signal(&s->request_cond);
    qemu_mutex_unlock(&s->request_mutex);
    qemu_thread_exit(NULL);
}
Example #11: block_crypto_co_readv() in the crypto block driver
static coroutine_fn int
block_crypto_co_readv(BlockDriverState *bs, int64_t sector_num,
                      int remaining_sectors, QEMUIOVector *qiov)
{
    BlockCrypto *crypto = bs->opaque;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t bytes_done = 0;
    uint8_t *cipher_data = NULL;
    QEMUIOVector hd_qiov;
    int ret = 0;
    size_t payload_offset =
        qcrypto_block_get_payload_offset(crypto->block) / 512;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    /* Bounce buffer so we have a linear mem region for
     * entire sector. XXX optimize so we avoid bounce
     * buffer in case that qiov->niov == 1
     */
    cipher_data =
        qemu_try_blockalign(bs->file->bs, MIN(BLOCK_CRYPTO_MAX_SECTORS * 512,
                                              qiov->size));
    if (cipher_data == NULL) {
        ret = -ENOMEM;
        goto cleanup;
    }

    while (remaining_sectors) {
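        /* Process at most BLOCK_CRYPTO_MAX_SECTORS sectors per iteration */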
        cur_nr_sectors = remaining_sectors;

        if (cur_nr_sectors > BLOCK_CRYPTO_MAX_SECTORS) {
            cur_nr_sectors = BLOCK_CRYPTO_MAX_SECTORS;
        }

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_add(&hd_qiov, cipher_data, cur_nr_sectors * 512);

        ret = bdrv_co_readv(bs->file,
                            payload_offset + sector_num,
                            cur_nr_sectors, &hd_qiov);
        if (ret < 0) {
            goto cleanup;
        }

        if (qcrypto_block_decrypt(crypto->block,
                                  sector_num,
                                  cipher_data, cur_nr_sectors * 512,
                                  NULL) < 0) {
            ret = -EIO;
            goto cleanup;
        }

        /* Copy the decrypted sectors into the caller's iovec */
        qemu_iovec_from_buf(qiov, bytes_done,
                            cipher_data, cur_nr_sectors * 512);

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
    }

 cleanup:
    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cipher_data);

    return ret;
}
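
All eleven examples share the same pattern: the driver produces read data in a
linear bounce buffer (because its backend cannot scatter/gather directly), then
copies that buffer into the caller's QEMUIOVector with qemu_iovec_from_buf().
A minimal sketch of the pattern follows; example_read() and read_from_backend()
are hypothetical stand-ins, not real QEMU APIs:

static int example_read(void *backend, QEMUIOVector *qiov, size_t bytes)
{
    /* Linear bounce buffer, because this backend cannot scatter/gather */
    char *buf = g_malloc(bytes);
    int ret = read_from_backend(backend, buf, bytes); /* hypothetical */

    if (ret == 0) {
        /* Scatter the bounce buffer across the caller's iovec */
        qemu_iovec_from_buf(qiov, 0, buf, bytes);
    }
    g_free(buf);
    return ret;
}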