Example #1
0
/*
 * Split a guest discard request into chunks no larger than the block
 * layer's per-request byte limit and submit each chunk asynchronously.
 *
 * Returns false (request rejected) when the sector range wraps around
 * or the total size in bytes would exceed INT64_MAX; true otherwise.
 * Each submitted chunk bumps ioreq->aio_inflight and completes via
 * qemu_aio_complete().
 *
 * NOTE(review): a zero-length request still issues one 0-byte discard,
 * because the original loop runs at least once — preserved here.
 */
static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
                              uint64_t nr_sectors)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uint64_t start = sector_number;
    uint64_t count = nr_sectors;
    uint64_t max_bytes;
    uint64_t remaining;
    int64_t offset;

    /* Reject ranges that wrap around or exceed the signed byte range. */
    if (start + count < count ||
        start + count > INT64_MAX >> BDRV_SECTOR_BITS) {
        return false;
    }

    /* Per-request cap, expressed in bytes (fits in int by construction). */
    max_bytes = BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS;
    offset = start << BDRV_SECTOR_BITS;
    remaining = count << BDRV_SECTOR_BITS;

    for (;;) {
        int chunk = remaining > max_bytes ? max_bytes : remaining;

        ioreq->aio_inflight++;
        blk_aio_pdiscard(blkdev->blk, offset, chunk,
                         qemu_aio_complete, ioreq);

        remaining -= chunk;
        offset += chunk;
        if (remaining == 0) {
            break;
        }
    }

    return true;
}
Example #2
0
/*
 * Submit an asynchronous discard or write-zeroes to the mirror target
 * for [offset, offset + bytes), tracked by a freshly allocated MirrorOp.
 *
 * Bumps s->in_flight and s->bytes_in_flight before submission; the
 * operation completes through mirror_write_complete().
 */
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t offset,
                                      uint64_t bytes,
                                      bool is_discard)
{
    /* The op's qiov stays zeroed (g_new0), so the qiov cleanup in
     * mirror_iteration_done is a no-op for this op. */
    MirrorOp *op = g_new0(MirrorOp, 1);

    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    s->in_flight++;
    s->bytes_in_flight += bytes;

    if (!is_discard) {
        blk_aio_pwrite_zeroes(s->target, offset, op->bytes,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    } else {
        blk_aio_pdiscard(s->target, offset, op->bytes,
                         mirror_write_complete, op);
    }
}
Example #3
0
/*
 * Split a Xen discard request into chunks of at most
 * BDRV_REQUEST_MAX_BYTES and submit each one asynchronously.
 *
 * Returns false when the sector range wraps around or the byte total
 * would overflow int64_t; true once all chunks have been submitted.
 * Every chunk increments request->aio_inflight and finishes via
 * xen_block_complete_aio().
 *
 * NOTE(review): a zero-sector request still submits one 0-byte discard,
 * matching the original at-least-once loop.
 */
static bool xen_block_split_discard(XenBlockRequest *request,
                                    blkif_sector_t sector_number,
                                    uint64_t nr_sectors)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    uint64_t start = sector_number;
    uint64_t count = nr_sectors;
    uint64_t remaining;
    int64_t offset;

    /* Reject ranges that wrap around or exceed the signed byte range. */
    if (start + count < count ||
        start + count > INT64_MAX / XEN_BLKIF_SECTOR_SIZE) {
        return false;
    }

    offset = start * XEN_BLKIF_SECTOR_SIZE;
    remaining = count * XEN_BLKIF_SECTOR_SIZE;

    for (;;) {
        int chunk = remaining > BDRV_REQUEST_MAX_BYTES ?
            BDRV_REQUEST_MAX_BYTES : remaining;

        request->aio_inflight++;
        blk_aio_pdiscard(dataplane->blk, offset, chunk,
                         xen_block_complete_aio, request);

        remaining -= chunk;
        offset += chunk;
        if (remaining == 0) {
            break;
        }
    }

    return true;
}