Example #1
static int
zvol_read(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int error;
	rl_t *rl;
	uio_t uio;

	if (size == 0)
		return (0);

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = size;
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = offset;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, &uio, size);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = SET_ERROR(EIO);

	return (error);
}
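Note how the byte offset is computed from the bio's starting sector with a left shift by 9: Linux block-layer sectors are 512 bytes, so the shift is simply a multiply by 512. As a minimal standalone check of that arithmetic (not part of zvol.c; the sector value is made up):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t sector = 2048;		/* hypothetical starting sector (BIO_BI_SECTOR) */
	uint64_t offset = sector << 9;	/* 512-byte sectors -> byte offset */

	assert(offset == sector * 512);	/* 2048 * 512 = 1048576 */
	return (0);
}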
Example #2
File: zvol.c Project: l1k/zfs
static int
zvol_read(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t len = BIO_BI_SIZE(bio);
	int error;
	rl_t *rl;

	if (len == 0)
		return (0);

	rl = zfs_range_lock(&zv->zv_znode, offset, len, RL_READER);

	error = dmu_read_bio(zv->zv_objset, ZVOL_OBJ, bio);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = SET_ERROR(EIO);

	return (error);
}
Example #3
static int
zvol_write(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;
	uio_t uio;

	if (bio->bi_rw & VDEV_REQ_FLUSH)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/*
	 * Some requests are just for flush and nothing else.
	 */
	if (size == 0)
		goto out;

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = size;
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = offset;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		goto out;
	}

	error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, &uio, size, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size,
		    !!(bio->bi_rw & VDEV_REQ_FUA));

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if ((bio->bi_rw & VDEV_REQ_FUA) ||
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

out:
	return (error);
}
Example #4
static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;
	dmu_tx_t *tx;

	ASSERT(zv && zv->zv_open_count > 0);

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Align the request to volume block boundaries when REQ_SECURE is
	 * available, but not requested. If we don't, then this will force
	 * dnode_free_range() to zero out the unaligned parts, which is slow
	 * (read-modify-write) and useless since we are not freeing any space
	 * by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
	 * 2.6.35) will not receive this optimization.
	 */
#ifdef REQ_SECURE
	if (!(bio->bi_rw & REQ_SECURE)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}
#endif

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);
	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}

	zfs_range_unlock(rl);

	return (error);
}
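The comment in zvol_discard() above explains why an unaligned discard range is shrunk to whole volume blocks before anything is freed. As a rough standalone illustration of that arithmetic (not part of zvol.c; the macro definitions below mirror the usual ZFS P2ROUNDUP/P2ALIGN power-of-two helpers, and the block size and offsets are made up):

#include <stdio.h>
#include <stdint.h>

/* Assumed equivalents of the ZFS power-of-two alignment macros. */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round up */
#define	P2ALIGN(x, align)	((x) & -(align))	/* round down */

int
main(void)
{
	uint64_t volblocksize = 8192;	/* hypothetical 8 KiB volume block */
	uint64_t start = 12345;		/* unaligned discard start */
	uint64_t size = 50000;
	uint64_t end = start + size;	/* 62345 */

	/* Shrink the range so only whole blocks are freed. */
	start = P2ROUNDUP(start, volblocksize);	/* -> 16384 */
	end = P2ALIGN(end, volblocksize);	/* -> 57344 */

	if (start >= end) {
		printf("nothing to free after alignment\n");
	} else {
		printf("free [%llu, %llu), %llu bytes\n",
		    (unsigned long long)start,
		    (unsigned long long)end,
		    (unsigned long long)(end - start));
	}
	return (0);
}

With these numbers the request shrinks from [12345, 62345) to [16384, 57344), so only the five fully covered 8 KiB blocks would be handed to dmu_free_long_range(); the partial blocks at either edge are left alone rather than read-modify-written to zero.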
Example #5
File: zvol.c Project: alek-p/zfs
static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;
	dmu_tx_t *tx;

	ASSERT(zv && zv->zv_open_count > 0);

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Align the request to volume block boundaries when a secure erase is
	 * not required.  This will prevent dnode_free_range() from zeroing out
	 * the unaligned parts which is slow (read-modify-write) and useless
	 * since we are not freeing any space by doing so.
	 */
	if (!bio_is_secure_erase(bio)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_range_lock, start, size, RL_WRITER);
	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}

	zfs_range_unlock(rl);

	return (error);
}
Example #6
static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Align the request to volume block boundaries when REQ_SECURE is
	 * available, but not requested. If we don't, then this will force
	 * dnode_free_range() to zero out the unaligned parts, which is slow
	 * (read-modify-write) and useless since we are not freeing any space
	 * by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
	 * 2.6.35) will not receive this optimization.
	 */
#ifdef REQ_SECURE
	if (!(bio->bi_rw & REQ_SECURE)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}
#endif

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, size);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	return (error);
}
Example #7
File: zvol.c Project: alek-p/zfs
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
{
	uio_t uio;
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	int rw = bio_data_dir(bio);
#ifdef HAVE_GENERIC_IO_ACCT
	unsigned long start = jiffies;
#endif
	int error = 0;

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = BIO_BI_SIZE(bio);
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = BIO_BI_SECTOR(bio) << 9;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	if (bio_has_data(bio) && uio.uio_loffset + uio.uio_resid >
	    zv->zv_volsize) {
		printk(KERN_INFO
		    "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_disk->disk_name,
		    (long long unsigned)uio.uio_loffset,
		    (long unsigned)uio.uio_resid);
		error = SET_ERROR(EIO);
		goto out1;
	}

	generic_start_io_acct(rw, bio_sectors(bio), &zv->zv_disk->part0);

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			error = SET_ERROR(EROFS);
			goto out2;
		}

		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
			error = zvol_discard(bio);
			goto out2;
		}

		/*
		 * Some requests are just for flush and nothing else.
		 */
		if (uio.uio_resid == 0) {
			if (bio_is_flush(bio))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);
			goto out2;
		}

		error = zvol_write(zv, &uio,
		    bio_is_flush(bio) || bio_is_fua(bio) ||
		    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
	} else
		error = zvol_read(zv, &uio);

out2:
	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
out1:
	BIO_END_IO(bio, -error);
	spl_fstrans_unmark(cookie);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}