Example #1
/*
 * Free space in a file.
 *
 *	IN:	zp	- znode of file to free data in.
 *		off	- start of section to free.
 *		len	- length of section to free.
 *
 * 	RETURN:	0 on success, error code on failure
 */
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	rl_t *rl;
	int error;

	/*
	 * Lock the range being freed.
	 */
	rl = zfs_range_lock(zp, off, len, RL_WRITER);

	/*
	 * Nothing to do if the range starts at or beyond end of file.
	 */
	if (off >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	/* Clamp the length so we never free past end of file. */
	if (off + len > zp->z_size)
		len = zp->z_size - off;

	error = dmu_free_long_range(zsb->z_os, zp->z_id, off, len);

	zfs_range_unlock(rl);

	return (error);
}
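
The two checks at the heart of this function make the free a no-op past EOF and trim any tail that crosses EOF. A standalone sketch of the same arithmetic with hypothetical sizes (clamp_free_len is an illustrative helper, not ZFS code):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the EOF clamp in zfs_free_range(), with made-up sizes. */
static uint64_t
clamp_free_len(uint64_t z_size, uint64_t off, uint64_t len)
{
	if (off >= z_size)
		return (0);		/* range starts beyond EOF: no-op */
	if (off + len > z_size)
		len = z_size - off;	/* trim the tail that passes EOF */
	return (len);
}

int
main(void)
{
	/* z_size = 150: a 100-byte free at offset 100 is trimmed to 50. */
	printf("%llu\n", (unsigned long long)clamp_free_len(150, 100, 100));
	/* A free starting at or past EOF degenerates to zero bytes. */
	printf("%llu\n", (unsigned long long)clamp_free_len(150, 200, 10));
	return (0);
}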
Example #2
File: zvol.c Project: koplover/zfs
static int
zvol_write(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;
	uio_t uio;

	if (bio->bi_rw & VDEV_REQ_FLUSH)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/*
	 * Some requests are just for flush and nothing else.
	 */
	if (size == 0)
		goto out;

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = size;
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = offset;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		goto out;
	}

	error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, &uio, size, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size,
		    !!(bio->bi_rw & VDEV_REQ_FUA));

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if ((bio->bi_rw & VDEV_REQ_FUA) ||
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

out:
	return (error);
}
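
The zvol paths in these snippets all derive the byte offset from the block layer's 512-byte sector number with a shift by 9. A minimal userspace demonstration of that equivalence (the sector value is made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t sector = 2048;		/* hypothetical bio sector number */
	uint64_t offset = sector << 9;	/* what BIO_BI_SECTOR(bio) << 9 computes */

	assert(offset == sector * 512);	/* 1 sector = 512 bytes = 2^9 */
	printf("sector %llu -> byte offset %llu\n",
	    (unsigned long long)sector, (unsigned long long)offset);
	return (0);
}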
Example #3
File: zvol.c Project: avg-I/zfs
/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error = 0;
    dmu_tx_t *tx;
    rl_t *rl;

    if (req->cmd_flags & VDEV_REQ_FLUSH)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);

    /*
     * Some requests are just for flush and nothing else.
     */
    if (size == 0) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

    tx = dmu_tx_create(zv->zv_objset);
    dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

    /* This will only fail for ENOSPC */
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        zfs_range_unlock(rl);
        goto out;
    }

    error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
    if (error == 0)
        zvol_log_write(zv, tx, offset, size,
                       req->cmd_flags & VDEV_REQ_FUA);

    dmu_tx_commit(tx);
    zfs_range_unlock(rl);

    if ((req->cmd_flags & VDEV_REQ_FUA) ||
            zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);

out:
    blk_end_request(req, -error, size);
    spl_fstrans_unmark(cookie);
}
Example #4
File: zvol.c Project: koplover/zfs
static int
zvol_read(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int error;
	rl_t *rl;
	uio_t uio;

	if (size == 0)
		return (0);

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = size;
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = offset;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, &uio, size);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = SET_ERROR(EIO);

	return (error);
}
Example #5
File: zvol.c Project: l1k/zfs
static int
zvol_read(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t len = BIO_BI_SIZE(bio);
	int error;
	rl_t *rl;

	if (len == 0)
		return (0);

	rl = zfs_range_lock(&zv->zv_znode, offset, len, RL_READER);

	error = dmu_read_bio(zv->zv_objset, ZVOL_OBJ, bio);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = SET_ERROR(EIO);

	return (error);
}
Example #6
/*
 * Common read path running under the zvol taskq context.  This function
 * is responsible for copying the requested data out of the DMU and into
 * a Linux request structure.  It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error;
	rl_t *rl;

	if (size == 0) {
		blk_end_request(req, 0, size);
		return;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = EIO;

	blk_end_request(req, -error, size);
}
Example #7
File: zvol.c Project: alek-p/zfs
static int
zvol_read(zvol_state_t *zv, uio_t *uio)
{
	uint64_t volsize = zv->zv_volsize;
	rl_t *rl;
	int error = 0;

	ASSERT(zv && zv->zv_open_count > 0);

	rl = zfs_range_lock(&zv->zv_range_lock, uio->uio_loffset,
	    uio->uio_resid, RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}
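
The loop above bounds each DMU copy by both the remaining request and end-of-volume. A userspace simulation of the same chunking, with made-up sizes standing in for DMU_MAX_ACCESS >> 1 and the volume geometry:

#include <stdint.h>
#include <stdio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t volsize = 1000;	/* hypothetical volume size */
	uint64_t loffset = 100;		/* uio->uio_loffset */
	uint64_t resid = 5000;		/* uio->uio_resid: more than fits */
	uint64_t max_chunk = 256;	/* stand-in for DMU_MAX_ACCESS >> 1 */

	while (resid > 0 && loffset < volsize) {
		uint64_t bytes = MIN(resid, max_chunk);

		if (bytes > volsize - loffset)	/* don't read past the end */
			bytes = volsize - loffset;

		printf("copy %llu bytes at %llu\n",
		    (unsigned long long)bytes, (unsigned long long)loffset);
		loffset += bytes;	/* the real DMU call advances the uio */
		resid -= bytes;
	}
	return (0);
}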
Example #8
File: zvol.c Project: avg-I/zfs
/*
 * Common read path running under the zvol taskq context.  This function
 * is responsible for copying the requested data out of the DMU and into
 * a Linux request structure.  It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (size == 0) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

    zfs_range_unlock(rl);

    /* convert checksum errors into IO errors */
    if (error == ECKSUM)
        error = SET_ERROR(EIO);

out:
    blk_end_request(req, -error, size);
    spl_fstrans_unmark(cookie);
}
Example #9
File: zvol.c Project: nordaux/zfs
static void
zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (offset + size > zv->zv_volsize) {
        blk_end_request(req, -EIO, size);
        return;
    }

    if (size == 0) {
        blk_end_request(req, 0, size);
        return;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);

    blk_end_request(req, -error, size);
}
Example #10
static void
zvol_discard(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t start = blk_rq_pos(req) << 9;
	uint64_t end = start + blk_rq_bytes(req);
	int error;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (end > zv->zv_volsize) {
		blk_end_request(req, -EIO, blk_rq_bytes(req));
		goto out;
	}

	/*
	 * Align the request to volume block boundaries. If we don't,
	 * then this will force dnode_free_range() to zero out the
	 * unaligned parts, which is slow (read-modify-write) and
	 * useless since we are not freeing any space by doing so.
	 */
	start = P2ROUNDUP(start, zv->zv_volblocksize);
	end = P2ALIGN(end, zv->zv_volblocksize);

	if (start >= end) {
		blk_end_request(req, 0, blk_rq_bytes(req));
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end - start);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	blk_end_request(req, -error, blk_rq_bytes(req));
out:
	current->flags &= ~PF_NOFS;
}
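
P2ROUNDUP and P2ALIGN shrink the discard inward to whole volume blocks. A self-contained sketch using the usual power-of-two definitions from illumos sysmacros.h (the sizes here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Power-of-two helpers as commonly defined in illumos/ZFS sysmacros.h. */
#define	P2ALIGN(x, align)	((x) & -(align))	/* round down */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round up */

int
main(void)
{
	uint64_t volblocksize = 4096;	/* hypothetical zvol block size */
	uint64_t start = 700;		/* unaligned discard start */
	uint64_t end = 12000;		/* unaligned discard end */

	/* Shrink the range inward to whole blocks, as zvol_discard() does. */
	start = P2ROUNDUP(start, volblocksize);	/* 700   -> 4096 */
	end = P2ALIGN(end, volblocksize);	/* 12000 -> 8192 */

	printf("aligned range: [%llu, %llu)\n",
	    (unsigned long long)start, (unsigned long long)end);
	return (0);
}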
Example #11
File: zvol.c Project: Oliverlyn/zfs
static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;
	dmu_tx_t *tx;

	ASSERT(zv && zv->zv_open_count > 0);

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Align the request to volume block boundaries when REQ_SECURE is
	 * available, but not requested. If we don't, then this will force
	 * dnode_free_range() to zero out the unaligned parts, which is slow
	 * (read-modify-write) and useless since we are not freeing any space
	 * by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
	 * 2.6.35) will not receive this optimization.
	 */
#ifdef REQ_SECURE
	if (!(bio->bi_rw & REQ_SECURE)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}
#endif

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);
	tx = dmu_tx_create(zv->zv_objset);
	/* The tx frees more space than it consumes; relax ENOSPC checks. */
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}

	zfs_range_unlock(rl);

	return (error);
}
Example #12
File: zvol.c Project: alek-p/zfs
static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
Example #13
/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		blk_end_request(req, -error, size);
		return;
	}

	error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size, rq_is_sync(req));

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if (rq_is_sync(req))
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	blk_end_request(req, -error, size);
}
Example #14
File: zvol.c Project: alek-p/zfs
static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;
	dmu_tx_t *tx;

	ASSERT(zv && zv->zv_open_count > 0);

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Align the request to volume block boundaries when a secure erase is
	 * not required.  This will prevent dnode_free_range() from zeroing out
	 * the unaligned parts which is slow (read-modify-write) and useless
	 * since we are not freeing any space by doing so.
	 */
	if (!bio_is_secure_erase(bio)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_range_lock, start, size, RL_WRITER);
	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}

	zfs_range_unlock(rl);

	return (error);
}
Example #15
File: zvol.c Project: avg-I/zfs
static void
zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t start = blk_rq_pos(req) << 9;
    uint64_t end = start + blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (end > zv->zv_volsize) {
        error = EIO;
        goto out;
    }

    /*
     * Align the request to volume block boundaries. If we don't,
     * then this will force dnode_free_range() to zero out the
     * unaligned parts, which is slow (read-modify-write) and
     * useless since we are not freeing any space by doing so.
     */
    start = P2ROUNDUP(start, zv->zv_volblocksize);
    end = P2ALIGN(end, zv->zv_volblocksize);

    if (start >= end) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end - start);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);
out:
    blk_end_request(req, -error, blk_rq_bytes(req));
    spl_fstrans_unmark(cookie);
}
Example #16
File: zvol.c Project: koplover/zfs
static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Align the request to volume block boundaries when REQ_SECURE is
	 * available, but not requested. If we don't, then this will force
	 * dnode_free_range() to zero out the unaligned parts, which is slow
	 * (read-modify-write) and useless since we are not freeing any space
	 * by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
	 * 2.6.35) will not receive this optimization.
	 */
#ifdef REQ_SECURE
	if (!(bio->bi_rw & REQ_SECURE)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}
#endif

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, size);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	return (error);
}
Example #17
File: zvol.c Project: torn5/zfs
static void
zvol_discard(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (offset + size > zv->zv_volsize) {
		blk_end_request(req, -EIO, size);
		goto out;
	}

	if (size == 0) {
		blk_end_request(req, 0, size);
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	blk_end_request(req, -error, size);
out:
	current->flags &= ~PF_NOFS;
}
Example #18
File: zvol.c Project: alek-p/zfs
static int
zvol_write(zvol_state_t *zv, uio_t *uio, boolean_t sync)
{
	uint64_t volsize = zv->zv_volsize;
	rl_t *rl;
	int error = 0;

	ASSERT(zv && zv->zv_open_count > 0);

	rl = zfs_range_lock(&zv->zv_range_lock, uio->uio_loffset,
	    uio->uio_resid, RL_WRITER);

	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);

		/* This will only fail for ENOSPC */
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}
Example #19
/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (req->cmd_flags & VDEV_REQ_FLUSH)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/*
	 * Some requests are just for flush and nothing else.
	 */
	if (size == 0) {
		blk_end_request(req, 0, size);
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		blk_end_request(req, -error, size);
		goto out;
	}

	error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size,
		    req->cmd_flags & VDEV_REQ_FUA);

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if ((req->cmd_flags & VDEV_REQ_FUA) ||
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	blk_end_request(req, -error, size);
out:
	current->flags &= ~PF_NOFS;
}
Example #20
/*
 * Truncate a file.
 *
 *	IN:	zp	- znode of file to truncate.
 *		end	- new end-of-file.
 *
 * 	RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	int error;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	/*
	 * We will change zp_size, so lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	error = dmu_free_long_range(zsb->z_os, zp->z_id, end, -1);
	if (error) {
		zfs_range_unlock(rl);
		return (error);
	}
top:
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	zp->z_size = end;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb),
	    NULL, &zp->z_size, sizeof (zp->z_size));

	if (end == 0) {
		zp->z_pflags &= ~ZFS_SPARSE;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb),
		    NULL, &zp->z_pflags, 8);
	}
	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);

	dmu_tx_commit(tx);

	zfs_range_unlock(rl);

	return (0);
}
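
The top: retry is the standard TXG_NOWAIT idiom: on ERESTART, wait for the next open txg, abort the stale tx, and rebuild it from scratch. A userspace simulation of just the control flow, with stub functions (all stubs hypothetical, not the DMU API; the errno value is illustrative):

#include <stdio.h>

#define	ERESTART	85	/* value is illustrative */

static int attempts;

/* Stubs standing in for the DMU; they only model the control flow. */
static int dmu_tx_assign_stub(void) { return (++attempts < 3 ? ERESTART : 0); }
static void dmu_tx_wait_stub(void) { printf("wait for open txg\n"); }
static void dmu_tx_abort_stub(void) { printf("abort tx\n"); }
static void dmu_tx_commit_stub(void) { printf("commit tx\n"); }

int
main(void)
{
	int error;
top:
	/* A real caller would rebuild the tx and its holds here. */
	error = dmu_tx_assign_stub();
	if (error) {
		if (error == ERESTART) {
			/* Wait for the next txg, then retry with a new tx. */
			dmu_tx_wait_stub();
			dmu_tx_abort_stub();
			goto top;
		}
		dmu_tx_abort_stub();
		return (error);
	}
	dmu_tx_commit_stub();
	return (0);
}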
Example #21
/*
 * Increase the file length.
 *
 *	IN:	zp	- znode of file to extend.
 *		end	- new end-of-file.
 *
 * 	RETURN:	0 on success, error code on failure
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	uint64_t newblksz;
	int error;

	/*
	 * We will change zp_size, so lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end <= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	if (end > zp->z_blksz &&
	    (!ISP2(zp->z_blksz) || zp->z_blksz < zsb->z_max_blksz)) {
		/*
		 * We are growing the file past the current block size.
		 */
		if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
			ASSERT(!ISP2(zp->z_blksz));
			newblksz = MIN(end, SPA_MAXBLOCKSIZE);
		} else {
			newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
		}
		dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
	} else {
		newblksz = 0;
	}

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	if (newblksz)
		zfs_grow_blocksize(zp, newblksz, tx);

	zp->z_size = end;

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
	    &zp->z_size, sizeof (zp->z_size), tx));

	zfs_range_unlock(rl);

	dmu_tx_commit(tx);

	return (0);
}
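
The newblksz selection is the subtle part of zfs_extend. A standalone sketch of the same decision, with hypothetical field values (pick_newblksz is an illustrative helper, not ZFS code):

#include <stdint.h>
#include <stdio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	ISP2(x)		(((x) & ((x) - 1)) == 0)	/* power of two? */

/* Arguments are hypothetical stand-ins for the znode/superblock fields. */
static uint64_t
pick_newblksz(uint64_t end, uint64_t z_blksz, uint64_t max_blksz,
    uint64_t spa_maxblocksize)
{
	if (end > z_blksz && (!ISP2(z_blksz) || z_blksz < max_blksz)) {
		if (z_blksz > max_blksz)	/* oddly sized large block */
			return (MIN(end, spa_maxblocksize));
		return (MIN(end, max_blksz));
	}
	return (0);	/* block size already final: no write hold needed */
}

int
main(void)
{
	/* Growing a 4K-block file to 200K caps the block at 128K. */
	printf("%llu\n", (unsigned long long)
	    pick_newblksz(200 * 1024, 4096, 128 * 1024, 16 * 1024 * 1024));
	/* A file already at the maximum block size needs no growth. */
	printf("%llu\n", (unsigned long long)
	    pick_newblksz(200 * 1024, 128 * 1024, 128 * 1024, 16 * 1024 * 1024));
	return (0);
}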