Example 1
int
xfs_discard_extents(
    struct xfs_mount	*mp,
    struct list_head	*list)
{
    struct xfs_busy_extent	*busyp;
    int			error = 0;

    list_for_each_entry(busyp, list, list) {
        trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
                                 busyp->length);

        error = -blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
                                      XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
                                      XFS_FSB_TO_BB(mp, busyp->length),
                                      GFP_NOFS, 0);
        if (error && error != EOPNOTSUPP) {
            xfs_info(mp,
                     "discard failed for extent [0x%llu,%u], error %d",
                     (unsigned long long)busyp->bno,
                     busyp->length,
                     error);
            return error;
        }
    }

    return 0;
}
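The loop above shows the minimal contract with the block layer: pass the device, a starting sector and a sector count, and treat an unsupported discard as non-fatal. A stripped-down sketch of that pattern; the helper name and its parameters are placeholders, not part of any of the sources in this listing:

#include <linux/blkdev.h>

/*
 * Minimal sketch of the call pattern above: issue one discard and treat
 * a device without discard support as a no-op.  "discard_range" and its
 * parameters are hypothetical names, for illustration only.
 */
static int discard_range(struct block_device *bdev, sector_t start,
			 sector_t nr_sects)
{
	int error;

	/* blkdev_issue_discard() may block; never call it under a spinlock. */
	error = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
	if (error == -EOPNOTSUPP)
		return 0;	/* no discard support on this device: ignore */
	return error;
}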
Example 2
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if ((!atomic_read(&ca->set->prio_blocked) ||
			     !CACHE_SYNC(&ca->set->sb)) &&
			    !fifo_empty(&ca->unused))
				fifo_pop(&ca->unused, bucket);
			else if (!fifo_empty(&ca->free_inc))
				fifo_pop(&ca->free_inc, bucket);
			else
				break;

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.block_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

		allocator_wait(ca, ca->set->gc_mark_valid &&
			       (ca->need_save_prio > 64 ||
				!ca->invalidate_needs_gc));
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb) &&
		    (!fifo_empty(&ca->free_inc) ||
		     ca->need_save_prio > 64))
			bch_prio_write(ca);
	}
}
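Both bcache allocator variants in this listing drop bucket_lock around the discard, because blkdev_issue_discard() submits the request and waits for it to complete. A hedged sketch of that unlock-around-blocking-call pattern in isolation; the lock, device and range are placeholders:

#include <linux/blkdev.h>
#include <linux/mutex.h>

/*
 * Sketch only: the point is that the lock protecting allocator state is
 * released before the blocking discard and re-taken afterwards.
 */
static void discard_bucket_range(struct mutex *lock,
				 struct block_device *bdev,
				 sector_t sector, sector_t nr_sects)
{
	mutex_unlock(lock);
	/* Blocks until the discard completes (or fails). */
	blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
	mutex_lock(lock);
}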
Example 3
static sense_reason_t
iblock_do_unmap(struct se_cmd *cmd, void *priv,
		sector_t lba, sector_t nolb)
{
	struct block_device *bdev = priv;
	int ret;

	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}
Example 4
static int iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
	int ret;

	ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
				   spc_get_write_same_sectors(cmd), GFP_KERNEL,
				   0);
	if (ret < 0) {
		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
		return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}
Example 5
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
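blkdev_issue_zeroout() above tries the cheapest mechanism first: a discard, but only when the queue advertises discard_zeroes_data (so the discarded range is guaranteed to read back as zeroes), then WRITE SAME with the zero page, and finally plain zero-filled writes. A hedged caller-side sketch for this kernel-era signature, with placeholder names:

#include <linux/blkdev.h>

/*
 * Sketch: zero a range on a hypothetical device, allowing the discard
 * fast path when the device guarantees discarded data reads as zeroes.
 * "zero_device_range" and its parameters are illustrative names only.
 */
static int zero_device_range(struct block_device *bdev, sector_t sector,
			     sector_t nr_sects)
{
	/* 'true' permits the discard shortcut checked inside the helper. */
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS, true);
}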
Example 6
File: alloc.c Project: 7799/linux
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (!fifo_empty(&ca->free_inc)) {
			long bucket;

			fifo_pop(&ca->free_inc, bucket);

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.block_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			bch_prio_write(ca);
		}
	}
}
Example 7
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	ext4_debug("cmd = %u, arg = %lu\n", cmd, arg);

	switch (cmd) {
	case EXT4_IOC_GETFLAGS:
		ext4_get_inode_flags(ei);
		flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case EXT4_IOC_SETFLAGS: {
		handle_t *handle = NULL;
		int err, migrate = 0;
		struct ext4_iloc iloc;
		unsigned int oldflags, mask, i;
		unsigned int jflag;

		if (!inode_owner_or_capable(inode))
			return -EACCES;

		if (get_user(flags, (int __user *) arg))
			return -EFAULT;

		err = mnt_want_write_file(filp);
		if (err)
			return err;

		flags = ext4_mask_flags(inode->i_mode, flags);

		err = -EPERM;
		mutex_lock(&inode->i_mutex);
		/* Is it quota file? Do not allow user to mess with it */
		if (IS_NOQUOTA(inode))
			goto flags_out;

		oldflags = ei->i_flags;

		/* The JOURNAL_DATA flag is modifiable only by root */
		jflag = flags & EXT4_JOURNAL_DATA_FL;

		/*
		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
		 * the relevant capability.
		 *
		 * This test looks nicer. Thanks to Pauline Middelink
		 */
		if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE))
				goto flags_out;
		}

		/*
		 * The JOURNAL_DATA flag can only be changed by
		 * the relevant capability.
		 */
		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
			if (!capable(CAP_SYS_RESOURCE))
				goto flags_out;
		}
		if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
			migrate = 1;

		if (flags & EXT4_EOFBLOCKS_FL) {
			/* we don't support adding EOFBLOCKS flag */
			if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
				err = -EOPNOTSUPP;
				goto flags_out;
			}
		} else if (oldflags & EXT4_EOFBLOCKS_FL)
			ext4_truncate(inode);

		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto flags_out;
		}
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err)
			goto flags_err;

		for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
			if (!(mask & EXT4_FL_USER_MODIFIABLE))
				continue;
			if (mask & flags)
				ext4_set_inode_flag(inode, i);
			else
				ext4_clear_inode_flag(inode, i);
		}

		ext4_set_inode_flags(inode);
		inode->i_ctime = ext4_current_time(inode);

		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
flags_err:
		ext4_journal_stop(handle);
		if (err)
			goto flags_out;

		if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
			err = ext4_change_inode_journal_flag(inode, jflag);
		if (err)
			goto flags_out;
		if (migrate) {
			if (flags & EXT4_EXTENTS_FL)
				err = ext4_ext_migrate(inode);
			else
				err = ext4_ind_migrate(inode);
		}

flags_out:
		mutex_unlock(&inode->i_mutex);
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_GETVERSION:
	case EXT4_IOC_GETVERSION_OLD:
		return put_user(inode->i_generation, (int __user *) arg);
	case EXT4_IOC_SETVERSION:
	case EXT4_IOC_SETVERSION_OLD: {
		handle_t *handle;
		struct ext4_iloc iloc;
		__u32 generation;
		int err;

		if (!inode_owner_or_capable(inode))
			return -EPERM;

		if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
			ext4_warning(sb, "Setting inode version is not "
				     "supported with metadata_csum enabled.");
			return -ENOTTY;
		}

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		if (get_user(generation, (int __user *) arg)) {
			err = -EFAULT;
			goto setversion_out;
		}

		mutex_lock(&inode->i_mutex);
		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto unlock_out;
		}
		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err == 0) {
			inode->i_ctime = ext4_current_time(inode);
			inode->i_generation = generation;
			err = ext4_mark_iloc_dirty(handle, inode, &iloc);
		}
		ext4_journal_stop(handle);

unlock_out:
		mutex_unlock(&inode->i_mutex);
setversion_out:
		mnt_drop_write_file(filp);
		return err;
	}
	case EXT4_IOC_GROUP_EXTEND: {
		ext4_fsblk_t n_blocks_count;
		int err, err2=0;

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		if (get_user(n_blocks_count, (__u32 __user *)arg)) {
			err = -EFAULT;
			goto group_extend_out;
		}

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto group_extend_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto group_extend_out;

		err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
group_extend_out:
		ext4_resize_end(sb);
		return err;
	}

	case EXT4_IOC_MOVE_EXT: {
		struct move_extent me;
		struct fd donor;
		int err;

		if (!(filp->f_mode & FMODE_READ) ||
		    !(filp->f_mode & FMODE_WRITE))
			return -EBADF;

		if (copy_from_user(&me,
			(struct move_extent __user *)arg, sizeof(me)))
			return -EFAULT;
		me.moved_len = 0;

		donor = fdget(me.donor_fd);
		if (!donor.file)
			return -EBADF;

		if (!(donor.file->f_mode & FMODE_WRITE)) {
			err = -EBADF;
			goto mext_out;
		}

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online defrag not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto mext_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto mext_out;

		err = ext4_move_extents(filp, donor.file, me.orig_start,
					me.donor_start, me.len, &me.moved_len);
		mnt_drop_write_file(filp);

		if (copy_to_user((struct move_extent __user *)arg,
				 &me, sizeof(me)))
			err = -EFAULT;
mext_out:
		fdput(donor);
		return err;
	}

	case EXT4_IOC_GROUP_ADD: {
		struct ext4_new_group_data input;
		int err, err2=0;

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
				sizeof(input))) {
			err = -EFAULT;
			goto group_add_out;
		}

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not supported with bigalloc");
			err = -EOPNOTSUPP;
			goto group_add_out;
		}

		err = mnt_want_write_file(filp);
		if (err)
			goto group_add_out;

		err = ext4_group_add(sb, &input);
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
		if (!err && ext4_has_group_desc_csum(sb) &&
		    test_opt(sb, INIT_INODE_TABLE))
			err = ext4_register_li_request(sb, input.group);
group_add_out:
		ext4_resize_end(sb);
		return err;
	}

	case EXT4_IOC_MIGRATE:
	{
		int err;
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		/*
		 * inode_mutex prevent write and truncate on the file.
		 * Read still goes through. We take i_data_sem in
		 * ext4_ext_swap_inode_data before we switch the
		 * inode format to prevent read.
		 */
		mutex_lock(&(inode->i_mutex));
		err = ext4_ext_migrate(inode);
		mutex_unlock(&(inode->i_mutex));
		mnt_drop_write_file(filp);
		return err;
	}

	case EXT4_IOC_ALLOC_DA_BLKS:
	{
		int err;
		if (!inode_owner_or_capable(inode))
			return -EACCES;

		err = mnt_want_write_file(filp);
		if (err)
			return err;
		err = ext4_alloc_da_blocks(inode);
		mnt_drop_write_file(filp);
		return err;
	}

	case EXT4_IOC_SWAP_BOOT:
		if (!(filp->f_mode & FMODE_WRITE))
			return -EBADF;
		return swap_inode_boot_loader(sb, inode);

	case EXT4_IOC_RESIZE_FS: {
		ext4_fsblk_t n_blocks_count;
		int err = 0, err2 = 0;
		ext4_group_t o_group = EXT4_SB(sb)->s_groups_count;

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "Online resizing not (yet) supported with bigalloc");
			return -EOPNOTSUPP;
		}

		if (copy_from_user(&n_blocks_count, (__u64 __user *)arg,
				   sizeof(__u64))) {
			return -EFAULT;
		}

		err = ext4_resize_begin(sb);
		if (err)
			return err;

		err = mnt_want_write_file(filp);
		if (err)
			goto resizefs_out;

		err = ext4_resize_fs(sb, n_blocks_count);
		if (EXT4_SB(sb)->s_journal) {
			jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
			err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
			jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
		}
		if (err == 0)
			err = err2;
		mnt_drop_write_file(filp);
		if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
		    ext4_has_group_desc_csum(sb) &&
		    test_opt(sb, INIT_INODE_TABLE))
			err = ext4_register_li_request(sb, o_group);

resizefs_out:
		ext4_resize_end(sb);
		return err;
	}

	case FSECTRIM:
	{
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;

		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				   EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
			ext4_msg(sb, KERN_ERR,
				 "FSECTRIM not supported with bigalloc");
			return -EOPNOTSUPP;
		}

		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
			sizeof(range)))
			return -EFAULT;

		range.minlen = max((unsigned int)range.minlen,
				   q->limits.discard_granularity);

		ret = blkdev_issue_discard(sb->s_bdev, range.start,
					   range.len, GFP_NOFS, 1);
		if (ret < 0)
			return ret;

		return 0;
	}

	case FITRIM:
	{
		struct request_queue *q = bdev_get_queue(sb->s_bdev);
		struct fstrim_range range;
		int ret = 0;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;

		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
		    sizeof(range)))
			return -EFAULT;

		range.minlen = max((unsigned int)range.minlen,
				   q->limits.discard_granularity);
		ret = ext4_trim_fs(sb, &range);
		if (ret < 0)
			return ret;

		if (copy_to_user((struct fstrim_range __user *)arg, &range,
		    sizeof(range)))
			return -EFAULT;

		return 0;
	}

	default:
		return -ENOTTY;
	}
}
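The FITRIM case above is what fstrim(8) drives from userspace: it fills in a struct fstrim_range, issues the ioctl on any open file descriptor belonging to the mounted filesystem (the caller needs CAP_SYS_ADMIN), and reads back the number of bytes trimmed in range.len. A minimal userspace sketch, assuming a hypothetical mount point:

/* Userspace sketch: trim an entire mounted filesystem via FITRIM.
 * "/mnt/example" is a placeholder mount point; requires CAP_SYS_ADMIN. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(void)
{
	struct fstrim_range range;
	int fd = open("/mnt/example", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;	/* whole filesystem */
	range.minlen = 0;	/* the filesystem rounds this up to discard_granularity */

	if (ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		close(fd);
		return 1;
	}

	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}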
Example 8
STATIC int
xfs_trim_extents(
    struct xfs_mount	*mp,
    xfs_agnumber_t		agno,
    xfs_fsblock_t		start,
    xfs_fsblock_t		len,
    xfs_fsblock_t		minlen,
    __uint64_t		*blocks_trimmed)
{
    struct block_device	*bdev = mp->m_ddev_targp->bt_bdev;
    struct xfs_btree_cur	*cur;
    struct xfs_buf		*agbp;
    struct xfs_perag	*pag;
    int			error;
    int			i;

    pag = xfs_perag_get(mp, agno);

    error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
    if (error || !agbp)
        goto out_put_perag;

    cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);

    /*
     * Force out the log.  This means any transactions that might have freed
     * space before we took the AGF buffer lock are now on disk, and the
     * volatile disk cache is flushed.
     */
    xfs_log_force(mp, XFS_LOG_SYNC);

    /*
     * Look up the longest btree in the AGF and start with it.
     */
    error = xfs_alloc_lookup_le(cur, 0,
                                XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
    if (error)
        goto out_del_cursor;

    /*
     * Loop until we are done with all extents that are large
     * enough to be worth discarding.
     */
    while (i) {
        xfs_agblock_t fbno;
        xfs_extlen_t flen;

        error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
        if (error)
            goto out_del_cursor;
        XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
        ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);

        /*
         * Too small?  Give up.
         */
        if (flen < minlen) {
            trace_xfs_discard_toosmall(mp, agno, fbno, flen);
            goto out_del_cursor;
        }

        /*
         * If the extent is entirely outside of the range we are
         * supposed to discard skip it.  Do not bother to trim
         * down partially overlapping ranges for now.
         */
        if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start ||
                XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) {
            trace_xfs_discard_exclude(mp, agno, fbno, flen);
            goto next_extent;
        }

        /*
         * If any blocks in the range are still busy, skip the
         * discard and try again the next time.
         */
        if (xfs_alloc_busy_search(mp, agno, fbno, flen)) {
            trace_xfs_discard_busy(mp, agno, fbno, flen);
            goto next_extent;
        }

        trace_xfs_discard_extent(mp, agno, fbno, flen);
        error = -blkdev_issue_discard(bdev,
                                      XFS_AGB_TO_DADDR(mp, agno, fbno),
                                      XFS_FSB_TO_BB(mp, flen),
                                      GFP_NOFS, 0);
        if (error)
            goto out_del_cursor;
        *blocks_trimmed += flen;

next_extent:
        error = xfs_btree_decrement(cur, 0, &i);
        if (error)
            goto out_del_cursor;
    }

out_del_cursor:
    xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
    xfs_buf_relse(agbp);
out_put_perag:
    xfs_perag_put(pag);
    return error;
}
Example 9
static int iblock_execute_unmap(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ibd = dev->dev_ptr;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	int ret = 0;
	int dl, bd_dl;

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		return -EINVAL;
	}

	buf = transport_kmap_data_sg(cmd);

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		ret = -EINVAL;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				 (unsigned long long)lba, range);

		if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
			ret = -EINVAL;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
			ret = -EINVAL;
			goto err;
		}

		ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
					   GFP_KERNEL, 0);
		if (ret < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n",
					ret);
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
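The UNMAP parameter list parsed above follows SBC-3: an 8-byte header (data length at offset 0, block descriptor data length at offset 2), then 16-byte block descriptors, each carrying a big-endian 64-bit LBA and a 32-bit block count followed by 4 reserved bytes. A hedged kernel-style sketch of one descriptor; the struct and function names are illustrative, not target-core API:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Illustrative only: one SBC-3 UNMAP block descriptor as parsed above. */
struct unmap_block_desc {
	u64 lba;	/* bytes 0..7, big-endian on the wire */
	u32 nlb;	/* bytes 8..11, big-endian number of logical blocks */
};

static void parse_unmap_block_desc(const unsigned char *p,
				   struct unmap_block_desc *d)
{
	d->lba = get_unaligned_be64(&p[0]);
	d->nlb = get_unaligned_be32(&p[8]);
	/* bytes 12..15 are reserved */
}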