Example No. 1
static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (ret)
		goto out;

	if (gfs2_is_stuffed(ip)) {
		u64 phys = ip->i_no_addr << inode->i_blkbits;
		u64 size = i_size_read(inode);
		u32 flags = FIEMAP_EXTENT_LAST|FIEMAP_EXTENT_NOT_ALIGNED|
			    FIEMAP_EXTENT_DATA_INLINE;
		phys += sizeof(struct gfs2_dinode);
		phys += start;
		if (start + len > size)
			len = size - start;
		if (start < size)
			ret = fiemap_fill_next_extent(fieinfo, start, phys,
						      len, flags);
		if (ret == 1)
			ret = 0;
	} else {
		ret = __generic_block_fiemap(inode, fieinfo, start, len,
					     gfs2_block_map);
	}

	gfs2_glock_dq_uninit(&gh);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
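
The VFS reaches a handler such as gfs2_fiemap() through the ->fiemap member of struct inode_operations. A minimal sketch of that wiring (all other members omitted; the struct name carries a _sketch suffix to mark it as illustrative rather than the exact gfs2 symbol):

static const struct inode_operations gfs2_file_iops_sketch = {
	/* ... other inode operations omitted ... */
	.fiemap		= gfs2_fiemap,
};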
Example No. 2
STATIC int
xfs_vn_fiemap(
	struct inode		*inode,
	struct fiemap_extent_info *fieinfo,
	u64			start,
	u64			length)
{
	xfs_inode_t		*ip = XFS_I(inode);
	struct getbmapx		bm;
	int			error;

	error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS);
	if (error)
		return error;

	/* Set up bmap header for xfs internal routine */
	bm.bmv_offset = BTOBB(start);
	/* Special case for whole file */
	if (length == FIEMAP_MAX_OFFSET)
		bm.bmv_length = -1LL;
	else
		bm.bmv_length = BTOBB(length);

	/* We add one because in getbmap world count includes the header */
	bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM :
					fieinfo->fi_extents_max + 1;
	bm.bmv_count = min_t(__s32, bm.bmv_count,
			     (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
	bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
		bm.bmv_iflags |= BMV_IF_ATTRFORK;
	if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
		bm.bmv_iflags |= BMV_IF_DELALLOC;

	error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo);
	if (error)
		return -error;

	return 0;
}
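
Every handler in these examples starts by calling fiemap_check_flags(), which rejects any FIEMAP flag the filesystem did not declare support for and reports the offending bits back through fi_flags. A paraphrased sketch of that generic helper (reconstructed from fs/ioctl.c of the same era, not copied verbatim):

int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags)
{
	u32 incompat_flags;

	/* Any requested flag outside the set the filesystem supports? */
	incompat_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags);
	if (incompat_flags) {
		/* Report the unsupported bits back to the caller. */
		fieinfo->fi_flags = incompat_flags;
		return -EBADR;
	}
	return 0;
}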
Example No. 3
STATIC int
xfs_vn_fiemap(
	struct inode		*inode,
	struct fiemap_extent_info *fieinfo,
	u64			start,
	u64			length)
{
	xfs_inode_t		*ip = XFS_I(inode);
	struct getbmapx		bm;
	int			error;

	error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS);
	if (error)
		return error;

	/* Set up bmap header for xfs internal routine */
	bm.bmv_offset = BTOBB(start);
	/* Special case for whole file */
	if (length == FIEMAP_MAX_OFFSET)
		bm.bmv_length = -1LL;
	else
		bm.bmv_length = BTOBB(length);

	/* our formatter will tell xfs_getbmap when to stop. */
	bm.bmv_count = MAXEXTNUM;
	bm.bmv_iflags = BMV_IF_PREALLOC;
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
		bm.bmv_iflags |= BMV_IF_ATTRFORK;
	if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
		bm.bmv_iflags |= BMV_IF_DELALLOC;

	error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo);
	if (error)
		return -error;

	return 0;
}
Example No. 4
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}
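
The "ret == 1" special case above comes from the return convention of fiemap_fill_next_extent(): 1 means the user-supplied extent array is full (or FIEMAP_EXTENT_LAST was emitted), and only negative values are errors. An abbreviated sketch of that convention follows; the _sketch suffix marks it as a paraphrase, and the real helper in fs/ioctl.c also copies the extent to user space:

int fiemap_fill_next_extent_sketch(struct fiemap_extent_info *fieinfo,
				   u64 logical, u64 phys, u64 len, u32 flags)
{
	if (fieinfo->fi_extents_max == 0) {
		/* Caller passed fm_extent_count == 0: just count extents. */
		fieinfo->fi_extents_mapped++;
		return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
	}

	if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
		return 1;	/* array already full: stop early, not an error */

	/* ... copy_to_user() of the extent happens here; -EFAULT on failure ... */

	fieinfo->fi_extents_mapped++;
	if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
		return 1;	/* that was the last free slot */
	return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
}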
Example No. 5
int __generic_block_fiemap(struct inode *inode,
			   struct fiemap_extent_info *fieinfo, loff_t start,
			   loff_t len, get_block_t *get_block)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = FIEMAP_EXTENT_MERGED;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	/*
	 * Either the i_mutex or other appropriate locking needs to be held
	 * since we expect isize to not change at all through the duration of
	 * this call.
	 */
	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	/*
	 * Some filesystems can't deal with being asked to map less than
	 * blocksize, so make sure our len is at least block length.
	 */
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

	do {
		/*
		 * we set b_size to the total size we want so it will map as
		 * many contiguous blocks as possible at once
		 */
		memset(&map_bh, 0, sizeof(struct buffer_head));
		map_bh.b_size = len;

		ret = get_block(inode, start_blk, &map_bh, 0);
		if (ret)
			break;

		/* HOLE */
		if (!buffer_mapped(&map_bh)) {
			start_blk++;

			/*
			 * We want to handle the case where there is an
			 * allocated block at the front of the file, and then
			 * nothing but holes up to the end of the file properly,
			 * to make sure that extent at the front gets properly
			 * marked with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof &&
			    blk_to_logical(inode, start_blk) >= isize)
				past_eof = 1;

			/*
			 * First hole after going past the EOF, this is our
			 * last extent
			 */
			if (past_eof && size) {
				flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
			} else if (size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size, flags);
				size = 0;
			}

			/* if we have holes up to/past EOF then we're done */
			if (start_blk > last_blk || past_eof || ret)
				break;
		} else {
			/*
			 * We have gone over the length of what we wanted to
			 * map, and it wasn't the entire file, so add the extent
			 * we got last time and exit.
			 *
			 * This is for the case where say we want to map all the
			 * way up to the second to the last block in a file, but
			 * the last block is a hole, making the second to last
			 * block FIEMAP_EXTENT_LAST.  In this case we want to
			 * see if there is a hole after the second to last block
			 * so we can mark it properly.  If we found data after
			 * we exceeded the length we were requesting, then we
			 * are good to go, just add the extent to the fieinfo
			 * and break
			 */
			if (start_blk > last_blk && !whole_file) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				break;
			}

			/*
			 * if size != 0 then we know we already have an extent
			 * to add, so add it.
			 */
			if (size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				if (ret)
					break;
			}

			logical = blk_to_logical(inode, start_blk);
			phys = blk_to_logical(inode, map_bh.b_blocknr);
			size = map_bh.b_size;
			flags = FIEMAP_EXTENT_MERGED;

			start_blk += logical_to_blk(inode, size);

			/*
			 * If we are past the EOF, then we need to make sure as
			 * soon as we find a hole that the last extent we found
			 * is marked with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof && logical + size >= isize)
				past_eof = true;
		}
		cond_resched();
	} while (1);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	return ret;
}
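
Example No. 5 is the unlocked worker; the comment at its top asks for i_mutex "or other appropriate locking" because, in kernels of this vintage, the exported generic_block_fiemap() is a thin wrapper that takes i_mutex around the call, roughly:

int generic_block_fiemap(struct inode *inode,
			 struct fiemap_extent_info *fieinfo, u64 start,
			 u64 len, get_block_t *get_block)
{
	int ret;

	/* Hold i_mutex so i_size cannot change while the worker runs. */
	mutex_lock(&inode->i_mutex);
	ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
	mutex_unlock(&inode->i_mutex);

	return ret;
}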
Example No. 6
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
{
    struct buffer_head map_bh;
    sector_t start_blk, last_blk;
    loff_t isize = i_size_read(inode);
    u64 logical = 0, phys = 0, size = 0;
    u32 flags = 0;
    bool past_eof = false, whole_file = false;
    int ret = 0;

    ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
    if (ret)
        return ret;

    mutex_lock(&inode->i_mutex);

    if (len >= isize) {
        whole_file = true;
        len = isize;
    }

    if (logical_to_blk(inode, len) == 0)
        len = blk_to_logical(inode, 1);

    start_blk = logical_to_blk(inode, start);
    last_blk = logical_to_blk(inode, start + len - 1);
next:
    memset(&map_bh, 0, sizeof(struct buffer_head));
    map_bh.b_size = len;

    ret = get_data_block(inode, start_blk, &map_bh, 0,
                         F2FS_GET_BLOCK_FIEMAP);
    if (ret)
        goto out;

    /* HOLE */
    if (!buffer_mapped(&map_bh)) {
        start_blk++;

        if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
            past_eof = 1;

        if (past_eof && size) {
            flags |= FIEMAP_EXTENT_LAST;
            ret = fiemap_fill_next_extent(fieinfo, logical,
                                          phys, size, flags);
        } else if (size) {
            ret = fiemap_fill_next_extent(fieinfo, logical,
                                          phys, size, flags);
            size = 0;
        }

        /* if we have holes up to/past EOF then we're done */
        if (start_blk > last_blk || past_eof || ret)
            goto out;
    } else {
        if (start_blk > last_blk && !whole_file) {
            ret = fiemap_fill_next_extent(fieinfo, logical,
                                          phys, size, flags);
            goto out;
        }

        /*
         * if size != 0 then we know we already have an extent
         * to add, so add it.
         */
        if (size) {
            ret = fiemap_fill_next_extent(fieinfo, logical,
                                          phys, size, flags);
            if (ret)
                goto out;
        }

        logical = blk_to_logical(inode, start_blk);
        phys = blk_to_logical(inode, map_bh.b_blocknr);
        size = map_bh.b_size;
        flags = 0;
        if (buffer_unwritten(&map_bh))
            flags = FIEMAP_EXTENT_UNWRITTEN;

        start_blk += logical_to_blk(inode, size);

        /*
         * If we are past the EOF, then we need to make sure as
         * soon as we find a hole that the last extent we found
         * is marked with FIEMAP_EXTENT_LAST
         */
        if (!past_eof && logical + size >= isize)
            past_eof = true;
    }
    cond_resched();
    if (fatal_signal_pending(current))
        ret = -EINTR;
    else
        goto next;
out:
    if (ret == 1)
        ret = 0;

    mutex_unlock(&inode->i_mutex);
    return ret;
}
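
Examples No. 5 through No. 8 convert between byte offsets and block numbers with two small helpers. They are plain shifts by the inode's block-size bits; the definitions below are assumed from how the helpers are used above rather than quoted from any one tree:

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return offset >> inode->i_blkbits;
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (loff_t)blk << inode->i_blkbits;
}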
Example No. 7
/*
 * @inode - the inode to map
 * @fieinfo - the extent information to fill in and copy back to userspace
 * @get_block - the fs's get_block function
 *
 * This does FIEMAP for block based inodes.  Basically it will just loop
 * through get_block until we hit the number of extents we want to map, or we
 * go past the end of the file and hit a hole.
 *
 * If it is possible to have data blocks beyond a hole past @inode->i_size, then
 * please do not use this function, it will stop at the first unmapped block
 * beyond i_size
 */
int generic_block_fiemap(struct inode *inode,
			 struct fiemap_extent_info *fieinfo, u64 start,
			 u64 len, get_block_t *get_block)
{
	struct buffer_head tmp;
	unsigned int start_blk;
	long long length = 0, map_len = 0;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = FIEMAP_EXTENT_MERGED;
	int ret = 0;

	if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC)))
		return ret;

	start_blk = logical_to_blk(inode, start);

	/* guard against change */
	mutex_lock(&inode->i_mutex);

	length = (long long)min_t(u64, len, i_size_read(inode));
	map_len = length;

	do {
		/*
		 * we set b_size to the total size we want so it will map as
		 * many contiguous blocks as possible at once
		 */
		memset(&tmp, 0, sizeof(struct buffer_head));
		tmp.b_size = map_len;

		ret = get_block(inode, start_blk, &tmp, 0);
		if (ret)
			break;

		/* HOLE */
		if (!buffer_mapped(&tmp)) {
			/*
			 * first hole after going past the EOF, this is our
			 * last extent
			 */
			if (length <= 0) {
				flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				break;
			}

			length -= blk_to_logical(inode, 1);

			/* if we have holes up to/past EOF then we're done */
			if (length <= 0)
				break;

			start_blk++;
		} else {
			if (length <= 0 && size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				if (ret)
					break;
			}

			logical = blk_to_logical(inode, start_blk);
			phys = blk_to_logical(inode, tmp.b_blocknr);
			size = tmp.b_size;
			flags = FIEMAP_EXTENT_MERGED;

			length -= tmp.b_size;
			start_blk += logical_to_blk(inode, size);

			/*
			 * if we are past the EOF we need to loop again to see
			 * if there is a hole so we can mark this extent as the
			 * last one, and if not keep mapping things until we
			 * find a hole, or we run out of slots in the extent
			 * array
			 */
			if (length <= 0)
				continue;

			ret = fiemap_fill_next_extent(fieinfo, logical, phys,
						      size, flags);
			if (ret)
				break;
		}
		cond_resched();
	} while (1);

	mutex_unlock(&inode->i_mutex);

	/* if ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	return ret;
}
Example No. 8
int __generic_block_fiemap(struct inode *inode,
                           struct fiemap_extent_info *fieinfo, u64 start,
                           u64 len, get_block_t *get_block)
{
    struct buffer_head tmp;
    unsigned long long start_blk;
    long long length = 0, map_len = 0;
    u64 logical = 0, phys = 0, size = 0;
    u32 flags = FIEMAP_EXTENT_MERGED;
    int ret = 0, past_eof = 0, whole_file = 0;

    if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC)))
        return ret;

    start_blk = logical_to_blk(inode, start);

    length = (long long)min_t(u64, len, i_size_read(inode));
    if (length < len)
        whole_file = 1;

    map_len = length;

    do {
        /*
         * we set b_size to the total size we want so it will map as
         * many contiguous blocks as possible at once
         */
        memset(&tmp, 0, sizeof(struct buffer_head));
        tmp.b_size = map_len;

        ret = get_block(inode, start_blk, &tmp, 0);
        if (ret)
            break;

        /* HOLE */
        if (!buffer_mapped(&tmp)) {
            length -= blk_to_logical(inode, 1);
            start_blk++;

            /*
             * we want to handle the case where there is an
             * allocated block at the front of the file, and then
             * nothing but holes up to the end of the file properly,
             * to make sure that extent at the front gets properly
             * marked with FIEMAP_EXTENT_LAST
             */
            if (!past_eof &&
                    blk_to_logical(inode, start_blk) >=
                    blk_to_logical(inode, 0)+i_size_read(inode))
                past_eof = 1;

            /*
             * first hole after going past the EOF, this is our
             * last extent
             */
            if (past_eof && size) {
                flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                              phys, size,
                                              flags);
                break;
            }

            /* if we have holes up to/past EOF then we're done */
            if (length <= 0 || past_eof)
                break;
        } else {
            /*
             * we have gone over the length of what we wanted to
             * map, and it wasn't the entire file, so add the extent
             * we got last time and exit.
             *
             * This is for the case where say we want to map all the
             * way up to the second to the last block in a file, but
             * the last block is a hole, making the second to last
             * block FIEMAP_EXTENT_LAST.  In this case we want to
             * see if there is a hole after the second to last block
             * so we can mark it properly.  If we found data after
             * we exceeded the length we were requesting, then we
             * are good to go, just add the extent to the fieinfo
             * and break
             */
            if (length <= 0 && !whole_file) {
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                              phys, size,
                                              flags);
                break;
            }

            /*
             * if size != 0 then we know we already have an extent
             * to add, so add it.
             */
            if (size) {
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                              phys, size,
                                              flags);
                if (ret)
                    break;
            }

            logical = blk_to_logical(inode, start_blk);
            phys = blk_to_logical(inode, tmp.b_blocknr);
            size = tmp.b_size;
            flags = FIEMAP_EXTENT_MERGED;

            length -= tmp.b_size;
            start_blk += logical_to_blk(inode, size);

            /*
             * If we are past the EOF, then we need to make sure as
             * soon as we find a hole that the last extent we found
             * is marked with FIEMAP_EXTENT_LAST
             */
            if (!past_eof &&
                    logical+size >=
                    blk_to_logical(inode, 0)+i_size_read(inode))
                past_eof = 1;
        }
        cond_resched();
    } while (1);

    /* if ret is 1 then we just hit the end of the extent array */
    if (ret == 1)
        ret = 0;

    return ret;
}
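
All of these handlers are reached from user space through the FS_IOC_FIEMAP ioctl. A minimal, self-contained caller that maps a whole file into up to 32 extents (error handling kept short; the 32-extent budget is arbitrary for the demo):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	unsigned int i, nr = 32;	/* arbitrary extent budget for the demo */
	struct fiemap *fm;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The extent array lives directly after the fiemap header. */
	fm = calloc(1, sizeof(*fm) + nr * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;

	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;	/* flush dirty pages first */
	fm->fm_extent_count = nr;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}

	for (i = 0; i < fm->fm_mapped_extents; i++) {
		struct fiemap_extent *fe = &fm->fm_extents[i];

		printf("logical %llu phys %llu len %llu flags 0x%x\n",
		       (unsigned long long)fe->fe_logical,
		       (unsigned long long)fe->fe_physical,
		       (unsigned long long)fe->fe_length,
		       fe->fe_flags);
	}

	free(fm);
	close(fd);
	return 0;
}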