Example #1
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
{
	__u64 byteaddr, ilen;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	struct node_info ni;
	struct page *ipage;
	int err = 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!f2fs_has_inline_data(inode)) {
		err = -EAGAIN;
		goto out;
	}

	ilen = min_t(size_t, MAX_INLINE_DATA, i_size_read(inode));
	if (start >= ilen)
		goto out;
	if (start + len < ilen)
		ilen = start + len;
	ilen -= start;

	get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
	byteaddr += (char *)inline_data_addr(ipage) - (char *)F2FS_INODE(ipage);
	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
out:
	f2fs_put_page(ipage, 1);
	return err;
}
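The -EAGAIN above is not reported to userspace; it tells the caller that the inode no longer holds inline data. A hedged sketch of the assumed call-site pattern in f2fs_fiemap (the caller itself is not part of this example, so treat the shape as an assumption):

	/* Assumed caller pattern: -EAGAIN from the inline path means the data
	 * is no longer inline, so fall back to a regular block-mapping fiemap
	 * loop (Example #8 shows such a loop). */
	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}
	/* ...otherwise continue with the block-based mapping... */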
Example #2
int ext4_inline_data_fiemap(struct inode *inode,
			    struct fiemap_extent_info *fieinfo,
			    int *has_inline)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST;
	int error = 0;
	struct ext4_iloc iloc;

	down_read(&EXT4_I(inode)->xattr_sem);
	if (!ext4_has_inline_data(inode)) {
		*has_inline = 0;
		goto out;
	}

	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		goto out;

	physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
	physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
	physical += offsetof(struct ext4_inode, i_block);
	length = i_size_read(inode);

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	brelse(iloc.bh);
out:
	up_read(&EXT4_I(inode)->xattr_sem);
	return (error < 0 ? error : 0);
}
Example #3
static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (ret)
		goto out;

	if (gfs2_is_stuffed(ip)) {
		u64 phys = ip->i_no_addr << inode->i_blkbits;
		u64 size = i_size_read(inode);
		u32 flags = FIEMAP_EXTENT_LAST|FIEMAP_EXTENT_NOT_ALIGNED|
			    FIEMAP_EXTENT_DATA_INLINE;
		phys += sizeof(struct gfs2_dinode);
		phys += start;
		if (start + len > size)
			len = size - start;
		if (start < size)
			ret = fiemap_fill_next_extent(fieinfo, start, phys,
						      len, flags);
		if (ret == 1)
			ret = 0;
	} else {
		ret = __generic_block_fiemap(inode, fieinfo, start, len,
					     gfs2_block_map);
	}

	gfs2_glock_dq_uninit(&gh);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
Example #4
int ext4_inline_data_fiemap(struct inode *inode,
			    struct fiemap_extent_info *fieinfo,
			    int *has_inline, __u64 start, __u64 len)
{
	__u64 physical = 0;
	__u64 inline_len;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	int error = 0;
	struct ext4_iloc iloc;

	down_read(&EXT4_I(inode)->xattr_sem);
	if (!ext4_has_inline_data(inode)) {
		*has_inline = 0;
		goto out;
	}
	inline_len = min_t(size_t, ext4_get_inline_size(inode),
			   i_size_read(inode));
	if (start >= inline_len)
		goto out;
	if (start + len < inline_len)
		inline_len = start + len;
	inline_len -= start;

	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		goto out;

	physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
	physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
	physical += offsetof(struct ext4_inode, i_block);

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, start, physical,
						inline_len, flags);
	brelse(iloc.bh);
out:
	up_read(&EXT4_I(inode)->xattr_sem);
	return (error < 0 ? error : 0);
}
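The has_inline out-parameter lets the caller know whether the inline path actually served the request. A hedged sketch of how ext4_fiemap might consume it (kernel versions differ; this is an assumption, not the exact ext4 source):

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
						start, len);
		if (has_inline)
			return error;
	}
	/* data was not inline (or conversion raced in): fall through to the
	 * extent-based fiemap path */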
Example #5
/*
 * Call fiemap helper to fill in user data.
 * Returns positive errors to xfs_getbmap.
 */
STATIC int
xfs_fiemap_format(
	void			**arg,
	struct getbmapx		*bmv,
	int			*full)
{
	int			error;
	struct fiemap_extent_info *fieinfo = *arg;
	u32			fiemap_flags = 0;
	u64			logical, physical, length;

	/* Do nothing for a hole */
	if (bmv->bmv_block == -1LL)
		return 0;

	logical = BBTOB(bmv->bmv_offset);
	physical = BBTOB(bmv->bmv_block);
	length = BBTOB(bmv->bmv_length);

	if (bmv->bmv_oflags & BMV_OF_PREALLOC)
		fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN;
	else if (bmv->bmv_oflags & BMV_OF_DELALLOC) {
		fiemap_flags |= (FIEMAP_EXTENT_DELALLOC |
				 FIEMAP_EXTENT_UNKNOWN);
		physical = 0;   /* no block yet */
	}
	if (bmv->bmv_oflags & BMV_OF_LAST)
		fiemap_flags |= FIEMAP_EXTENT_LAST;

	error = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, fiemap_flags);
	if (error > 0) {
		error = 0;
		*full = 1;	/* user array now full */
	}

	return -error;
}
Example #6
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}
Example #7
int __generic_block_fiemap(struct inode *inode,
			   struct fiemap_extent_info *fieinfo, loff_t start,
			   loff_t len, get_block_t *get_block)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = FIEMAP_EXTENT_MERGED;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	/*
	 * Either the i_mutex or other appropriate locking needs to be held
	 * since we expect isize to not change at all through the duration of
	 * this call.
	 */
	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	/*
	 * Some filesystems can't deal with being asked to map less than
	 * blocksize, so make sure our len is at least block length.
	 */
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

	do {
		/*
		 * we set b_size to the total size we want so it will map as
		 * many contiguous blocks as possible at once
		 */
		memset(&map_bh, 0, sizeof(struct buffer_head));
		map_bh.b_size = len;

		ret = get_block(inode, start_blk, &map_bh, 0);
		if (ret)
			break;

		/* HOLE */
		if (!buffer_mapped(&map_bh)) {
			start_blk++;

			/*
			 * We want to handle the case where there is an
			 * allocated block at the front of the file, and then
			 * nothing but holes up to the end of the file properly,
			 * to make sure that extent at the front gets properly
			 * marked with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof &&
			    blk_to_logical(inode, start_blk) >= isize)
				past_eof = 1;

			/*
			 * First hole after going past the EOF, this is our
			 * last extent
			 */
			if (past_eof && size) {
				flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
			} else if (size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size, flags);
				size = 0;
			}

			/* if we have holes up to/past EOF then we're done */
			if (start_blk > last_blk || past_eof || ret)
				break;
		} else {
			/*
			 * We have gone over the length of what we wanted to
			 * map, and it wasn't the entire file, so add the extent
			 * we got last time and exit.
			 *
			 * This is for the case where say we want to map all the
			 * way up to the second to the last block in a file, but
			 * the last block is a hole, making the second to last
			 * block FIEMAP_EXTENT_LAST.  In this case we want to
			 * see if there is a hole after the second to last block
			 * so we can mark it properly.  If we found data after
			 * we exceeded the length we were requesting, then we
			 * are good to go, just add the extent to the fieinfo
			 * and break
			 */
			if (start_blk > last_blk && !whole_file) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				break;
			}

			/*
			 * if size != 0 then we know we already have an extent
			 * to add, so add it.
			 */
			if (size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				if (ret)
					break;
			}

			logical = blk_to_logical(inode, start_blk);
			phys = blk_to_logical(inode, map_bh.b_blocknr);
			size = map_bh.b_size;
			flags = FIEMAP_EXTENT_MERGED;

			start_blk += logical_to_blk(inode, size);

			/*
			 * If we are past the EOF, then we need to make sure as
			 * soon as we find a hole that the last extent we found
			 * is marked with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof && logical + size >= isize)
				past_eof = true;
		}
		cond_resched();
	} while (1);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	return ret;
}
Example #8
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
{
    struct buffer_head map_bh;
    sector_t start_blk, last_blk;
    loff_t isize = i_size_read(inode);
    u64 logical = 0, phys = 0, size = 0;
    u32 flags = 0;
    bool past_eof = false, whole_file = false;
    int ret = 0;

    ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
    if (ret)
        return ret;

    mutex_lock(&inode->i_mutex);

    if (len >= isize) {
        whole_file = true;
        len = isize;
    }

    if (logical_to_blk(inode, len) == 0)
        len = blk_to_logical(inode, 1);

    start_blk = logical_to_blk(inode, start);
    last_blk = logical_to_blk(inode, start + len - 1);
next:
    memset(&map_bh, 0, sizeof(struct buffer_head));
    map_bh.b_size = len;

    ret = get_data_block(inode, start_blk, &map_bh, 0,
                         F2FS_GET_BLOCK_FIEMAP);
    if (ret)
        goto out;

    /* HOLE */
    if (!buffer_mapped(&map_bh)) {
        start_blk++;

        if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
            past_eof = 1;

        if (past_eof && size) {
            flags |= FIEMAP_EXTENT_LAST;
            ret = fiemap_fill_next_extent(fieinfo, logical,
                                          phys, size, flags);
        } else if (size) {
            ret = fiemap_fill_next_extent(fieinfo, logical,
                                          phys, size, flags);
            size = 0;
        }

        /* if we have holes up to/past EOF then we're done */
        if (start_blk > last_blk || past_eof || ret)
            goto out;
    } else {
        if (start_blk > last_blk && !whole_file) {
            ret = fiemap_fill_next_extent(fieinfo, logical,
                                          phys, size, flags);
            goto out;
        }

        /*
         * if size != 0 then we know we already have an extent
         * to add, so add it.
         */
        if (size) {
            ret = fiemap_fill_next_extent(fieinfo, logical,
                                          phys, size, flags);
            if (ret)
                goto out;
        }

        logical = blk_to_logical(inode, start_blk);
        phys = blk_to_logical(inode, map_bh.b_blocknr);
        size = map_bh.b_size;
        flags = 0;
        if (buffer_unwritten(&map_bh))
            flags = FIEMAP_EXTENT_UNWRITTEN;

        start_blk += logical_to_blk(inode, size);

        /*
         * If we are past the EOF, then we need to make sure as
         * soon as we find a hole that the last extent we found
         * is marked with FIEMAP_EXTENT_LAST
         */
        if (!past_eof && logical + size >= isize)
            past_eof = true;
    }
    cond_resched();
    if (fatal_signal_pending(current))
        ret = -EINTR;
    else
        goto next;
out:
    if (ret == 1)
        ret = 0;

    mutex_unlock(&inode->i_mutex);
    return ret;
}
Example #9
/*
 * @inode - the inode to map
 * @fieinfo - the fiemap info struct that will be passed back to userspace
 * @get_block - the fs's get_block function
 *
 * This does FIEMAP for block based inodes.  Basically it will just loop
 * through get_block until we hit the number of extents we want to map, or we
 * go past the end of the file and hit a hole.
 *
 * If it is possible to have data blocks beyond a hole past @inode->i_size, then
 * please do not use this function, it will stop at the first unmapped block
 * beyond i_size
 */
int generic_block_fiemap(struct inode *inode,
			 struct fiemap_extent_info *fieinfo, u64 start,
			 u64 len, get_block_t *get_block)
{
	struct buffer_head tmp;
	unsigned int start_blk;
	long long length = 0, map_len = 0;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = FIEMAP_EXTENT_MERGED;
	int ret = 0;

	if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC)))
		return ret;

	start_blk = logical_to_blk(inode, start);

	/* guard against change */
	mutex_lock(&inode->i_mutex);

	length = (long long)min_t(u64, len, i_size_read(inode));
	map_len = length;

	do {
		/*
		 * we set b_size to the total size we want so it will map as
		 * many contiguous blocks as possible at once
		 */
		memset(&tmp, 0, sizeof(struct buffer_head));
		tmp.b_size = map_len;

		ret = get_block(inode, start_blk, &tmp, 0);
		if (ret)
			break;

		/* HOLE */
		if (!buffer_mapped(&tmp)) {
			/*
			 * first hole after going past the EOF, this is our
			 * last extent
			 */
			if (length <= 0) {
				flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				break;
			}

			length -= blk_to_logical(inode, 1);

			/* if we have holes up to/past EOF then we're done */
			if (length <= 0)
				break;

			start_blk++;
		} else {
			if (length <= 0 && size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				if (ret)
					break;
			}

			logical = blk_to_logical(inode, start_blk);
			phys = blk_to_logical(inode, tmp.b_blocknr);
			size = tmp.b_size;
			flags = FIEMAP_EXTENT_MERGED;

			length -= tmp.b_size;
			start_blk += logical_to_blk(inode, size);

			/*
			 * if we are past the EOF we need to loop again to see
			 * if there is a hole so we can mark this extent as the
			 * last one, and if not keep mapping things until we
			 * find a hole, or we run out of slots in the extent
			 * array
			 */
			if (length <= 0)
				continue;

			ret = fiemap_fill_next_extent(fieinfo, logical, phys,
						      size, flags);
			if (ret)
				break;
		}
		cond_resched();
	} while (1);

	mutex_unlock(&inode->i_mutex);

	/* if ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	return ret;
}
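Most block-based filesystems do not open-code a loop like the one above; they point their ->fiemap method at the generic helper and only supply their own get_block_t. A minimal sketch using hypothetical myfs names:

	/* Hypothetical wrapper: only the get_block callback is filesystem
	 * specific; the extent walking is done by generic_block_fiemap(). */
	static int myfs_fiemap(struct inode *inode,
			       struct fiemap_extent_info *fieinfo,
			       u64 start, u64 len)
	{
		return generic_block_fiemap(inode, fieinfo, start, len,
					    myfs_get_block);
	}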
Example #10
int __generic_block_fiemap(struct inode *inode,
                           struct fiemap_extent_info *fieinfo, u64 start,
                           u64 len, get_block_t *get_block)
{
    struct buffer_head tmp;
    unsigned long long start_blk;
    long long length = 0, map_len = 0;
    u64 logical = 0, phys = 0, size = 0;
    u32 flags = FIEMAP_EXTENT_MERGED;
    int ret = 0, past_eof = 0, whole_file = 0;

    if ((ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC)))
        return ret;

    start_blk = logical_to_blk(inode, start);

    length = (long long)min_t(u64, len, i_size_read(inode));
    if (length < len)
        whole_file = 1;

    map_len = length;

    do {
        /*
         * we set b_size to the total size we want so it will map as
         * many contiguous blocks as possible at once
         */
        memset(&tmp, 0, sizeof(struct buffer_head));
        tmp.b_size = map_len;

        ret = get_block(inode, start_blk, &tmp, 0);
        if (ret)
            break;

        /* HOLE */
        if (!buffer_mapped(&tmp)) {
            length -= blk_to_logical(inode, 1);
            start_blk++;

            /*
             * we want to handle the case where there is an
             * allocated block at the front of the file, and then
             * nothing but holes up to the end of the file properly,
             * to make sure that extent at the front gets properly
             * marked with FIEMAP_EXTENT_LAST
             */
            if (!past_eof &&
                    blk_to_logical(inode, start_blk) >=
                    blk_to_logical(inode, 0)+i_size_read(inode))
                past_eof = 1;

            /*
             * first hole after going past the EOF, this is our
             * last extent
             */
            if (past_eof && size) {
                flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                              phys, size,
                                              flags);
                break;
            }

            /* if we have holes up to/past EOF then we're done */
            if (length <= 0 || past_eof)
                break;
        } else {
            /*
             * we have gone over the length of what we wanted to
             * map, and it wasn't the entire file, so add the extent
             * we got last time and exit.
             *
             * This is for the case where say we want to map all the
             * way up to the second to the last block in a file, but
             * the last block is a hole, making the second to last
             * block FIEMAP_EXTENT_LAST.  In this case we want to
             * see if there is a hole after the second to last block
             * so we can mark it properly.  If we found data after
             * we exceeded the length we were requesting, then we
             * are good to go, just add the extent to the fieinfo
             * and break
             */
            if (length <= 0 && !whole_file) {
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                              phys, size,
                                              flags);
                break;
            }

            /*
             * if size != 0 then we know we already have an extent
             * to add, so add it.
             */
            if (size) {
                ret = fiemap_fill_next_extent(fieinfo, logical,
                                              phys, size,
                                              flags);
                if (ret)
                    break;
            }

            logical = blk_to_logical(inode, start_blk);
            phys = blk_to_logical(inode, tmp.b_blocknr);
            size = tmp.b_size;
            flags = FIEMAP_EXTENT_MERGED;

            length -= tmp.b_size;
            start_blk += logical_to_blk(inode, size);

            /*
             * If we are past the EOF, then we need to make sure as
             * soon as we find a hole that the last extent we found
             * is marked with FIEMAP_EXTENT_LAST
             */
            if (!past_eof &&
                    logical+size >=
                    blk_to_logical(inode, 0)+i_size_read(inode))
                past_eof = 1;
        }
        cond_resched();
    } while (1);

    /* if ret is 1 then we just hit the end of the extent array */
    if (ret == 1)
        ret = 0;

    return ret;
}
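For completeness: the extents that fiemap_fill_next_extent() records in all of the examples above are returned to userspace through the FS_IOC_FIEMAP ioctl. A minimal userspace sketch (error handling trimmed; the 32 extent slots are an arbitrary assumption):

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>
	#include <linux/fiemap.h>

	int main(int argc, char **argv)
	{
		unsigned int i, n = 32;		/* arbitrary number of extent slots */
		struct fiemap *fm = calloc(1, sizeof(*fm) +
					   n * sizeof(struct fiemap_extent));
		int fd = open(argv[1], O_RDONLY);

		fm->fm_start = 0;
		fm->fm_length = FIEMAP_MAX_OFFSET;	/* map the whole file */
		fm->fm_flags = FIEMAP_FLAG_SYNC;	/* accepted by fiemap_check_flags() above */
		fm->fm_extent_count = n;

		if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0) {
			for (i = 0; i < fm->fm_mapped_extents; i++)
				printf("logical %llu physical %llu len %llu flags 0x%x\n",
				       (unsigned long long)fm->fm_extents[i].fe_logical,
				       (unsigned long long)fm->fm_extents[i].fe_physical,
				       (unsigned long long)fm->fm_extents[i].fe_length,
				       fm->fm_extents[i].fe_flags);
		}

		close(fd);
		free(fm);
		return 0;
	}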