Example 1
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
    struct extent_info ei;
    struct inode *inode = dn->inode;

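    /* Fast path: the extent cache already maps this index to a physical block. */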
    if (f2fs_lookup_extent_cache(inode, index, &ei)) {
        dn->data_blkaddr = ei.blk + index - ei.fofs;
        return 0;
    }

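    /* Cache miss: walk the node tree and reserve a new block if none is allocated. */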
    return f2fs_reserve_block(dn, index);
}
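Example 1 returns the resolved address only through the dnode_of_data the caller passes in. A minimal caller sketch, assuming the caller already holds f2fs_lock_op() or an equivalent; the helper name resolve_one_block is hypothetical, not an f2fs function:

static int resolve_one_block(struct inode *inode, pgoff_t index,
                             block_t *blkaddr)
{
    struct dnode_of_data dn;
    int err;

    /* f2fs_get_block() expects a freshly initialized dnode_of_data. */
    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = f2fs_get_block(&dn, index);
    if (!err)
        *blkaddr = dn.data_blkaddr;
    /* Safe even on the extent-cache hit path: both page pointers are NULL. */
    f2fs_put_dnode(&dn);
    return err;
}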
Example 2
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * the f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                           int create, int flag)
{
    unsigned int maxblocks = map->m_len;
    struct dnode_of_data dn;
    int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
    pgoff_t pgofs, end_offset;
    int err = 0, ofs = 1;
    struct extent_info ei;
    bool allocated = false;

    map->m_len = 0;
    map->m_flags = 0;

    /* it only supports block size == page size */
    pgofs = (pgoff_t)map->m_lblk;

    if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
        map->m_pblk = ei.blk + pgofs - ei.fofs;
        map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
        map->m_flags = F2FS_MAP_MAPPED;
        goto out;
    }

    if (create)
        f2fs_lock_op(F2FS_I_SB(inode));

    /* When reading a hole, we need its node page */
    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, pgofs, mode);
    if (err) {
        if (err == -ENOENT)
            err = 0;
        goto unlock_out;
    }
    if (dn.data_blkaddr == NEW_ADDR) {
        if (flag == F2FS_GET_BLOCK_BMAP) {
            err = -ENOENT;
            goto put_out;
        } else if (flag == F2FS_GET_BLOCK_READ ||
                   flag == F2FS_GET_BLOCK_DIO) {
            goto put_out;
        }
        /*
         * if we are in the fiemap call path (flag = F2FS_GET_BLOCK_FIEMAP),
         * mark it as a mapped, unwritten block.
         */
    }

    if (dn.data_blkaddr != NULL_ADDR) {
        map->m_flags = F2FS_MAP_MAPPED;
        map->m_pblk = dn.data_blkaddr;
        if (dn.data_blkaddr == NEW_ADDR)
            map->m_flags |= F2FS_MAP_UNWRITTEN;
    } else if (create) {
        err = __allocate_data_block(&dn);
        if (err)
            goto put_out;
        allocated = true;
        map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
        map->m_pblk = dn.data_blkaddr;
    } else {
        if (flag == F2FS_GET_BLOCK_BMAP)
            err = -ENOENT;
        goto put_out;
    }

    end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
    map->m_len = 1;
    dn.ofs_in_node++;
    pgofs++;

get_next:
    if (dn.ofs_in_node >= end_offset) {
        if (allocated)
            sync_inode_page(&dn);
        allocated = false;
        f2fs_put_dnode(&dn);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, mode);
        if (err) {
            if (err == -ENOENT)
                err = 0;
            goto unlock_out;
        }

        if (dn.data_blkaddr == NEW_ADDR &&
                flag != F2FS_GET_BLOCK_FIEMAP)
            goto put_out;

        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
    }

    if (maxblocks > map->m_len) {
        block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
        if (blkaddr == NULL_ADDR && create) {
            err = __allocate_data_block(&dn);
            if (err)
                goto sync_out;
            allocated = true;
            map->m_flags |= F2FS_MAP_NEW;
            blkaddr = dn.data_blkaddr;
        }
        /* Give more consecutive addresses for readahead */
        if ((map->m_pblk != NEW_ADDR &&
                blkaddr == (map->m_pblk + ofs)) ||
                (map->m_pblk == NEW_ADDR &&
                 blkaddr == NEW_ADDR)) {
            ofs++;
            dn.ofs_in_node++;
            pgofs++;
            map->m_len++;
            goto get_next;
        }
    }
sync_out:
    if (allocated)
        sync_inode_page(&dn);
put_out:
    f2fs_put_dnode(&dn);
unlock_out:
    if (create)
        f2fs_unlock_op(F2FS_I_SB(inode));
out:
    trace_f2fs_map_blocks(inode, map, err);
    return err;
}
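f2fs_map_blocks() itself never touches a buffer_head; its callers adapt it. Below is a sketch of the get_block_t-style wrapper that typically sits on top of it, translating the returned map into a buffer_head for the generic block layer; it follows the usual f2fs pattern of that era but is reconstructed here, not quoted from the source:

static int __get_data_block(struct inode *inode, sector_t iblock,
                            struct buffer_head *bh, int create, int flag)
{
    struct f2fs_map_blocks map;
    int err;

    map.m_lblk = iblock;
    map.m_len = bh->b_size >> inode->i_blkbits;

    err = f2fs_map_blocks(inode, &map, create, flag);
    if (!err) {
        /* Hand the mapping back to the VFS via the buffer_head. */
        map_bh(bh, inode->i_sb, map.m_pblk);
        bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
        bh->b_size = map.m_len << inode->i_blkbits;
    }
    return err;
}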
Example 3
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
    struct address_space *mapping = inode->i_mapping;
    struct dnode_of_data dn;
    struct page *page;
    struct extent_info ei;
    int err;
    struct f2fs_io_info fio = {
        .sbi = F2FS_I_SB(inode),
        .type = DATA,
        .rw = rw,
        .encrypted_page = NULL,
    };

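    /* Encrypted regular files must go through ->readpage for decryption. */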
    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        return read_mapping_page(mapping, index, NULL);

    page = grab_cache_page(mapping, index);
    if (!page)
        return ERR_PTR(-ENOMEM);

    if (f2fs_lookup_extent_cache(inode, index, &ei)) {
        dn.data_blkaddr = ei.blk + index - ei.fofs;
        goto got_it;
    }

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
    if (err)
        goto put_err;
    f2fs_put_dnode(&dn);

    if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
        err = -ENOENT;
        goto put_err;
    }
got_it:
    if (PageUptodate(page)) {
        unlock_page(page);
        return page;
    }

    /*
     * A new dentry page was allocated but could not be written, since its
     * new inode page couldn't be allocated due to -ENOSPC.
     * In such a case, its blkaddr remains NEW_ADDR.
     * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
     */
    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return page;
    }

    fio.blk_addr = dn.data_blkaddr;
    fio.page = page;
    err = f2fs_submit_page_bio(&fio);
    if (err)
        goto put_err;
    return page;

put_err:
    f2fs_put_page(page, 1);
    return ERR_PTR(err);
}
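get_read_data_page() may return a page whose read bio is still in flight. A synchronous wrapper in the style of f2fs's get_lock_data_page(), which waits for the I/O and revalidates the page, might look like this; it is a reconstruction against the 3-argument signature above, not quoted from the source:

struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
    struct address_space *mapping = inode->i_mapping;
    struct page *page;
repeat:
    page = get_read_data_page(inode, index, READ_SYNC);
    if (IS_ERR(page))
        return page;

    /* Locking the page waits for the read submitted above to complete. */
    lock_page(page);
    if (unlikely(!PageUptodate(page))) {
        f2fs_put_page(page, 1);
        return ERR_PTR(-EIO);
    }
    /* The page may have been truncated while we slept; retry if so. */
    if (unlikely(page->mapping != mapping)) {
        f2fs_put_page(page, 1);
        goto repeat;
    }
    return page;
}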
Example 4
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei;
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update(inode))
		return -EINVAL;

	pg_start = range->start >> PAGE_CACHE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * Look up mapping info in the extent cache; skip defragmenting if the
	 * physical block addresses are contiguous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * Look up mapping info in the dnode page cache; skip defragmenting if
	 * all physical block addresses are contiguous, even if there are
	 * hole(s) in the logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;

	/*
	 * Make sure there are enough free sections for LFS allocation; this
	 * avoids running the defragmenter in SSR mode when free sections are
	 * being consumed intensively.
	 */
	if (has_not_enough_free_secs(sbi, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_CACHE_SHIFT;
	return err;
}
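For completeness, a sketch of the ioctl entry point that would feed a userspace f2fs_defragment range into f2fs_defragment_range() and report the defragmented length back. It is reconstructed in the usual f2fs ioctl style; the exact permission and validation checks are assumptions:

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range))) {
		err = -EFAULT;
		goto out;
	}

	err = f2fs_defragment_range(sbi, filp, &range);
	/* range.len now holds the number of bytes actually defragmented. */
	if (!err && copy_to_user((struct f2fs_defragment __user *)arg,
							&range, sizeof(range)))
		err = -EFAULT;
out:
	mnt_drop_write_file(filp);
	return err;
}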