Example #1
static int change_devices_uuid(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path path;
	struct btrfs_key key = {0, 0, 0};
	int ret = 0;

	btrfs_init_path(&path);
	/* No transaction here either; see the comment in change_extents_uuid() */
	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		if (key.type != BTRFS_DEV_ITEM_KEY ||
		    key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
			goto next;
		ret = change_device_uuid(root, path.nodes[0], path.slots[0]);
		if (ret < 0)
			goto out;
next:
		ret = btrfs_next_item(root, &path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_release_path(&path);
	return ret;
}
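Every example on this page follows the same iteration idiom: position a path at the lowest key of interest with btrfs_search_slot() (a NULL transaction handle makes it a read-only search), then step item by item with btrfs_next_item(), which returns 0 on success, a positive value once the tree is exhausted, and a negative errno on error. A minimal sketch of that skeleton, using only the helpers already shown above (walk_whole_tree and the processing placeholder are hypothetical):

static int walk_whole_tree(struct btrfs_root *root)
{
	struct btrfs_path path;
	struct btrfs_key key = {0, 0, 0};	/* smallest possible key */
	int ret;

	btrfs_init_path(&path);
	/* NULL transaction handle: read-only search, no COW */
	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		/* ... process the item at path.nodes[0], path.slots[0] ... */

		ret = btrfs_next_item(root, &path);
		if (ret < 0)
			goto out;	/* real error */
		if (ret > 0) {		/* walked past the last item */
			ret = 0;
			break;
		}
	}
out:
	btrfs_release_path(&path);
	return ret;
}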
Example #2
static int change_extents_uuid(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_path path;
	struct btrfs_key key = {0, 0, 0};
	int ret = 0;

	btrfs_init_path(&path);
	/*
	 * Here we don't use a transaction, as it would take a lot of
	 * reserved space, and that would make a near-full btrfs unable
	 * to change its UUID.
	 */
	ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct btrfs_extent_item *ei;
		struct extent_buffer *eb;
		u64 flags;
		u64 bytenr;

		btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY)
			goto next;
		ei = btrfs_item_ptr(path.nodes[0], path.slots[0],
				    struct btrfs_extent_item);
		flags = btrfs_extent_flags(path.nodes[0], ei);
		if (!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
			goto next;

		bytenr = key.objectid;
		eb = read_tree_block(root, bytenr, root->nodesize, 0);
		if (IS_ERR(eb)) {
			error("failed to read tree block: %llu", bytenr);
			ret = PTR_ERR(eb);
			goto out;
		}
		ret = change_header_uuid(root, eb);
		free_extent_buffer(eb);
		if (ret < 0) {
			error("failed to change uuid of tree block: %llu",
				bytenr);
			goto out;
		}
next:
		ret = btrfs_next_item(root, &path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			goto out;
		}
	}

out:
	btrfs_release_path(&path);
	return ret;
}
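The two key types accepted above encode an extent's length differently: with BTRFS_EXTENT_ITEM_KEY the byte length is stored in key.offset, while with BTRFS_METADATA_ITEM_KEY key.offset holds the tree level and the length is implicitly one node. A small helper stating that rule (extent_len_from_key is a made-up name; nodesize is the same field read_tree_block() consumes above):

static u64 extent_len_from_key(struct btrfs_root *root,
			       const struct btrfs_key *key)
{
	/* METADATA_ITEM: key.offset is the level, length is one tree block */
	if (key->type == BTRFS_METADATA_ITEM_KEY)
		return root->nodesize;
	/* EXTENT_ITEM: key.offset is the length in bytes */
	return key->offset;
}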
Example #3
static int map_one_extent(struct btrfs_fs_info *fs_info,
			  u64 *logical_ret, u64 *len_ret, int search_forward)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 logical;
	u64 len = 0;
	int ret = 0;

	BUG_ON(!logical_ret);
	logical = *logical_ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = logical;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path,
				0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);
	ret = 0;

again:
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((search_forward && key.objectid < logical) ||
	    (!search_forward && key.objectid > logical) ||
	    (key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY)) {
		if (!search_forward)
			ret = btrfs_previous_extent_item(fs_info->extent_root,
							 path, 0);
		else
			ret = btrfs_next_item(fs_info->extent_root, path);
		if (ret)
			goto out;
		goto again;
	}
	logical = key.objectid;
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		len = fs_info->tree_root->leafsize;
	else
		len = key.offset;

out:
	btrfs_free_path(path);
	if (!ret) {
		*logical_ret = logical;
		if (len_ret)
			*len_ret = len;
	}
	return ret;
}
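A caller can enumerate extents from some logical address by moving the cursor past each extent map_one_extent() reports: the extent's start comes back in *logical_ret and its length in *len_ret, and the positive "nothing further" return of the item-stepping helpers propagates out. A sketch under those assumptions (walk_extents_from is a hypothetical name):

static int walk_extents_from(struct btrfs_fs_info *fs_info, u64 start)
{
	u64 logical = start;
	u64 len = 0;
	int ret;

	while (1) {
		/* search_forward = 1: find the next extent at or after logical */
		ret = map_one_extent(fs_info, &logical, &len, 1);
		if (ret)	/* <0 error, >0 no further extents */
			break;
		/* ... process extent [logical, logical + len) ... */
		logical += len;	/* resume just past this extent */
	}
	return ret < 0 ? ret : 0;
}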
Example #4
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
			    int (*check_func)(struct btrfs_fs_info *, u8 *, u8,
					      u64))
{
	struct btrfs_root *root = fs_info->uuid_root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	int slot;
	u32 item_size;
	unsigned long offset;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

again_search_slot:
	ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto out;
	}

	while (1) {
		cond_resched();
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_UUID_KEY_SUBVOL &&
		    key.type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
			goto skip;

		offset = btrfs_item_ptr_offset(leaf, slot);
		item_size = btrfs_item_size_nr(leaf, slot);
		if (!IS_ALIGNED(item_size, sizeof(u64))) {
			btrfs_warn(fs_info,
				   "uuid item with illegal size %lu!",
				   (unsigned long)item_size);
			goto skip;
		}
		while (item_size) {
			u8 uuid[BTRFS_UUID_SIZE];
			__le64 subid_le;
			u64 subid_cpu;

			put_unaligned_le64(key.objectid, uuid);
			put_unaligned_le64(key.offset, uuid + sizeof(u64));
			read_extent_buffer(leaf, &subid_le, offset,
					   sizeof(subid_le));
			subid_cpu = le64_to_cpu(subid_le);
			ret = check_func(fs_info, uuid, key.type, subid_cpu);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_uuid_iter_rem(root, uuid, key.type,
							  subid_cpu);
				if (ret == 0) {
					/*
					 * this might look inefficient, but the
					 * justification is that it is an
					 * exception that check_func returns 1,
					 * and that in the regular case only one
					 * entry per UUID exists.
					 */
					goto again_search_slot;
				}
				if (ret < 0 && ret != -ENOENT)
					goto out;
			}
			item_size -= sizeof(subid_le);
			offset += sizeof(subid_le);
		}

skip:
		ret = btrfs_next_item(root, path);
		if (ret == 0)
			continue;
		else if (ret > 0)
			ret = 0;
		break;
	}

out:
	btrfs_free_path(path);
	return ret;
}
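The callback contract in Example #4: check_func receives the 16-byte UUID reassembled from the key, the key type, and one subvolume id per invocation; returning a positive value asks the iterator to delete that entry, a negative value aborts the walk, and 0 keeps it. A minimal "keep everything" callback as a usage sketch (uuid_tree_keep_all is an invented name):

static int uuid_tree_keep_all(struct btrfs_fs_info *fs_info, u8 *uuid,
			      u8 type, u64 subid)
{
	/* 0 = keep this entry; >0 would request deletion, <0 aborts */
	return 0;
}

/* ... later, e.g. when scanning the uuid tree ... */
ret = btrfs_uuid_tree_iterate(fs_info, uuid_tree_keep_all);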
Example #5
/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
						info_level + 1, key.offset,
						bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
						bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}
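The continue/break pair above relies on key ordering: within one objectid, items sort by type, and the keyed backref types form a contiguous range, so anything below BTRFS_TREE_BLOCK_REF_KEY may still be followed by backrefs while anything above BTRFS_SHARED_DATA_REF_KEY cannot be. The same filter as a standalone predicate (in_keyed_backref_range is illustrative only):

static inline int in_keyed_backref_range(const struct btrfs_key *key)
{
	/* TREE_BLOCK_REF < EXTENT_DATA_REF < SHARED_BLOCK_REF <
	 * SHARED_DATA_REF: a contiguous span of key types */
	return key->type >= BTRFS_TREE_BLOCK_REF_KEY &&
	       key->type <= BTRFS_SHARED_DATA_REF_KEY;
}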
Example #6
/*
 * Get the first file extent that covers (part of) the given range.
 *
 * Unlike the kernel, which uses extent_map to handle holes even when
 * no-holes is enabled, progs have no such infrastructure, so the caller
 * must take extra care with no-holes.
 *
 * Return 0 if found; path points to the file extent.
 * Return >0 if not found; path points to the insert position.
 * Return <0 on error.
 */
int btrfs_get_extent(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     u64 ino, u64 offset, u64 len, int ins_len)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *fi_item;
	u64 end = 0;
	int ret = 0;
	int not_found = 1;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, ins_len,
				ins_len ? 1 : 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/* Check the previous file extent */
		ret = btrfs_previous_item(root, path, ino,
					  BTRFS_EXTENT_DATA_KEY);
		if (ret < 0)
			goto out;
		if (ret > 0)
			goto check_next;
	}
	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
	if (found_key.objectid != ino ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY)
		goto check_next;

	fi_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				 struct btrfs_file_extent_item);
	end = found_key.offset +
	      btrfs_file_extent_ram_bytes(path->nodes[0], fi_item);
	/*
	 * existing file extent
	 * |--------|	  |----|
	 *      |-------|
	 *      offset + len
	 * OR
	 * |---------------|
	 *	|-------|
	 */
	if (end > offset) {
		not_found = 0;
		goto out;
	}
check_next:
	ret = btrfs_next_item(root, path);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
	if (found_key.objectid != ino ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
		ret = 1;
		goto out;
	}
	if (found_key.offset < offset + len)
		/*
		 * existing file extent
		 * |---|	|------|
		 *	|-------|
		 *	offset + len
		 */
		not_found = 0;
	else
		/*
		 * existing file extent
		 * |----|		|----|
		 *		|----|
		 *		offset + len
		 */
		not_found = 1;

	/*
	 * To keep the search behavior consistent with search_slot(),
	 * we need to go back to the previous leaf's nritems slot if
	 * we are at the first slot of the leaf.
	 */
	if (path->slots[0] == 0) {
		ret = btrfs_prev_leaf(root, path);
		/* Not possible */
		if (ret)
			goto out;
		path->slots[0] = btrfs_header_nritems(path->nodes[0]);
	}

out:
	if (ret == 0)
		ret = not_found;
	return ret;
}
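Both overlap tests in btrfs_get_extent() are instances of the standard half-open interval check: [a, b) and [c, d) intersect exactly when a < d and c < b. Stated once as a helper (ranges_overlap is illustrative, not part of the code above):

static inline int ranges_overlap(u64 a_start, u64 a_end,
				 u64 b_start, u64 b_end)
{
	/* half-open intervals [start, end) intersect iff each one
	 * starts before the other ends */
	return a_start < b_end && b_start < a_end;
}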
Example #7

static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
                                   struct btrfs_path *path,
                                   u32 expected_extent_count)
{
    struct btrfs_block_group_cache *block_group;
    struct btrfs_fs_info *fs_info;
    struct btrfs_root *root;
    struct btrfs_key key;
    u64 end;
    u64 total_found = 0;
    u32 extent_count = 0;
    int ret;

    block_group = caching_ctl->block_group;
    fs_info = block_group->fs_info;
    root = fs_info->free_space_root;

    end = block_group->key.objectid + block_group->key.offset;

    while (1) {
        ret = btrfs_next_item(root, path);
        if (ret < 0)
            goto out;
        if (ret)
            break;

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

        if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
            break;

        ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
        ASSERT(key.objectid < end && key.objectid + key.offset <= end);

        caching_ctl->progress = key.objectid;

        total_found += add_new_free_space(block_group, fs_info,
                                          key.objectid,
                                          key.objectid + key.offset);
        if (total_found > CACHING_CTL_WAKE_UP) {
            total_found = 0;
            wake_up(&caching_ctl->wait);
        }
        extent_count++;
    }

    if (extent_count != expected_extent_count) {
        btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
                  block_group->key.objectid, extent_count,
                  expected_extent_count);
        ASSERT(0);
        ret = -EIO;
        goto out;
    }

    caching_ctl->progress = (u64)-1;

    ret = 0;
out:
    return ret;
}
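Both free-space loaders throttle wake-ups the same way: add_new_free_space() returns the number of bytes it added, which accumulates in total_found until it crosses CACHING_CTL_WAKE_UP, and only then are waiters on caching_ctl->wait woken. That pattern in isolation (report_caching_progress is a made-up name):

static void report_caching_progress(struct btrfs_caching_control *caching_ctl,
                                    u64 *total_found, u64 added)
{
    *total_found += added;
    /* Wake waiters only every CACHING_CTL_WAKE_UP bytes, so the
     * caching thread doesn't hammer the waitqueue on every extent. */
    if (*total_found > CACHING_CTL_WAKE_UP) {
        *total_found = 0;
        wake_up(&caching_ctl->wait);
    }
}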
Example #8

static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
                                   struct btrfs_path *path,
                                   u32 expected_extent_count)
{
    struct btrfs_block_group_cache *block_group;
    struct btrfs_fs_info *fs_info;
    struct btrfs_root *root;
    struct btrfs_key key;
    int prev_bit = 0, bit;
    /* Initialize to silence GCC. */
    u64 extent_start = 0;
    u64 end, offset;
    u64 total_found = 0;
    u32 extent_count = 0;
    int ret;

    block_group = caching_ctl->block_group;
    fs_info = block_group->fs_info;
    root = fs_info->free_space_root;

    end = block_group->key.objectid + block_group->key.offset;

    while (1) {
        ret = btrfs_next_item(root, path);
        if (ret < 0)
            goto out;
        if (ret)
            break;

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

        if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
            break;

        ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
        ASSERT(key.objectid < end && key.objectid + key.offset <= end);

        caching_ctl->progress = key.objectid;

        offset = key.objectid;
        while (offset < key.objectid + key.offset) {
            bit = free_space_test_bit(block_group, path, offset);
            if (prev_bit == 0 && bit == 1) {
                extent_start = offset;
            } else if (prev_bit == 1 && bit == 0) {
                total_found += add_new_free_space(block_group,
                                                  fs_info,
                                                  extent_start,
                                                  offset);
                if (total_found > CACHING_CTL_WAKE_UP) {
                    total_found = 0;
                    wake_up(&caching_ctl->wait);
                }
                extent_count++;
            }
            prev_bit = bit;
            offset += block_group->sectorsize;
        }
    }
    if (prev_bit == 1) {
        total_found += add_new_free_space(block_group, fs_info,
                                          extent_start, end);
        extent_count++;
    }

    if (extent_count != expected_extent_count) {
        btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
                  block_group->key.objectid, extent_count,
                  expected_extent_count);
        ASSERT(0);
        ret = -EIO;
        goto out;
    }

    caching_ctl->progress = (u64)-1;

    ret = 0;
out:
    return ret;
}
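Which loader runs is decided by the block group's FREE_SPACE_INFO item, whose flags say whether the free space that follows is stored as plain extents or as bitmaps. A hedged dispatch sketch, assuming the path is already positioned at that info item and that the btrfs_free_space_flags()/btrfs_free_space_extent_count() accessors of the free space tree format are available (load_free_space itself is an invented wrapper):

static int load_free_space(struct btrfs_caching_control *caching_ctl,
                           struct btrfs_path *path)
{
    struct btrfs_free_space_info *info;
    u32 extent_count;
    u64 flags;

    /* Assumed: path points at the block group's FREE_SPACE_INFO item. */
    info = btrfs_item_ptr(path->nodes[0], path->slots[0],
                          struct btrfs_free_space_info);
    extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
    flags = btrfs_free_space_flags(path->nodes[0], info);

    /* The flags select the on-disk representation of the free space. */
    if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
        return load_free_space_bitmaps(caching_ctl, path, extent_count);
    return load_free_space_extents(caching_ctl, path, extent_count);
}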
Example #9

/*
 * Populate the free space tree by walking the extent tree. Operations on the
 * extent tree that happen as a result of writes to the free space tree will go
 * through the normal add/remove hooks.
 */
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
                                    struct btrfs_fs_info *fs_info,
                                    struct btrfs_block_group_cache *block_group)
{
    struct btrfs_root *extent_root = fs_info->extent_root;
    struct btrfs_path *path, *path2;
    struct btrfs_key key;
    u64 start, end;
    int ret;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;
    path->reada = 1;

    path2 = btrfs_alloc_path();
    if (!path2) {
        btrfs_free_path(path);
        return -ENOMEM;
    }

    ret = add_new_free_space_info(trans, fs_info, block_group, path2);
    if (ret)
        goto out;

    mutex_lock(&block_group->free_space_lock);

    /*
     * Iterate through all of the extent and metadata items in this block
     * group, adding the free space between them and the free space at the
     * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
     * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
     * contained in.
     */
    key.objectid = block_group->key.objectid;
    key.type = BTRFS_EXTENT_ITEM_KEY;
    key.offset = 0;

    ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
    if (ret < 0)
        goto out_locked;
    ASSERT(ret == 0);

    start = block_group->key.objectid;
    end = block_group->key.objectid + block_group->key.offset;
    while (1) {
        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                key.type == BTRFS_METADATA_ITEM_KEY) {
            if (key.objectid >= end)
                break;

            if (start < key.objectid) {
                ret = __add_to_free_space_tree(trans, fs_info,
                                               block_group,
                                               path2, start,
                                               key.objectid -
                                               start);
                if (ret)
                    goto out_locked;
            }
            start = key.objectid;
            if (key.type == BTRFS_METADATA_ITEM_KEY)
                start += fs_info->tree_root->nodesize;
            else
                start += key.offset;
        } else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
            if (key.objectid != block_group->key.objectid)
                break;
        }

        ret = btrfs_next_item(extent_root, path);
        if (ret < 0)
            goto out_locked;
        if (ret)
            break;
    }
    if (start < end) {
        ret = __add_to_free_space_tree(trans, fs_info, block_group,
                                       path2, start, end - start);
        if (ret)
            goto out_locked;
    }

    ret = 0;
out_locked:
    mutex_unlock(&block_group->free_space_lock);
out:
    btrfs_free_path(path2);
    btrfs_free_path(path);
    return ret;
}
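The loop above computes the complement of the allocated extents: start trails the walk, each gap [start, key.objectid) between consecutive allocated extents becomes free space, and the tail [start, end) is added last. The same invariant compressed into pure arithmetic (sum_free_space is illustrative; extents are assumed sorted and contained in [start, end)):

static u64 sum_free_space(u64 start, u64 end,
                          const u64 *extent_start, const u64 *extent_len,
                          int nr_extents)
{
    u64 free_bytes = 0;
    int i;

    for (i = 0; i < nr_extents; i++) {
        if (start < extent_start[i])
            free_bytes += extent_start[i] - start;  /* gap before extent */
        start = extent_start[i] + extent_len[i];    /* skip the extent */
    }
    if (start < end)
        free_bytes += end - start;  /* tail after the last extent */
    return free_bytes;
}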
Example #10
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, struct __prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   u64 total_refs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
		ret = btrfs_next_leaf(root, path);

	while (!ret && count < total_refs) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			count++;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		ret = btrfs_next_item(root, path);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
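When ulist_add_merge_ptr() reports that the parent extent buffer was already present (it returns 0 and hands back the previously stored aux pointer in old), the new extent_inode_elem chain is appended to the tail of the existing one, as in the loop above. The append, in isolation (chain_append is illustrative; the next field matches its use in the code):

static void chain_append(struct extent_inode_elem *old,
			 struct extent_inode_elem *eie)
{
	/* walk to the tail of the existing chain, then link the new one */
	while (old->next)
		old = old->next;
	old->next = eie;
}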