Example #1
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
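The setters above describe a regular (BTRFS_FILE_EXTENT_REG) extent: the item keyed at (objectid, BTRFS_EXTENT_DATA_KEY, pos) records that num_bytes of file data, starting offset bytes into the on-disk extent at disk_bytenr, back the file range beginning at pos, and disk_bytenr == 0 conventionally marks a hole. A minimal standalone sketch of that mapping for the uncompressed, unencrypted case follows; the struct and names are illustrative, not the on-disk format.

#include <stdint.h>
#include <stdio.h>

struct file_extent {
	uint64_t pos;            /* key.offset: logical file offset */
	uint64_t disk_bytenr;    /* start of the on-disk extent, 0 = hole */
	uint64_t disk_num_bytes; /* full length of the on-disk extent */
	uint64_t offset;         /* where our data starts inside that extent */
	uint64_t num_bytes;      /* bytes of the extent used by this item */
};

/* Map a logical file offset to the disk byte it is read from. */
int file_to_disk(const struct file_extent *fe, uint64_t file_off,
		 uint64_t *disk_off)
{
	if (file_off < fe->pos || file_off >= fe->pos + fe->num_bytes)
		return -1;		/* not covered by this extent */
	if (fe->disk_bytenr == 0)
		return 0;		/* a hole: reads back as zeros */
	*disk_off = fe->disk_bytenr + fe->offset + (file_off - fe->pos);
	return 1;
}

int main(void)
{
	/* 64KiB of data at file offset 4KiB, 8KiB into a 128KiB disk extent */
	struct file_extent fe = { 4096, 1048576, 131072, 8192, 65536 };
	uint64_t disk;

	if (file_to_disk(&fe, 4096, &disk) == 1)
		printf("file offset 4096 maps to disk byte %llu\n",
		       (unsigned long long)disk);
	return 0;
}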
Example #2
int btrfs_insert_inline_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, u64 objectid,
                               u64 offset, char *buffer, size_t size)
{
    struct btrfs_key key;
    struct btrfs_path *path;
    struct extent_buffer *leaf;
    unsigned long ptr;
    struct btrfs_file_extent_item *ei;
    u32 datasize;
    int err = 0;
    int ret;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    key.objectid = objectid;
    key.offset = offset;
    btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);

    datasize = btrfs_file_extent_calc_inline_size(size);
    ret = btrfs_insert_empty_item(trans, root, path, &key, datasize);
    if (ret) {
        err = ret;
        goto fail;
    }

    leaf = path->nodes[0];
    ei = btrfs_item_ptr(leaf, path->slots[0],
                        struct btrfs_file_extent_item);
    btrfs_set_file_extent_generation(leaf, ei, trans->transid);
    btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
    btrfs_set_file_extent_ram_bytes(leaf, ei, size);
    btrfs_set_file_extent_compression(leaf, ei, 0);
    btrfs_set_file_extent_encryption(leaf, ei, 0);
    btrfs_set_file_extent_other_encoding(leaf, ei, 0);

    ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;
    write_extent_buffer(leaf, buffer, ptr, size);
    btrfs_mark_buffer_dirty(leaf);
fail:
    btrfs_free_path(path);
    return err;
}
/*
 * Look up the root with the highest offset for a given objectid.  The key we
 * find is copied into 'key'.  Returns 0 if something is found, 1 if nothing is
 * found, and < 0 on error.
 */
int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
			struct btrfs_root_item *item, struct btrfs_key *key)
{
	struct btrfs_path *path;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	struct extent_buffer *l;
	int ret;
	int slot;

	search_key.objectid = objectid;
	search_key.type = BTRFS_ROOT_ITEM_KEY;
	search_key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;

	BUG_ON(ret == 0);
	if (path->slots[0] == 0) {
		ret = 1;
		goto out;
	}
	l = path->nodes[0];
	slot = path->slots[0] - 1;
	btrfs_item_key_to_cpu(l, &found_key, slot);
	if (found_key.objectid != objectid ||
	    found_key.type != BTRFS_ROOT_ITEM_KEY) {
		ret = 1;
		goto out;
	}
	if (item)
		read_extent_buffer(l, item, btrfs_item_ptr_offset(l, slot),
				   sizeof(*item));
	if (key)
		memcpy(key, &found_key, sizeof(found_key));
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
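The lookup above works because tree keys sort by objectid first, then type, then offset: a search for (objectid, BTRFS_ROOT_ITEM_KEY, (u64)-1) lands just past the last root item for that objectid, so the preceding slot, if it still matches objectid and type, holds the highest offset. A standalone sketch of that comparison order (illustrative, mirroring the cpu-key compare):

#include <stdint.h>

struct cpu_key {
	uint64_t objectid;
	uint8_t type;
	uint64_t offset;
};

/* Keys sort by objectid first, then type, then offset. */
int comp_cpu_keys(const struct cpu_key *k1, const struct cpu_key *k2)
{
	if (k1->objectid != k2->objectid)
		return k1->objectid < k2->objectid ? -1 : 1;
	if (k1->type != k2->type)
		return k1->type < k2->type ? -1 : 1;
	if (k1->offset != k2->offset)
		return k1->offset < k2->offset ? -1 : 1;
	return 0;
}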
Example #4
int btrfs_dedup_resume(struct btrfs_fs_info *fs_info,
		       struct btrfs_root *dedup_root)
{
	struct btrfs_dedup_status_item *status;
	struct btrfs_key key;
	struct btrfs_path *path;
	u64 blocksize;
	u64 limit;
	u16 type;
	u16 backend;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_DEDUP_STATUS_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, dedup_root, &key, path, 0, 0);
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	} else if (ret < 0) {
		goto out;
	}

	status = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_dedup_status_item);
	blocksize = btrfs_dedup_status_blocksize(path->nodes[0], status);
	limit = btrfs_dedup_status_limit(path->nodes[0], status);
	type = btrfs_dedup_status_hash_type(path->nodes[0], status);
	backend = btrfs_dedup_status_backend(path->nodes[0], status);

	ret = init_dedup_info(fs_info, type, backend, blocksize, limit);
	if (ret < 0)
		goto out;
	fs_info->dedup_info->dedup_root = dedup_root;

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = offset;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	btrfs_free_path(path);
	return ret;
}
Example #6
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret = 0;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = offset;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_free_path(path);
	return ret;
}
static int clear_free_space_tree(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
    struct btrfs_path *path;
    struct btrfs_key key;
    int nr;
    int ret;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    path->leave_spinning = 1;

    key.objectid = 0;
    key.type = 0;
    key.offset = 0;

    while (1) {
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
            goto out;

        nr = btrfs_header_nritems(path->nodes[0]);
        if (!nr)
            break;

        path->slots[0] = 0;
        ret = btrfs_del_items(trans, root, path, 0, nr);
        if (ret)
            goto out;

        btrfs_release_path(path);
    }

    ret = 0;
out:
    btrfs_free_path(path);
    return ret;
}
Example #8
/*
 *  Search forward for a root, starting with objectid 'search_start'.
 *  If a root key is found, the objectid we find is filled into
 *  'found_objectid' and 0 is returned.  < 0 is returned on error, 1 if
 *  there is nothing left in the tree.
 */
int btrfs_search_root(struct btrfs_root *root, u64 search_start,
		      u64 *found_objectid)
{
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret;

	root = root->fs_info->tree_root;
	search_key.objectid = search_start;
	search_key.type = (u8)-1;
	search_key.offset = (u64)-1;

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		ret = 1;
		goto out;
	}
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	}
	btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]);
	if (search_key.type != BTRFS_ROOT_ITEM_KEY) {
		search_key.offset++;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
	*found_objectid = search_key.objectid;

out:
	btrfs_free_path(path);
	return ret;
}
Example #9
/* drop the root item for 'key' from 'root' */
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;
	u32 refs;
	struct btrfs_root_item *ri;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret < 0)
		goto out;

	BUG_ON(ret != 0);
	leaf = path->nodes[0];
	ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);

	refs = btrfs_disk_root_refs(leaf, ri);
	BUG_ON(refs != 0);
	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	return ret;
}

#if 0 /* this will get used when snapshot deletion is implemented */
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
		       struct btrfs_root *tree_root,
		       u64 root_id, u8 type, u64 ref_id)
{
	struct btrfs_key key;
	int ret;
	struct btrfs_path *path;

	path = btrfs_alloc_path();

	key.objectid = root_id;
	key.type = type;
	key.offset = ref_id;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, tree_root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}
#endif
Example #10
/* drop the root item for 'key' from 'root' */
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_root_item *ri;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret < 0)
		goto out;

	BUG_ON(ret != 0);
	leaf = path->nodes[0];
	ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * copy the data in 'item' into the btree
 */
int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_root_item
		      *item)
{
	struct btrfs_path *path;
	struct extent_buffer *l;
	int ret;
	int slot;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret < 0) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	if (ret != 0) {
		btrfs_print_leaf(root, path->nodes[0]);
		printk(KERN_CRIT "unable to update root key %llu %u %llu\n",
		       (unsigned long long)key->objectid, key->type,
		       (unsigned long long)key->offset);
		BUG_ON(1);
	}

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr_offset(l, slot);
	write_extent_buffer(l, item, ptr, sizeof(*item));
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_free_path(path);
	return ret;
}
Example #12
/*
 * add a btrfs_root_ref item.  type is either BTRFS_ROOT_REF_KEY
 * or BTRFS_ROOT_BACKREF_KEY.
 *
 * The dirid, sequence, name and name_len refer to the directory entry
 * that is referencing the root.
 *
 * For a forward ref, the root_id is the id of the tree referencing
 * the root and ref_id is the id of the subvol  or snapshot.
 *
 * For a back ref the root_id is the id of the subvol or snapshot and
 * ref_id is the id of the tree referencing it.
 */
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
		       struct btrfs_root *tree_root,
		       u64 root_id, u8 type, u64 ref_id,
		       u64 dirid, u64 sequence,
		       const char *name, int name_len)
{
	struct btrfs_key key;
	int ret;
	struct btrfs_path *path;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	unsigned long ptr;


	path = btrfs_alloc_path();

	key.objectid = root_id;
	key.type = type;
	key.offset = ref_id;

	ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
				      sizeof(*ref) + name_len);
	BUG_ON(ret);

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	btrfs_set_root_ref_dirid(leaf, ref, dirid);
	btrfs_set_root_ref_sequence(leaf, ref, sequence);
	btrfs_set_root_ref_name_len(leaf, ref, name_len);
	ptr = (unsigned long)(ref + 1);
	write_extent_buffer(leaf, name, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_free_path(path);
	return ret;
}
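As the comment explains, each subvolume link can be recorded in both directions: a forward ref keyed by the referencing tree and a back ref keyed by the referenced subvolume. A hypothetical helper (not kernel code, and assuming the usual btrfs headers from the surrounding file) that records both directions with btrfs_add_root_ref() might look like this:

/* Hypothetical: record both directions of a subvolume link. */
int add_root_link(struct btrfs_trans_handle *trans,
		  struct btrfs_root *tree_root,
		  u64 parent_root_id, u64 subvol_id,
		  u64 dirid, u64 sequence,
		  const char *name, int name_len)
{
	int ret;

	/* forward ref: objectid = referencing tree, offset = subvolume */
	ret = btrfs_add_root_ref(trans, tree_root, parent_root_id,
				 BTRFS_ROOT_REF_KEY, subvol_id,
				 dirid, sequence, name, name_len);
	if (ret)
		return ret;

	/* back ref: objectid = subvolume, offset = referencing tree */
	return btrfs_add_root_ref(trans, tree_root, subvol_id,
				  BTRFS_ROOT_BACKREF_KEY, parent_root_id,
				  dirid, sequence, name, name_len);
}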
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info,
                                u64 start, u64 size)
{
    struct btrfs_block_group_cache *block_group;
    struct btrfs_path *path;
    int ret;

    if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
        return 0;

    path = btrfs_alloc_path();
    if (!path) {
        ret = -ENOMEM;
        goto out;
    }

    block_group = btrfs_lookup_block_group(fs_info, start);
    if (!block_group) {
        ASSERT(0);
        ret = -ENOENT;
        goto out;
    }

    mutex_lock(&block_group->free_space_lock);
    ret = __remove_from_free_space_tree(trans, fs_info, block_group, path,
                                        start, size);
    mutex_unlock(&block_group->free_space_lock);

    btrfs_put_block_group(block_group);
out:
    btrfs_free_path(path);
    if (ret)
        btrfs_abort_transaction(trans, ret);
    return ret;
}
Example #14
/*
 * Punch a hole in the range [offset, offset + len) for the file given by
 * ino and root.
 *
 * Unlike the kernel punch_hole, this will not zero or free existing
 * extents; instead it returns -EEXIST if there are any extents in the
 * hole range.
 */
int btrfs_punch_hole(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     u64 ino, u64 offset, u64 len)
{
	struct btrfs_path *path;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_get_extent(NULL, root, path, ino, offset, len, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		ret = -EEXIST;
		goto out;
	}

	ret = btrfs_insert_file_extent(trans, root, ino, offset, 0, 0, len);
out:
	btrfs_free_path(path);
	return ret;
}
Example #15
int btrfs_dedup_enable(struct btrfs_fs_info *fs_info, u16 type, u16 backend,
		       u64 blocksize, u64 limit)
{
	struct btrfs_dedup_info *dedup_info;
	struct btrfs_root *dedup_root;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_dedup_status_item *status;
	int create_tree;
	u64 compat_ro_flag = btrfs_super_compat_ro_flags(fs_info->super_copy);
	int ret = 0;

	/* Sanity check */
	if (blocksize > BTRFS_DEDUP_BLOCKSIZE_MAX ||
	    blocksize < BTRFS_DEDUP_BLOCKSIZE_MIN ||
	    blocksize < fs_info->tree_root->sectorsize ||
	    !is_power_of_2(blocksize))
		return -EINVAL;
	if (type > ARRAY_SIZE(btrfs_dedup_sizes))
		return -EINVAL;
	if (backend >= BTRFS_DEDUP_BACKEND_LAST)
		return -EINVAL;
	if (backend == BTRFS_DEDUP_BACKEND_INMEMORY && limit == 0)
		limit = 4096; /* default value */
	if (backend == BTRFS_DEDUP_BACKEND_ONDISK && limit != 0)
		limit = 0;

	/*
	 * If current fs doesn't support DEDUP feature, don't enable
	 * on-disk dedup.
	 */
	if (!(compat_ro_flag & BTRFS_FEATURE_COMPAT_RO_DEDUP) &&
	    backend == BTRFS_DEDUP_BACKEND_ONDISK)
		return -EINVAL;

	/* Enabling dedup on a read-only fs is meaningless and not allowed */
	if (fs_info->sb->s_flags & MS_RDONLY)
		return -EINVAL;

	if (fs_info->dedup_info) {
		dedup_info = fs_info->dedup_info;

		/* Check if we are re-enabling with a different dedup config */
		if (dedup_info->blocksize != blocksize ||
		    dedup_info->hash_type != type ||
		    dedup_info->backend != backend) {
			btrfs_dedup_disable(fs_info);
			goto enable;
		}

		/* An on-the-fly limit change is OK */
		mutex_lock(&dedup_info->lock);
		fs_info->dedup_info->limit_nr = limit;
		mutex_unlock(&dedup_info->lock);
		return 0;
	}

enable:
	create_tree = compat_ro_flag & BTRFS_FEATURE_COMPAT_RO_DEDUP;

	ret = init_dedup_info(fs_info, type, backend, blocksize, limit);
	dedup_info = fs_info->dedup_info;
	if (ret < 0)
		goto out;

	if (!create_tree)
		goto out;

	/* Create dedup tree for status at least */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(fs_info->tree_root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_free_path(path);
		goto out;
	}

	dedup_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_DEDUP_TREE_OBJECTID);
	if (IS_ERR(dedup_root)) {
		ret = PTR_ERR(dedup_root);
		btrfs_abort_transaction(trans, fs_info->tree_root, ret);
		btrfs_free_path(path);
		goto out;
	}

	dedup_info->dedup_root = dedup_root;

	key.objectid = 0;
	key.type = BTRFS_DEDUP_STATUS_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, dedup_root, path, &key,
				      sizeof(*status));
	if (ret < 0) {
		btrfs_abort_transaction(trans, fs_info->tree_root, ret);
		btrfs_free_path(path);
		goto out;
	}
	status = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_dedup_status_item);
	btrfs_set_dedup_status_blocksize(path->nodes[0], status, blocksize);
	btrfs_set_dedup_status_limit(path->nodes[0], status, limit);
	btrfs_set_dedup_status_hash_type(path->nodes[0], status, type);
	btrfs_set_dedup_status_backend(path->nodes[0], status, backend);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);

out:
	if (ret < 0) {
		kfree(dedup_info);
		fs_info->dedup_info = NULL;
	}
	return ret;
}
Example #16
/*
 * Return 0 for not found
 * Return >0 for found and set bytenr_ret
 * Return <0 for error
 */
static int ondisk_search_hash(struct btrfs_dedup_info *dedup_info, u8 *hash,
			      u64 *bytenr_ret, u32 *num_bytes_ret)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *dedup_root = dedup_info->dedup_root;
	u8 *buf = NULL;
	u64 hash_key;
	int hash_len = btrfs_dedup_sizes[dedup_info->hash_type];
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	buf = kmalloc(hash_len, GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&hash_key, hash + hash_len - 8, 8);
	key.objectid = hash_key;
	key.type = BTRFS_DEDUP_HASH_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, dedup_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	WARN_ON(ret == 0);
	while (1) {
		struct extent_buffer *node;
		struct btrfs_dedup_hash_item *hash_item;
		int slot;

		ret = btrfs_previous_item(dedup_root, path, hash_key,
					  BTRFS_DEDUP_HASH_ITEM_KEY);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			goto out;
		}

		node = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(node, &key, slot);

		if (key.type != BTRFS_DEDUP_HASH_ITEM_KEY ||
		    memcmp(&key.objectid, hash + hash_len - 8, 8))
			break;
		hash_item = btrfs_item_ptr(node, slot,
				struct btrfs_dedup_hash_item);
		read_extent_buffer(node, buf, (unsigned long)(hash_item + 1),
				   hash_len);
		if (!memcmp(buf, hash, hash_len)) {
			ret = 1;
			*bytenr_ret = key.offset;
			*num_bytes_ret = btrfs_dedup_hash_len(node, hash_item);
			break;
		}
	}
out:
	kfree(buf);
	btrfs_free_path(path);
	return ret;
}
Example #17
static int ondisk_add(struct btrfs_trans_handle *trans,
		      struct btrfs_dedup_info *dedup_info,
		      struct btrfs_dedup_hash *hash)
{
	struct btrfs_path *path;
	struct btrfs_root *dedup_root = dedup_info->dedup_root;
	struct btrfs_key key;
	struct btrfs_dedup_hash_item *hash_item;
	u64 bytenr;
	u32 num_bytes;
	int hash_len = btrfs_dedup_sizes[dedup_info->hash_type];
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&dedup_info->lock);

	ret = ondisk_search_bytenr(NULL, dedup_info, path, hash->bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	btrfs_release_path(path);

	ret = ondisk_search_hash(dedup_info, hash->hash, &bytenr, &num_bytes);
	if (ret < 0)
		goto out;
	/* Same hash found, don't re-add to save dedup tree space */
	if (ret > 0) {
		ret = 0;
		goto out;
	}

	/* Insert hash->bytenr item */
	memcpy(&key.objectid, hash->hash + hash_len - 8, 8);
	key.type = BTRFS_DEDUP_HASH_ITEM_KEY;
	key.offset = hash->bytenr;

	ret = btrfs_insert_empty_item(trans, dedup_root, path, &key,
			sizeof(*hash_item) + hash_len);
	WARN_ON(ret == -EEXIST);
	if (ret < 0)
		goto out;
	hash_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				   struct btrfs_dedup_hash_item);
	btrfs_set_dedup_hash_len(path->nodes[0], hash_item, hash->num_bytes);
	write_extent_buffer(path->nodes[0], hash->hash,
			    (unsigned long)(hash_item + 1), hash_len);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	/* Then bytenr->hash item */
	key.objectid = hash->bytenr;
	key.type = BTRFS_DEDUP_BYTENR_ITEM_KEY;
	memcpy(&key.offset, hash->hash + hash_len - 8, 8);

	ret = btrfs_insert_empty_item(trans, dedup_root, path, &key, hash_len);
	WARN_ON(ret == -EEXIST);
	if (ret < 0)
		goto out;
	write_extent_buffer(path->nodes[0], hash->hash,
			btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
			hash_len);
	btrfs_mark_buffer_dirty(path->nodes[0]);

out:
	mutex_unlock(&dedup_info->lock);
	btrfs_free_path(path);
	return ret;
}
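ondisk_add() stores each deduplicated extent twice so it can be found either by hash or by bytenr: the hash item uses the last 8 bytes of the hash as objectid and the extent bytenr as offset, and the bytenr item reverses that. A standalone sketch of how the two keys are derived (the key type values here are placeholders, not the real constants):

#include <stdint.h>
#include <string.h>

struct cpu_key { uint64_t objectid; uint8_t type; uint64_t offset; };

#define DEDUP_HASH_ITEM_KEY	1	/* placeholder value */
#define DEDUP_BYTENR_ITEM_KEY	2	/* placeholder value */

void build_dedup_keys(const uint8_t *hash, int hash_len, uint64_t bytenr,
		      struct cpu_key *hash_key, struct cpu_key *bytenr_key)
{
	uint64_t tail;

	/* the last 8 bytes of the hash act as the (possibly colliding) index */
	memcpy(&tail, hash + hash_len - 8, 8);

	hash_key->objectid = tail;		/* hash -> bytenr lookup */
	hash_key->type = DEDUP_HASH_ITEM_KEY;
	hash_key->offset = bytenr;

	bytenr_key->objectid = bytenr;		/* bytenr -> hash lookup */
	bytenr_key->type = DEDUP_BYTENR_ITEM_KEY;
	bytenr_key->offset = tail;
}

Collisions on the 8-byte tail are resolved by storing the full hash in the item body and comparing it with memcmp(), as ondisk_search_hash() in Example #16 does.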
Example #18
/*
 * insert a directory item in the tree, doing all the magic for
 * both indexes. 'dir' indicates which objectid to insert it into,
 * 'location' is the key to stuff into the directory item, 'type' is the
 * type of the inode we're pointing to, and 'index' is the sequence number
 * to use for the second index (if one is created).
 * Will return 0 or -ENOMEM
 */
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, const char *name, int name_len,
			  struct inode *dir, struct btrfs_key *location,
			  u8 type, u64 index)
{
	int ret = 0;
	int ret2 = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct extent_buffer *leaf;
	unsigned long name_ptr;
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	u32 data_size;

	key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
	key.offset = btrfs_name_hash(name, name_len);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	btrfs_cpu_key_to_disk(&disk_key, location);

	data_size = sizeof(*dir_item) + name_len;
	dir_item = insert_with_overflow(trans, root, path, &key, data_size,
					name, name_len);
	if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		if (ret == -EEXIST)
			goto second_insert;
		goto out_free;
	}

	leaf = path->nodes[0];
	btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
	btrfs_set_dir_type(leaf, dir_item, type);
	btrfs_set_dir_data_len(leaf, dir_item, 0);
	btrfs_set_dir_name_len(leaf, dir_item, name_len);
	btrfs_set_dir_transid(leaf, dir_item, trans->transid);
	name_ptr = (unsigned long)(dir_item + 1);

	write_extent_buffer(leaf, name, name_ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);

second_insert:
	/* FIXME, use some real flag for selecting the extra index */
	if (root == root->fs_info->tree_root) {
		ret = 0;
		goto out_free;
	}
	btrfs_release_path(path);

	ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
					      &disk_key, type, index);
out_free:
	btrfs_free_path(path);
	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}
Example #19
/*
 * walks the btree of allocated inodes and finds a hole.
 */
int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 dirid, u64 *objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	int slot = 0;
	u64 last_ino = 0;
	int start_found;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	u64 search_start = dirid;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	search_start = root->last_inode_alloc;
	search_start = max((unsigned long long)search_start,
				BTRFS_FIRST_FREE_OBJECTID);
	search_key.objectid = search_start;
	search_key.offset = 0;

	btrfs_init_path(path);
	start_found = 0;
	ret = btrfs_search_slot(trans, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;

	if (path->slots[0] > 0)
		path->slots[0]--;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			if (!start_found) {
				*objectid = search_start;
				start_found = 1;
				goto found;
			}
			*objectid = last_ino > search_start ?
				last_ino : search_start;
			goto found;
		}
		btrfs_item_key_to_cpu(l, &key, slot);
		if (key.objectid >= search_start) {
			if (start_found) {
				if (last_ino < search_start)
					last_ino = search_start;
				if (key.objectid > last_ino) {
					*objectid = last_ino;
					goto found;
				}
			}
		}
		start_found = 1;
		last_ino = key.objectid + 1;
		path->slots[0]++;
	}
	// FIXME -ENOSPC
found:
	root->last_inode_alloc = *objectid;
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	BUG_ON(*objectid < search_start);
	return 0;
error:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	return ret;
}
Example #20
static int do_setxattr(struct btrfs_trans_handle *trans,
		       struct inode *inode, const char *name,
		       const void *value, size_t size, int flags)
{
	struct btrfs_dir_item *di;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path;
	size_t name_len = strlen(name);
	int ret = 0;

	if (name_len + size > BTRFS_MAX_XATTR_SIZE(root))
		return -ENOSPC;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (flags & XATTR_REPLACE) {
		di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
					name_len, -1);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		} else if (!di) {
			ret = -ENODATA;
			goto out;
		}
		ret = btrfs_delete_one_dir_name(trans, root, path, di);
		if (ret)
			goto out;
		btrfs_release_path(path);

		/*
		 * remove the attribute
		 */
		if (!value)
			goto out;
	}

again:
	ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
				      name, name_len, value, size);
	if (ret == -EEXIST) {
		if (flags & XATTR_CREATE)
			goto out;
		/*
		 * We can't use the path we already have since we won't have the
		 * proper locking for a delete, so release the path and
		 * re-lookup to delete the thing.
		 */
		btrfs_release_path(path);
		di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
					name, name_len, -1);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		} else if (!di) {
			/* Shouldn't happen but just in case... */
			btrfs_release_path(path);
			goto again;
		}

		ret = btrfs_delete_one_dir_name(trans, root, path, di);
		if (ret)
			goto out;

		/*
		 * We have a value to set, so go back and try to insert it now.
		 */
		if (value) {
			btrfs_release_path(path);
			goto again;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
Example #21
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct btrfs_key key, found_key;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	int ret = 0, slot;
	size_t total_size = 0, size_left = size;
	unsigned long name_ptr;
	size_t name_len;

	/*
	 * ok we want all objects associated with this id.
	 * NOTE: we set key.offset = 0; because we want to start with the
	 * first xattr that we find and walk forward
	 */
	key.objectid = btrfs_ino(inode);
	btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	/* search for our xattrs */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];

		/* this is where we start walking through the path */
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * if we've reached the last slot in this leaf we need
			 * to go to the next leaf and reset everything
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* check to make sure this item is what we want */
		if (found_key.objectid != key.objectid)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_XATTR_ITEM_KEY)
			break;

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		if (verify_dir_item(root, leaf, di))
			continue;

		name_len = btrfs_dir_name_len(leaf, di);
		total_size += name_len + 1;

		/* we are just looking for how big our buffer needs to be */
		if (!size)
			goto next;

		if (!buffer || (name_len + 1) > size_left) {
			ret = -ERANGE;
			goto err;
		}

		name_ptr = (unsigned long)(di + 1);
		read_extent_buffer(leaf, buffer, name_ptr, name_len);
		buffer[name_len] = '\0';

		size_left -= name_len + 1;
		buffer += name_len + 1;
next:
		path->slots[0]++;
	}
	ret = total_size;

err:
	btrfs_free_path(path);

	return ret;
}
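btrfs_listxattr() follows the usual listxattr contract: with size == 0 it only reports how large a buffer is needed, otherwise it copies the NUL-terminated names back to back and returns the total length. From userspace the same two-step pattern looks like this (a plain libc sketch, not btrfs-specific):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	ssize_t len = listxattr(path, NULL, 0);	/* size query */
	char *buf, *p;

	if (len < 0) {
		perror("listxattr");
		return 1;
	}
	if (len == 0)
		return 0;			/* no xattrs */

	buf = malloc(len);
	if (!buf)
		return 1;
	len = listxattr(path, buf, len);	/* fetch the names */
	if (len < 0) {
		perror("listxattr");
		free(buf);
		return 1;
	}
	for (p = buf; p < buf + len; p += strlen(p) + 1)
		printf("%s\n", p);
	free(buf);
	return 0;
}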
Example #22
int btrfs_csum_file_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 alloc_end,
			  u64 bytenr, char *data, size_t len)
{
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	u64 next_offset = (u64)-1;
	int found_next = 0;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf = NULL;
	u64 csum_offset;
	u32 csum_result = ~(u32)0;
	u32 nritems;
	u32 ins_size;
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		leaf = path->nodes[0];
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >= MAX_CSUM_ITEMS(root, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems - 1) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = 0;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(root, path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail;
	if (ret == 0) {
		BUG();
	}
	if (path->slots[0] == 0) {
		goto insert;
	}
	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (file_key.offset - found_key.offset) / root->sectorsize;
	if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
		goto insert;
	}
	if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		u32 diff = (csum_offset + 1) * csum_size;
		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		if (diff != csum_size)
			goto insert;
		ret = btrfs_extend_item(trans, root, path, diff);
		BUG_ON(ret);
		goto csum;
	}

insert:
	btrfs_release_path(root, path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp = min(alloc_end, next_offset);
		tmp -= file_key.offset;
		tmp /= root->sectorsize;
		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	if (ret < 0)
		goto fail;
	if (ret != 0) {
		WARN_ON(1);
		goto fail;
	}
csum:
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	ret = 0;
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	csum_result = btrfs_csum_data(root, data, csum_result, len);
	btrfs_csum_final(csum_result, (char *)&csum_result);
	if (csum_result == 0) {
		printk("csum result is 0 for block %llu\n",
		       (unsigned long long)bytenr);
	}

	write_extent_buffer(leaf, &csum_result, (unsigned long)item,
			    csum_size);
	btrfs_mark_buffer_dirty(path->nodes[0]);
fail:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	return ret;
}
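Most of the branching above is arithmetic on an existing csum item: the checksum for the block at bytenr belongs in slot (bytenr - item_key.offset) / sectorsize, the item currently holds item_size / csum_size checksums, and an item may never grow beyond MAX_CSUM_ITEMS slots. A standalone sketch of that placement test (names and the MAX_SLOTS constant are illustrative):

#include <stdint.h>

#define MAX_SLOTS 256	/* stand-in for MAX_CSUM_ITEMS(root, csum_size) */

/*
 * Return the byte offset inside the csum item where the checksum for
 * 'bytenr' goes, or -1 if the item cannot hold it as-is.
 */
long csum_slot_offset(uint64_t item_start, uint32_t item_size,
		      uint64_t bytenr, uint32_t sectorsize,
		      uint16_t csum_size)
{
	uint64_t slot;

	if (bytenr < item_start)
		return -1;
	slot = (bytenr - item_start) / sectorsize;
	if (slot >= MAX_SLOTS)
		return -1;	/* already at max size, insert a new item */
	if (slot >= item_size / csum_size)
		return -1;	/* would need btrfs_extend_item() first */
	return (long)(slot * csum_size);
}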
Example #23
/*
 * called from commit_transaction. Writes changed device replace state to
 * disk.
 */
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_replace_item *ptr;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	btrfs_dev_replace_lock(dev_replace);
	if (!dev_replace->is_valid ||
	    !dev_replace->item_needs_writeback) {
		btrfs_dev_replace_unlock(dev_replace);
		return 0;
	}
	btrfs_dev_replace_unlock(dev_replace);

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		pr_warn("btrfs: error %d while searching for dev_replace item!\n",
			ret);
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/*
		 * need to delete old one and insert a new one.
		 * Since no attempt is made to recover any old state, if the
		 * dev_replace state is 'running', the data on the target
		 * drive is lost.
		 * It would be possible to recover the state: just make sure
		 * that the beginning of the item is never changed and always
		 * contains all the essential information. Then read this
		 * minimal set of information and use it as a base for the
		 * new state.
		 */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			pr_warn("btrfs: delete too small dev_replace item failed %d!\n",
				ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			pr_warn("btrfs: insert dev_replace item failed %d!\n",
				ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0],
			     struct btrfs_dev_replace_item);

	btrfs_dev_replace_lock(dev_replace);
	if (dev_replace->srcdev)
		btrfs_set_dev_replace_src_devid(eb, ptr,
			dev_replace->srcdev->devid);
	else
		btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
	btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
		dev_replace->cont_reading_from_srcdev_mode);
	btrfs_set_dev_replace_replace_state(eb, ptr,
		dev_replace->replace_state);
	btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
	btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
	btrfs_set_dev_replace_num_write_errors(eb, ptr,
		atomic64_read(&dev_replace->num_write_errors));
	btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
		atomic64_read(&dev_replace->num_uncorrectable_read_errors));
	dev_replace->cursor_left_last_write_of_item =
		dev_replace->cursor_left;
	btrfs_set_dev_replace_cursor_left(eb, ptr,
		dev_replace->cursor_left_last_write_of_item);
	btrfs_set_dev_replace_cursor_right(eb, ptr,
		dev_replace->cursor_right);
	dev_replace->item_needs_writeback = 0;
	btrfs_dev_replace_unlock(dev_replace);

	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);

	return ret;
}
/*
 * walks the btree of allocated inodes and finds a hole.
 */
int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 dirid, u64 *objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	int slot = 0;
	u64 last_ino = 0;
	int start_found;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	u64 search_start = dirid;

	mutex_lock(&root->objectid_mutex);
	if (root->last_inode_alloc >= BTRFS_FIRST_FREE_OBJECTID &&
	    root->last_inode_alloc < BTRFS_LAST_FREE_OBJECTID) {
		*objectid = ++root->last_inode_alloc;
		mutex_unlock(&root->objectid_mutex);
		return 0;
	}
	path = btrfs_alloc_path();
	BUG_ON(!path);
	search_start = max(search_start, BTRFS_FIRST_FREE_OBJECTID);
	search_key.objectid = search_start;
	search_key.type = 0;
	search_key.offset = 0;

	start_found = 0;
	ret = btrfs_search_slot(trans, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			if (!start_found) {
				*objectid = search_start;
				start_found = 1;
				goto found;
			}
			*objectid = last_ino > search_start ?
				last_ino : search_start;
			goto found;
		}
		btrfs_item_key_to_cpu(l, &key, slot);
		if (key.objectid >= search_start) {
			if (start_found) {
				if (last_ino < search_start)
					last_ino = search_start;
				if (key.objectid > last_ino) {
					*objectid = last_ino;
					goto found;
				}
			} else if (key.objectid > search_start) {
				*objectid = search_start;
				goto found;
			}
		}
		if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
			break;

		start_found = 1;
		last_ino = key.objectid + 1;
		path->slots[0]++;
	}
	BUG_ON(1);
found:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	BUG_ON(*objectid < search_start);
	mutex_unlock(&root->objectid_mutex);
	return 0;
error:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	mutex_unlock(&root->objectid_mutex);
	return ret;
}
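Both versions of btrfs_find_free_objectid() walk the allocated inode keys in order and keep last_ino one past the last number seen; the first key that jumps ahead of last_ino exposes a hole. The same idea over a plain sorted array, as a standalone sketch:

#include <stdint.h>
#include <stddef.h>

/*
 * Given a sorted list of allocated inode numbers, return the first free
 * objectid at or after search_start.  Illustrative only.
 */
uint64_t find_free_objectid(const uint64_t *allocated, size_t n,
			    uint64_t search_start)
{
	uint64_t last_ino = search_start;
	size_t i;

	for (i = 0; i < n; i++) {
		if (allocated[i] < search_start)
			continue;
		if (allocated[i] > last_ino)
			break;			/* gap before this inode */
		last_ino = allocated[i] + 1;
	}
	return last_ino;
}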
Example #25
static int set_file_xattrs(struct btrfs_root *root, u64 inode,
			   int fd, const char *file_name)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	u32 name_len = 0;
	u32 data_len = 0;
	u32 len = 0;
	u32 cur, total_len;
	char *name = NULL;
	char *data = NULL;
	int ret = 0;

	key.objectid = inode;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			do {
				ret = next_leaf(root, path);
				if (ret < 0) {
					fprintf(stderr,
						"Error searching for extended attributes: %d\n",
						ret);
					goto out;
				} else if (ret) {
					/* No more leaves to search */
					ret = 0;
					goto out;
				}
				leaf = path->nodes[0];
			} while (!leaf);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.type != BTRFS_XATTR_ITEM_KEY || key.objectid != inode)
			break;
		cur = 0;
		total_len = btrfs_item_size_nr(leaf, path->slots[0]);
		di = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_dir_item);

		while (cur < total_len) {
			len = btrfs_dir_name_len(leaf, di);
			if (len > name_len) {
				free(name);
				name = (char *) malloc(len + 1);
				if (!name) {
					ret = -ENOMEM;
					goto out;
				}
			}
			read_extent_buffer(leaf, name,
					   (unsigned long)(di + 1), len);
			name[len] = '\0';
			name_len = len;

			len = btrfs_dir_data_len(leaf, di);
			if (len > data_len) {
				free(data);
				data = (char *) malloc(len);
				if (!data) {
					ret = -ENOMEM;
					goto out;
				}
			}
			read_extent_buffer(leaf, data,
					   (unsigned long)(di + 1) + name_len,
					   len);
			data_len = len;

			if (fsetxattr(fd, name, data, data_len, 0)) {
				int err = errno;

				fprintf(stderr,
					"Error setting extended attribute %s on file %s: %s\n",
					name, file_name, strerror(err));
			}

			len = sizeof(*di) + name_len + data_len;
			cur += len;
			di = (struct btrfs_dir_item *)((char *)di + len);
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	free(name);
	free(data);

	return ret;
}
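The inner loop above relies on how several xattr entries can be packed into a single leaf item: each entry is a btrfs_dir_item header immediately followed by its name and its value, and the cursor advances by sizeof(*di) + name_len + data_len. A simplified standalone sketch of that walk over an ordinary memory buffer (the real header also carries the location key, transid and type, and its fields are little-endian on disk):

#include <stdint.h>
#include <stdio.h>

struct dir_item_hdr {		/* simplified stand-in for btrfs_dir_item */
	uint16_t data_len;
	uint16_t name_len;
};

void walk_xattr_item(const uint8_t *item, uint32_t total_len)
{
	uint32_t cur = 0;

	while (cur < total_len) {
		const struct dir_item_hdr *di =
			(const struct dir_item_hdr *)(item + cur);
		const char *name = (const char *)(di + 1);
		const uint8_t *value = (const uint8_t *)name + di->name_len;

		printf("xattr entry: name_len=%u data_len=%u\n",
		       di->name_len, di->data_len);
		(void)name;
		(void)value;
		cur += sizeof(*di) + di->name_len + di->data_len;
	}
}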
Example #26
int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
			u64 subid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *uuid_root = fs_info->uuid_root;
	int ret;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int slot;
	unsigned long offset;
	u32 item_size;
	unsigned long move_dst;
	unsigned long move_src;
	unsigned long move_len;

	if (WARN_ON_ONCE(!uuid_root)) {
		ret = -EINVAL;
		goto out;
	}

	btrfs_uuid_to_key(uuid, type, &key);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn(fs_info, "error %d while searching for uuid item!",
			   ret);
		goto out;
	}
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	offset = btrfs_item_ptr_offset(eb, slot);
	item_size = btrfs_item_size_nr(eb, slot);
	if (!IS_ALIGNED(item_size, sizeof(u64))) {
		btrfs_warn(fs_info, "uuid item with illegal size %lu!",
			   (unsigned long)item_size);
		ret = -ENOENT;
		goto out;
	}
	while (item_size) {
		__le64 read_subid;

		read_extent_buffer(eb, &read_subid, offset, sizeof(read_subid));
		if (le64_to_cpu(read_subid) == subid)
			break;
		offset += sizeof(read_subid);
		item_size -= sizeof(read_subid);
	}

	if (!item_size) {
		ret = -ENOENT;
		goto out;
	}

	item_size = btrfs_item_size_nr(eb, slot);
	if (item_size == sizeof(subid)) {
		ret = btrfs_del_item(trans, uuid_root, path);
		goto out;
	}

	move_dst = offset;
	move_src = offset + sizeof(subid);
	move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
	memmove_extent_buffer(eb, move_dst, move_src, move_len);
	btrfs_truncate_item(path, item_size - sizeof(subid), 1);

out:
	btrfs_free_path(path);
	return ret;
}
Example #27
int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_path *path = NULL;
	int item_size;
	struct btrfs_dev_replace_item *ptr;
	u64 src_devid;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
	if (ret) {
no_valid_dev_replace_entry_found:
		ret = 0;
		dev_replace->replace_state =
			BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED;
		dev_replace->cont_reading_from_srcdev_mode =
		    BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;
		dev_replace->replace_state = 0;
		dev_replace->time_started = 0;
		dev_replace->time_stopped = 0;
		atomic64_set(&dev_replace->num_write_errors, 0);
		atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
		dev_replace->cursor_left = 0;
		dev_replace->committed_cursor_left = 0;
		dev_replace->cursor_left_last_write_of_item = 0;
		dev_replace->cursor_right = 0;
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		dev_replace->is_valid = 0;
		dev_replace->item_needs_writeback = 0;
		goto out;
	}
	slot = path->slots[0];
	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);

	if (item_size != sizeof(struct btrfs_dev_replace_item)) {
		pr_warn("btrfs: dev_replace entry found has unexpected size, ignore entry\n");
		goto no_valid_dev_replace_entry_found;
	}

	src_devid = btrfs_dev_replace_src_devid(eb, ptr);
	dev_replace->cont_reading_from_srcdev_mode =
		btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr);
	dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr);
	dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
	dev_replace->time_stopped =
		btrfs_dev_replace_time_stopped(eb, ptr);
	atomic64_set(&dev_replace->num_write_errors,
		     btrfs_dev_replace_num_write_errors(eb, ptr));
	atomic64_set(&dev_replace->num_uncorrectable_read_errors,
		     btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
	dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
	dev_replace->committed_cursor_left = dev_replace->cursor_left;
	dev_replace->cursor_left_last_write_of_item = dev_replace->cursor_left;
	dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr);
	dev_replace->is_valid = 1;

	dev_replace->item_needs_writeback = 0;
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		dev_replace->srcdev = NULL;
		dev_replace->tgtdev = NULL;
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->srcdev = btrfs_find_device(fs_info, src_devid,
							NULL, NULL);
		dev_replace->tgtdev = btrfs_find_device(fs_info,
							BTRFS_DEV_REPLACE_DEVID,
							NULL, NULL);
		/*
		 * allow 'btrfs dev replace_cancel' if src/tgt device is
		 * missing
		 */
		if (!dev_replace->srcdev &&
		    !btrfs_test_opt(dev_root, DEGRADED)) {
			ret = -EIO;
			pr_warn("btrfs: cannot mount because device replace operation is ongoing and\n" "srcdev (devid %llu) is missing, need to run 'btrfs dev scan'?\n",
				(unsigned long long)src_devid);
		}
		if (!dev_replace->tgtdev &&
		    !btrfs_test_opt(dev_root, DEGRADED)) {
			ret = -EIO;
			pr_warn("btrfs: cannot mount because device replace operation is ongoing and\n" "tgtdev (devid %llu) is missing, need to run btrfs dev scan?\n",
				(unsigned long long)BTRFS_DEV_REPLACE_DEVID);
		}
		if (dev_replace->tgtdev) {
			if (dev_replace->srcdev) {
				dev_replace->tgtdev->total_bytes =
					dev_replace->srcdev->total_bytes;
				dev_replace->tgtdev->disk_total_bytes =
					dev_replace->srcdev->disk_total_bytes;
				dev_replace->tgtdev->bytes_used =
					dev_replace->srcdev->bytes_used;
			}
			dev_replace->tgtdev->is_tgtdev_for_dev_replace = 1;
			btrfs_init_dev_replace_tgtdev_for_resume(fs_info,
				dev_replace->tgtdev);
		}
		break;
	}

out:
	if (path)
		btrfs_free_path(path);
	return ret;
}
Example #28
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
			    int (*check_func)(struct btrfs_fs_info *, u8 *, u8,
					      u64))
{
	struct btrfs_root *root = fs_info->uuid_root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	int slot;
	u32 item_size;
	unsigned long offset;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

again_search_slot:
	ret = btrfs_search_forward(root, &key, path, BTRFS_OLDEST_GENERATION);
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto out;
	}

	while (1) {
		cond_resched();
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_UUID_KEY_SUBVOL &&
		    key.type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
			goto skip;

		offset = btrfs_item_ptr_offset(leaf, slot);
		item_size = btrfs_item_size_nr(leaf, slot);
		if (!IS_ALIGNED(item_size, sizeof(u64))) {
			btrfs_warn(fs_info,
				   "uuid item with illegal size %lu!",
				   (unsigned long)item_size);
			goto skip;
		}
		while (item_size) {
			u8 uuid[BTRFS_UUID_SIZE];
			__le64 subid_le;
			u64 subid_cpu;

			put_unaligned_le64(key.objectid, uuid);
			put_unaligned_le64(key.offset, uuid + sizeof(u64));
			read_extent_buffer(leaf, &subid_le, offset,
					   sizeof(subid_le));
			subid_cpu = le64_to_cpu(subid_le);
			ret = check_func(fs_info, uuid, key.type, subid_cpu);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				btrfs_release_path(path);
				ret = btrfs_uuid_iter_rem(root, uuid, key.type,
							  subid_cpu);
				if (ret == 0) {
					/*
					 * this might look inefficient, but the
					 * justification is that it is an
					 * exception that check_func returns 1,
					 * and that in the regular case only one
					 * entry per UUID exists.
					 */
					goto again_search_slot;
				}
				if (ret < 0 && ret != -ENOENT)
					goto out;
			}
			item_size -= sizeof(subid_le);
			offset += sizeof(subid_le);
		}

skip:
		ret = btrfs_next_item(root, path);
		if (ret == 0)
			continue;
		else if (ret > 0)
			ret = 0;
		break;
	}

out:
	btrfs_free_path(path);
	return ret;
}
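The put_unaligned_le64() pair above reverses how a UUID is packed into a key: the first 8 bytes of the UUID become key.objectid and the next 8 bytes become key.offset, both little endian, while the item body is just an array of __le64 subvolume ids that Example #26 shrinks and Example #30 extends. A standalone sketch of the forward mapping (illustrative only):

#include <stdint.h>

struct cpu_key { uint64_t objectid; uint8_t type; uint64_t offset; };

/* Read 8 bytes as a little-endian u64, independent of host endianness. */
uint64_t get_le64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

void uuid_to_key(const uint8_t uuid[16], uint8_t type, struct cpu_key *key)
{
	key->objectid = get_le64(uuid);
	key->offset = get_le64(uuid + 8);
	key->type = type;
}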
Example #29
/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);
	int blocksize = root->sectorsize;

	root = root->fs_info->csum_root;

	path = btrfs_alloc_path();

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				goto out;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end *= blocksize;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			BUG_ON(ret);
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum                ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) / blocksize;
			offset *= csum_size;

			shift_len = (len / blocksize) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memset_extent_buffer(leaf, 0, item_offset + offset,
					     shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			BUG_ON(ret && ret != -EAGAIN);

			key.offset = end_byte - 1;
		} else {
			ret = truncate_one_csum(trans, root, path,
						&key, bytenr, len);
			BUG_ON(ret);
		}
		btrfs_release_path(root, path);
	}
out:
	btrfs_free_path(path);
	return 0;
}
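Each pass of the loop above classifies how the csum item's byte range [key.offset, csum_end) overlaps the deletion range [bytenr, end_byte): no overlap ends the walk, full containment deletes the whole item, a csum that spans both ends is split in place so it can be truncated on the next pass, and anything else is truncated at one end. The same classification as a standalone sketch:

enum csum_overlap {
	CSUM_NO_OVERLAP,	/* nothing to do, stop walking */
	CSUM_DELETE_ITEM,	/* item fully inside the deleted range */
	CSUM_SPLIT_ITEM,	/* deleted range strictly inside the item */
	CSUM_TRUNCATE_ITEM,	/* overlap at one end only */
};

enum csum_overlap classify_csum_range(unsigned long long item_start,
				      unsigned long long item_end,
				      unsigned long long del_start,
				      unsigned long long del_end)
{
	if (item_end <= del_start || item_start >= del_end)
		return CSUM_NO_OVERLAP;
	if (item_start >= del_start && item_end <= del_end)
		return CSUM_DELETE_ITEM;
	if (item_start < del_start && item_end > del_end)
		return CSUM_SPLIT_ITEM;
	return CSUM_TRUNCATE_ITEM;
}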
Example #30
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
			u64 subid_cpu)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *uuid_root = fs_info->uuid_root;
	int ret;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int slot;
	unsigned long offset;
	__le64 subid_le;

	ret = btrfs_uuid_tree_lookup(uuid_root, uuid, type, subid_cpu);
	if (ret != -ENOENT)
		return ret;

	if (WARN_ON_ONCE(!uuid_root)) {
		ret = -EINVAL;
		goto out;
	}

	btrfs_uuid_to_key(uuid, type, &key);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_insert_empty_item(trans, uuid_root, path, &key,
				      sizeof(subid_le));
	if (ret >= 0) {
		/* Add an item for the type for the first time */
		eb = path->nodes[0];
		slot = path->slots[0];
		offset = btrfs_item_ptr_offset(eb, slot);
	} else if (ret == -EEXIST) {
		/*
		 * An item with that type already exists.
		 * Extend the item and store the new subid at the end.
		 */
		btrfs_extend_item(path, sizeof(subid_le));
		eb = path->nodes[0];
		slot = path->slots[0];
		offset = btrfs_item_ptr_offset(eb, slot);
		offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
	} else {
		btrfs_warn(fs_info,
			   "insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
			   ret, (unsigned long long)key.objectid,
			   (unsigned long long)key.offset, type);
		goto out;
	}

	ret = 0;
	subid_le = cpu_to_le64(subid_cpu);
	write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}