/*
 * insert a name into a directory, doing overflow properly if there is a hash
 * collision.  data_size indicates how big the item inserted should be.  On
 * success a struct btrfs_dir_item pointer is returned, otherwise it is
 * an ERR_PTR.
 *
 * The name is not copied into the dir item, you have to do that yourself.
 */
static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle *trans,
						   struct btrfs_root *root,
						   struct btrfs_path *path,
						   struct btrfs_key *cpu_key,
						   u32 data_size,
						   const char *name,
						   int name_len)
{
	int ret;
	char *ptr;
	struct btrfs_item *item;
	struct extent_buffer *leaf;

	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (ret == -EEXIST) {
		struct btrfs_dir_item *di;
		di = btrfs_match_dir_item_name(root, path, name, name_len);
		if (di)
			return ERR_PTR(-EEXIST);
		btrfs_extend_item(trans, root, path, data_size);
	} else if (ret < 0)
		return ERR_PTR(ret);
	WARN_ON(ret > 0);
	leaf = path->nodes[0];
	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
	BUG_ON(data_size > btrfs_item_size(leaf, item));
	ptr += btrfs_item_size(leaf, item) - data_size;
	return (struct btrfs_dir_item *)ptr;
}
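Because insert_with_overflow() only reserves space, a caller has to fill in the dir item fields and copy the name itself. A minimal caller sketch, loosely following the pattern of btrfs_insert_dir_item() from the same era; example_fill_dir_item is an illustrative name, not an existing function:

/*
 * Sketch only: fill in the dir item returned by insert_with_overflow()
 * and copy the name, which lives directly after the fixed-size struct.
 */
static int example_fill_dir_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_key *key, u32 data_size,
				 struct btrfs_key *location, u8 type,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	unsigned long name_ptr;

	di = insert_with_overflow(trans, root, path, key, data_size,
				  name, name_len);
	if (IS_ERR(di))
		return PTR_ERR(di);

	leaf = path->nodes[0];
	btrfs_cpu_key_to_disk(&disk_key, location);
	btrfs_set_dir_item_key(leaf, di, &disk_key);
	btrfs_set_dir_type(leaf, di, type);
	btrfs_set_dir_data_len(leaf, di, 0);
	btrfs_set_dir_name_len(leaf, di, name_len);

	/* The name follows the btrfs_dir_item structure in the leaf. */
	name_ptr = (unsigned long)(di + 1);
	write_extent_buffer(leaf, name, name_ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}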
static int add_new_free_space_info(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	info = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_free_space_info);
	btrfs_set_free_space_extent_count(leaf, info, 0);
	btrfs_set_free_space_flags(leaf, info, 0);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle *trans,
						   struct btrfs_root *root,
						   struct btrfs_path *path,
						   struct btrfs_key *cpu_key,
						   u32 data_size)
{
	int ret;
	char *ptr;
	struct btrfs_item *item;
	struct btrfs_leaf *leaf;

	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (ret == -EEXIST) {
		ret = btrfs_extend_item(trans, root, path, data_size);
		BUG_ON(ret > 0);
		if (ret)
			return NULL;
	}
	BUG_ON(ret > 0);
	leaf = &path->nodes[0]->leaf;
	item = leaf->items + path->slots[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);
	BUG_ON(data_size > btrfs_item_size(item));
	ptr += btrfs_item_size(item) - data_size;
	return (struct btrfs_dir_item *)ptr;
}
int btrfs_add_orphan_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path, u64 ino)
{
	struct btrfs_key key;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = ino;

	return btrfs_insert_empty_item(trans, root, path, &key, 0);
}
static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
				  u64 num_bytes, u64 parent,
				  u64 root_objectid)
{
	struct btrfs_trans_handle trans;
	struct btrfs_extent_item *item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u32 size = sizeof(*item) + sizeof(*iref) + sizeof(*block_info);
	int ret;

	btrfs_init_dummy_trans(&trans);

	ins.objectid = bytenr;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ins.offset = num_bytes;

	path = btrfs_alloc_path();
	if (!path) {
		test_msg("Couldn't allocate path\n");
		return -ENOMEM;
	}

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(&trans, root, path, &ins, size);
	if (ret) {
		test_msg("Couldn't insert ref %d\n", ret);
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, 1);
	btrfs_set_extent_generation(leaf, item, 1);
	btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
	block_info = (struct btrfs_tree_block_info *)(item + 1);
	btrfs_set_tree_block_level(leaf, block_info, 1);
	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	if (parent > 0) {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_free_path(path);
	return 0;
}
static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
			u64 parent, u64 root_objectid)
{
	struct btrfs_trans_handle trans;
	struct btrfs_extent_item *item;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 refs;
	int ret;

	btrfs_init_dummy_trans(&trans);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	path = btrfs_alloc_path();
	if (!path) {
		test_msg("Couldn't allocate path\n");
		return -ENOMEM;
	}

	path->leave_spinning = 1;
	ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
	if (ret) {
		test_msg("Couldn't find extent ref\n");
		btrfs_free_path(path);
		return ret;
	}

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_extent_item);
	refs = btrfs_extent_refs(path->nodes[0], item);
	btrfs_set_extent_refs(path->nodes[0], item, refs + 1);
	btrfs_release_path(path);

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(&trans, root, path, &key, 0);
	if (ret)
		test_msg("Failed to insert backref\n");
	btrfs_free_path(path);
	return ret;
}
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption,
			     u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_insert_inline_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, u64 objectid,
			       u64 offset, char *buffer, size_t size)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	u32 datasize;
	int err = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = offset;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);

	datasize = btrfs_file_extent_calc_inline_size(size);
	ret = btrfs_insert_empty_item(trans, root, path, &key, datasize);
	if (ret) {
		err = ret;
		goto fail;
	}

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);

	ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;
	write_extent_buffer(leaf, buffer, ptr, size);
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_free_path(path);
	return err;
}
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret = 0;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = offset;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_free_path(path);
	return ret;
}
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   const char *name, int name_len,
			   u64 inode_objectid, u64 ref_objectid, u64 index)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	int ret;
	int ins_len = name_len + sizeof(*ref);

	key.objectid = inode_objectid;
	key.offset = ref_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_item(trans, root, path, &key, ins_len);
	if (ret == -EEXIST) {
		u32 old_size;

		if (find_name_in_backref(path, name, name_len, &ref))
			goto out;

		old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		ret = btrfs_extend_item(trans, root, path, ins_len);
		BUG_ON(ret);
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
		ret = 0;
	} else if (ret < 0) {
		goto out;
	} else {
		/*
		 * The snippet was truncated here; the remainder below is an
		 * assumption, reconstructed to match the same-era kernel
		 * function: a fresh item has the ref struct at its start.
		 */
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
	}
	write_extent_buffer(path->nodes[0], name, ptr, name_len);
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * add a btrfs_root_ref item.  type is either BTRFS_ROOT_REF_KEY
 * or BTRFS_ROOT_BACKREF_KEY.
 *
 * The dirid, sequence, name and name_len refer to the directory entry
 * that is referencing the root.
 *
 * For a forward ref, the root_id is the id of the tree referencing
 * the root and ref_id is the id of the subvol or snapshot.
 *
 * For a back ref the root_id is the id of the subvol or snapshot and
 * ref_id is the id of the tree referencing it.
 */
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
		       struct btrfs_root *tree_root,
		       u64 root_id, u8 type, u64 ref_id,
		       u64 dirid, u64 sequence,
		       const char *name, int name_len)
{
	struct btrfs_key key;
	int ret;
	struct btrfs_path *path;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();

	key.objectid = root_id;
	key.type = type;
	key.offset = ref_id;

	ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
				      sizeof(*ref) + name_len);
	BUG_ON(ret);

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	btrfs_set_root_ref_dirid(leaf, ref, dirid);
	btrfs_set_root_ref_sequence(leaf, ref, sequence);
	btrfs_set_root_ref_name_len(leaf, ref, name_len);
	ptr = (unsigned long)(ref + 1);

	write_extent_buffer(leaf, name, ptr, name_len);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}
int btrfs_csum_file_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 alloc_end,
			  u64 bytenr, char *data, size_t len)
{
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	u64 next_offset = (u64)-1;
	int found_next = 0;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf = NULL;
	u64 csum_offset;
	u32 csum_result = ~(u32)0;
	u32 nritems;
	u32 ins_size;
	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		leaf = path->nodes[0];
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >= MAX_CSUM_ITEMS(root, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems - 1) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = 0;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(root, path);
	ret = btrfs_search_slot(trans, root, &file_key, path, csum_size, 1);
	if (ret < 0)
		goto fail;
	if (ret == 0) {
		BUG();
	}
	if (path->slots[0] == 0) {
		goto insert;
	}
	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (file_key.offset - found_key.offset) / root->sectorsize;
	if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
		goto insert;
	}
	if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		u32 diff = (csum_offset + 1) * csum_size;
		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		if (diff != csum_size)
			goto insert;
		ret = btrfs_extend_item(trans, root, path, diff);
		BUG_ON(ret);
		goto csum;
	}

insert:
	btrfs_release_path(root, path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp = min(alloc_end, next_offset);
		tmp -= file_key.offset;
		tmp /= root->sectorsize;
		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	ret = btrfs_insert_empty_item(trans, root, path, &file_key, ins_size);
	if (ret < 0)
		goto fail;
	if (ret != 0) {
		WARN_ON(1);
		goto fail;
	}
csum:
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	ret = 0;
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	csum_result = btrfs_csum_data(root, data, csum_result, len);
	btrfs_csum_final(csum_result, (char *)&csum_result);
	if (csum_result == 0) {
		printk("csum result is 0 for block %llu\n",
		       (unsigned long long)bytenr);
	}

	write_extent_buffer(leaf, &csum_result, (unsigned long)item,
			    csum_size);
	btrfs_mark_buffer_dirty(path->nodes[0]);
fail:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	return ret;
}
static int record_file_extent(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 objectid,
			      struct btrfs_inode_item *inode,
			      u64 file_pos, u64 disk_bytenr,
			      u64 num_bytes)
{
	int ret;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key ins_key;
	struct btrfs_path path;
	struct btrfs_extent_item *ei;

	btrfs_init_path(&path);

	ins_key.objectid = objectid;
	ins_key.offset = 0;
	btrfs_set_key_type(&ins_key, BTRFS_EXTENT_DATA_KEY);
	ret = btrfs_insert_empty_item(trans, root, &path, &ins_key,
				      sizeof(*fi));
	if (ret)
		goto fail;
	leaf = path.nodes[0];
	fi = btrfs_item_ptr(leaf, path.slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(root, &path);

	ins_key.objectid = disk_bytenr;
	ins_key.offset = num_bytes;
	ins_key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_insert_empty_item(trans, extent_root, &path,
				      &ins_key, sizeof(*ei));
	if (ret == 0) {
		leaf = path.nodes[0];
		ei = btrfs_item_ptr(leaf, path.slots[0],
				    struct btrfs_extent_item);

		btrfs_set_extent_refs(leaf, ei, 0);
		btrfs_set_extent_generation(leaf, ei, trans->transid);
		btrfs_set_extent_flags(leaf, ei, BTRFS_EXTENT_FLAG_DATA);

		btrfs_mark_buffer_dirty(leaf);
		ret = btrfs_update_block_group(trans, root, disk_bytenr,
					       num_bytes, 1, 0);
		if (ret)
			goto fail;
	} else if (ret != -EEXIST) {
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	char *bitmap_cursor;
	u64 start, end;
	u64 bitmap_range, i;
	u32 bitmap_size, flags, expected_extent_count;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     block_group->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
				u64 first, last;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				first = div_u64(found_key.objectid - start,
						block_group->sectorsize);
				last = div_u64(found_key.objectid + found_key.offset - start,
					       block_group->sectorsize);
				bitmap_set(bitmap, first, last - first);

				extent_count++;
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	bitmap_cursor = (char *)bitmap;
	bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	i = start;
	while (i < end) {
		unsigned long ptr;
		u64 extent_size;
		u32 data_size;

		extent_size = min(end - i, bitmap_range);
		data_size = free_space_bitmap_size(extent_size,
						   block_group->sectorsize);

		key.objectid = i;
		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
		key.offset = extent_size;

		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      data_size);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, bitmap_cursor, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(path);

		i += extent_size;
		bitmap_cursor += data_size;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}
int btrfs_dedup_enable(struct btrfs_fs_info *fs_info, u16 type, u16 backend,
		       u64 blocksize, u64 limit)
{
	struct btrfs_dedup_info *dedup_info;
	struct btrfs_root *dedup_root;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_dedup_status_item *status;
	int create_tree;
	u64 compat_ro_flag = btrfs_super_compat_ro_flags(fs_info->super_copy);
	int ret = 0;

	/* Sanity check */
	if (blocksize > BTRFS_DEDUP_BLOCKSIZE_MAX ||
	    blocksize < BTRFS_DEDUP_BLOCKSIZE_MIN ||
	    blocksize < fs_info->tree_root->sectorsize ||
	    !is_power_of_2(blocksize))
		return -EINVAL;
	if (type > ARRAY_SIZE(btrfs_dedup_sizes))
		return -EINVAL;
	if (backend >= BTRFS_DEDUP_BACKEND_LAST)
		return -EINVAL;
	if (backend == BTRFS_DEDUP_BACKEND_INMEMORY && limit == 0)
		limit = 4096; /* default value */
	if (backend == BTRFS_DEDUP_BACKEND_ONDISK && limit != 0)
		limit = 0;

	/*
	 * If current fs doesn't support DEDUP feature, don't enable
	 * on-disk dedup.
	 */
	if (!(compat_ro_flag & BTRFS_FEATURE_COMPAT_RO_DEDUP) &&
	    backend == BTRFS_DEDUP_BACKEND_ONDISK)
		return -EINVAL;

	/* Meaningless and unable to enable dedup for RO fs */
	if (fs_info->sb->s_flags & MS_RDONLY)
		return -EINVAL;

	if (fs_info->dedup_info) {
		dedup_info = fs_info->dedup_info;

		/* Check if we are re-enable for different dedup config */
		if (dedup_info->blocksize != blocksize ||
		    dedup_info->hash_type != type ||
		    dedup_info->backend != backend) {
			btrfs_dedup_disable(fs_info);
			goto enable;
		}

		/* On-fly limit change is OK */
		mutex_lock(&dedup_info->lock);
		fs_info->dedup_info->limit_nr = limit;
		mutex_unlock(&dedup_info->lock);
		return 0;
	}

enable:
	create_tree = compat_ro_flag & BTRFS_FEATURE_COMPAT_RO_DEDUP;

	ret = init_dedup_info(fs_info, type, backend, blocksize, limit);
	dedup_info = fs_info->dedup_info;
	if (ret < 0)
		goto out;

	if (!create_tree)
		goto out;

	/* Create dedup tree for status at least */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(fs_info->tree_root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_free_path(path);
		goto out;
	}

	dedup_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_DEDUP_TREE_OBJECTID);
	if (IS_ERR(dedup_root)) {
		ret = PTR_ERR(dedup_root);
		btrfs_abort_transaction(trans, fs_info->tree_root, ret);
		btrfs_free_path(path);
		goto out;
	}

	dedup_info->dedup_root = dedup_root;

	key.objectid = 0;
	key.type = BTRFS_DEDUP_STATUS_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, dedup_root, path, &key,
				      sizeof(*status));
	if (ret < 0) {
		btrfs_abort_transaction(trans, fs_info->tree_root, ret);
		btrfs_free_path(path);
		goto out;
	}

	status = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_dedup_status_item);
	btrfs_set_dedup_status_blocksize(path->nodes[0], status, blocksize);
	btrfs_set_dedup_status_limit(path->nodes[0], status, limit);
	btrfs_set_dedup_status_hash_type(path->nodes[0], status, type);
	btrfs_set_dedup_status_backend(path->nodes[0], status, backend);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);

out:
	if (ret < 0) {
		kfree(dedup_info);
		fs_info->dedup_info = NULL;
	}
	return ret;
}
/*
 * called from commit_transaction. Writes changed device replace state to
 * disk.
 */
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_replace_item *ptr;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	btrfs_dev_replace_lock(dev_replace);
	if (!dev_replace->is_valid ||
	    !dev_replace->item_needs_writeback) {
		btrfs_dev_replace_unlock(dev_replace);
		return 0;
	}
	btrfs_dev_replace_unlock(dev_replace);

	key.objectid = 0;
	key.type = BTRFS_DEV_REPLACE_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		pr_warn("btrfs: error %d while searching for dev_replace item!\n",
			ret);
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/*
		 * need to delete old one and insert a new one.
		 * Since no attempt is made to recover any old state, if the
		 * dev_replace state is 'running', the data on the target
		 * drive is lost.
		 * It would be possible to recover the state: just make sure
		 * that the beginning of the item is never changed and always
		 * contains all the essential information. Then read this
		 * minimal set of information and use it as a base for the
		 * new state.
		 */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			pr_warn("btrfs: delete too small dev_replace item failed %d!\n",
				ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			pr_warn("btrfs: insert dev_replace item failed %d!\n",
				ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0],
			     struct btrfs_dev_replace_item);

	btrfs_dev_replace_lock(dev_replace);
	if (dev_replace->srcdev)
		btrfs_set_dev_replace_src_devid(eb, ptr,
						dev_replace->srcdev->devid);
	else
		btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
	btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
		dev_replace->cont_reading_from_srcdev_mode);
	btrfs_set_dev_replace_replace_state(eb, ptr,
		dev_replace->replace_state);
	btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
	btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
	btrfs_set_dev_replace_num_write_errors(eb, ptr,
		atomic64_read(&dev_replace->num_write_errors));
	btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
		atomic64_read(&dev_replace->num_uncorrectable_read_errors));
	dev_replace->cursor_left_last_write_of_item =
		dev_replace->cursor_left;
	btrfs_set_dev_replace_cursor_left(eb, ptr,
		dev_replace->cursor_left_last_write_of_item);
	btrfs_set_dev_replace_cursor_right(eb, ptr,
		dev_replace->cursor_right);
	dev_replace->item_needs_writeback = 0;
	btrfs_dev_replace_unlock(dev_replace);

	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);

	return ret;
}
static int ondisk_add(struct btrfs_trans_handle *trans,
		      struct btrfs_dedup_info *dedup_info,
		      struct btrfs_dedup_hash *hash)
{
	struct btrfs_path *path;
	struct btrfs_root *dedup_root = dedup_info->dedup_root;
	struct btrfs_key key;
	struct btrfs_dedup_hash_item *hash_item;
	u64 bytenr;
	u32 num_bytes;
	int hash_len = btrfs_dedup_sizes[dedup_info->hash_type];
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&dedup_info->lock);

	ret = ondisk_search_bytenr(NULL, dedup_info, path, hash->bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	btrfs_release_path(path);

	ret = ondisk_search_hash(dedup_info, hash->hash, &bytenr, &num_bytes);
	if (ret < 0)
		goto out;
	/* Same hash found, don't re-add to save dedup tree space */
	if (ret > 0) {
		ret = 0;
		goto out;
	}

	/* Insert hash->bytenr item */
	memcpy(&key.objectid, hash->hash + hash_len - 8, 8);
	key.type = BTRFS_DEDUP_HASH_ITEM_KEY;
	key.offset = hash->bytenr;

	ret = btrfs_insert_empty_item(trans, dedup_root, path, &key,
				      sizeof(*hash_item) + hash_len);
	WARN_ON(ret == -EEXIST);
	if (ret < 0)
		goto out;
	hash_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				   struct btrfs_dedup_hash_item);
	btrfs_set_dedup_hash_len(path->nodes[0], hash_item, hash->num_bytes);
	write_extent_buffer(path->nodes[0], hash->hash,
			    (unsigned long)(hash_item + 1), hash_len);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	/* Then bytenr->hash item */
	key.objectid = hash->bytenr;
	key.type = BTRFS_DEDUP_BYTENR_ITEM_KEY;
	memcpy(&key.offset, hash->hash + hash_len - 8, 8);

	ret = btrfs_insert_empty_item(trans, dedup_root, path, &key, hash_len);
	WARN_ON(ret == -EEXIST);
	if (ret < 0)
		goto out;
	write_extent_buffer(path->nodes[0], hash->hash,
			    btrfs_item_ptr_offset(path->nodes[0], path->slots[0]),
			    hash_len);
	btrfs_mark_buffer_dirty(path->nodes[0]);

out:
	mutex_unlock(&dedup_info->lock);
	btrfs_free_path(path);
	return ret;
}
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	u64 start, end;
	/* Initialize to silence GCC. */
	u64 extent_start = 0;
	u64 offset;
	u32 bitmap_size, flags, expected_extent_count;
	int prev_bit = 0, bit, bitnr;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     block_group->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				unsigned long ptr;
				char *bitmap_cursor;
				u32 bitmap_pos, data_size;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				bitmap_pos = div_u64(found_key.objectid - start,
						     block_group->sectorsize *
						     BITS_PER_BYTE);
				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
				data_size = free_space_bitmap_size(found_key.offset,
								   block_group->sectorsize);

				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
				read_extent_buffer(leaf, bitmap_cursor, ptr,
						   data_size);

				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	offset = start;
	bitnr = 0;
	while (offset < end) {
		bit = !!test_bit(bitnr, bitmap);
		if (prev_bit == 0 && bit == 1) {
			extent_start = offset;
		} else if (prev_bit == 1 && bit == 0) {
			key.objectid = extent_start;
			key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
			key.offset = offset - extent_start;

			ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
			if (ret)
				goto out;
			btrfs_release_path(path);

			extent_count++;
		}
		prev_bit = bit;
		offset += block_group->sectorsize;
		bitnr++;
	}
	if (prev_bit == 1) {
		key.objectid = extent_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = end - extent_start;

		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		btrfs_release_path(path);

		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	kvfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;
}
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
			u64 subid_cpu)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *uuid_root = fs_info->uuid_root;
	int ret;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int slot;
	unsigned long offset;
	__le64 subid_le;

	ret = btrfs_uuid_tree_lookup(uuid_root, uuid, type, subid_cpu);
	if (ret != -ENOENT)
		return ret;

	if (WARN_ON_ONCE(!uuid_root)) {
		ret = -EINVAL;
		goto out;
	}

	btrfs_uuid_to_key(uuid, type, &key);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_insert_empty_item(trans, uuid_root, path, &key,
				      sizeof(subid_le));
	if (ret >= 0) {
		/* Add an item for the type for the first time */
		eb = path->nodes[0];
		slot = path->slots[0];
		offset = btrfs_item_ptr_offset(eb, slot);
	} else if (ret == -EEXIST) {
		/*
		 * An item with that type already exists.
		 * Extend the item and store the new subid at the end.
		 */
		btrfs_extend_item(path, sizeof(subid_le));
		eb = path->nodes[0];
		slot = path->slots[0];
		offset = btrfs_item_ptr_offset(eb, slot);
		offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
	} else {
		btrfs_warn(fs_info,
			   "insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
			   ret, (unsigned long long)key.objectid,
			   (unsigned long long)key.offset, type);
		goto out;
	}

	ret = 0;
	subid_le = cpu_to_le64(subid_cpu);
	write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
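A uuid item is simply a packed array of little-endian subvolume ids, and each btrfs_uuid_tree_add() call appends one __le64 to it. A hedged sketch of the matching read side, following the shape of btrfs_uuid_tree_lookup(); example_uuid_item_contains is an illustrative name, not a kernel function, and it assumes the path is already positioned on the uuid item:

/* Sketch: scan the __le64 subid array stored in a uuid item. */
static int example_uuid_item_contains(struct extent_buffer *eb, int slot,
				      u64 subid_cpu)
{
	u32 item_size = btrfs_item_size_nr(eb, slot);
	unsigned long offset = btrfs_item_ptr_offset(eb, slot);

	/* The item must be a whole number of 8-byte subids. */
	if (!IS_ALIGNED(item_size, sizeof(u64)))
		return -ENOENT;

	while (item_size) {
		__le64 subid_le;

		read_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
		if (le64_to_cpu(subid_le) == subid_cpu)
			return 0;
		offset += sizeof(subid_le);
		item_size -= sizeof(subid_le);
	}
	return -ENOENT;
}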
static int remove_free_space_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = -1;
	int ret;

	key.objectid = start;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(start >= found_start && end <= found_end);

	/*
	 * Okay, now that we've found the free space extent which contains the
	 * free space that we are removing, there are four cases:
	 *
	 * 1. We're using the whole extent: delete the key we found and
	 *    decrement the free space extent count.
	 * 2. We are using part of the extent starting at the beginning: delete
	 *    the key we found and insert a new key representing the leftover at
	 *    the end. There is no net change in the number of extents.
	 * 3. We are using part of the extent ending at the end: delete the key
	 *    we found and insert a new key representing the leftover at the
	 *    beginning. There is no net change in the number of extents.
	 * 4. We are using part of the extent in the middle: delete the key we
	 *    found and insert two new keys representing the leftovers on each
	 *    side. Where we used to have one extent, we now have two, so
	 *    increment the extent count. We may need to convert the block group
	 *    to bitmaps as a result.
	 */

	/* Delete the existing key (cases 1-4). */
	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/* Add a key for leftovers at the beginning (cases 3 and 4). */
	if (start > found_start) {
		key.objectid = found_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = start - found_start;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	/* Add a key for leftovers at the end (cases 2 and 4). */
	if (end < found_end) {
		key.objectid = end;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = found_end - end;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);
out:
	return ret;
}
static int add_free_space_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_path *path,
				 u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key, new_key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = 1;
	int ret;

	/*
	 * We are adding a new extent of free space, but we need to merge
	 * extents. There are four cases here:
	 *
	 * 1. The new extent does not have any immediate neighbors to merge
	 *    with: add the new key and increment the free space extent count.
	 *    We may need to convert the block group to bitmaps as a result.
	 * 2. The new extent has an immediate neighbor before it: remove the
	 *    previous key and insert a new key combining both of them. There
	 *    is no net change in the number of extents.
	 * 3. The new extent has an immediate neighbor after it: remove the
	 *    next key and insert a new key combining both of them. There is no
	 *    net change in the number of extents.
	 * 4. The new extent has immediate neighbors on both sides: remove both
	 *    of the keys and insert a new key combining all of them. Where we
	 *    used to have two extents, we now have one, so decrement the
	 *    extent count.
	 */

	new_key.objectid = start;
	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
	new_key.offset = size;

	/* Search for a neighbor on the left. */
	if (start == block_group->key.objectid)
		goto right;
	key.objectid = start - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto right;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT(found_start < start && found_end <= start);

	/*
	 * Delete the neighbor on the left and absorb it into the new key
	 * (cases 2 and 4).
	 */
	if (found_end == start) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.objectid = found_start;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

right:
	/* Search for a neighbor on the right. */
	if (end == block_group->key.objectid + block_group->key.offset)
		goto insert;
	key.objectid = end;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto insert;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT((found_start < start && found_end <= start) ||
	       (found_start >= end && found_end > end));

	/*
	 * Delete the neighbor on the right and absorb it into the new key
	 * (cases 3 and 4).
	 */
	if (found_start == end) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

insert:
	/* Insert the new key (cases 1-4). */
	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
	if (ret)
		goto out;

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);
out:
	return ret;
}