/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	int info_level = 0;
	int ret;
	struct list_head prefs;
	struct __prelim_ref *ref;
	struct extent_inode_elem *eie = NULL;
	u64 total_refs = 0;

	INIT_LIST_HEAD(&prefs);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs,
						&total_refs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos, total_refs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				u32 bsz;
				struct extent_buffer *eb;

				bsz = fs_info->extent_root->nodesize;
				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, bsz, 0);
				if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		list_del(&ref->list);
		kfree(ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
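/*
 * Illustration, not part of the original source: a minimal sketch of how a
 * caller might walk the refs ulist that find_parent_nodes() fills. Each
 * ulist node's val is the bytenr of a tree block referencing the extent
 * and, when extent_item_pos was supplied, node->aux carries the chain of
 * extent_inode_elem entries found in that leaf. The helper name
 * walk_leaf_refs() is hypothetical.
 */
static void walk_leaf_refs(struct ulist *refs)
{
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	struct extent_inode_elem *eie;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(refs, &uiter))) {
		/* node->val: bytenr of one leaf that references the extent */
		for (eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
		     eie; eie = eie->next) {
			/* eie->inum and eie->offset identify one file extent */
		}
	}
}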
/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans)
		path->search_commit_root = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	if (trans) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed);
			mutex_unlock(&head->mutex);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		WARN_ON(ref->count < 0);
		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;

			if (extent_item_pos && !ref->inode_list) {
				u32 bsz;
				struct extent_buffer *eb;

				bsz = btrfs_level_size(fs_info->extent_root,
						       info_level);
				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				ref->inode_list = eie;
				free_extent_buffer(eb);
			}
			ret = ulist_add_merge(refs, ref->parent,
					      (uintptr_t)ref->inode_list,
					      (u64 *)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
		}
		kfree(ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}
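/*
 * Illustration, not part of the original source: the ulist_add_merge()
 * contract that the "extend its inode list" branch above relies on. The
 * call returns <0 on error, 1 if val was newly inserted, and 0 if val
 * already existed, in which case the node's stored aux is left untouched
 * and handed back through the out parameter - so the caller must splice
 * the new inode list onto the end of the old one. The helper name
 * remember_parent() is hypothetical; the real code additionally treats an
 * existing node with an empty aux as a bug (BUG_ON(!eie)).
 */
static int remember_parent(struct ulist *refs, u64 parent,
			   struct extent_inode_elem *inode_list)
{
	u64 old_aux = 0;
	int ret;

	ret = ulist_add_merge(refs, parent, (uintptr_t)inode_list,
			      &old_aux, GFP_NOFS);
	if (ret < 0)
		return ret;
	if (ret == 0 && old_aux) {
		/* parent already recorded: append to its existing list */
		struct extent_inode_elem *eie =
			(struct extent_inode_elem *)(uintptr_t)old_aux;

		while (eie->next)
			eie = eie->next;
		eie->next = inode_list;
	}
	return 0;
}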
/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;
	struct extent_inode_elem *eie = NULL;
	u64 total_refs = 0;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY)) {
#else
	if (trans) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = __add_delayed_refs(head, time_seq,
						 &prefs_delayed, &total_refs);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs,
						&total_refs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 1);

	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
				      extent_item_pos, total_refs);
	if (ret)
		goto out;

	__merge_refs(&prefs, 2);

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		WARN_ON(ref->count < 0);
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				u32 bsz;
				struct extent_buffer *eb;

				bsz = btrfs_level_size(fs_info->extent_root,
						       ref->level);
				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, bsz, 0);
				if (!eb || !extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				btrfs_tree_read_lock(eb);
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}

out:
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kmem_cache_free(btrfs_prelim_ref_cache, ref);
	}
	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr,
				time_seq, *leafs, NULL, extent_item_pos);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}
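/*
 * Illustration, not part of the original source: a condensed sketch of how
 * the matching "find all roots" walk drives find_parent_nodes() in a loop.
 * Each pass resolves one level of parents; every newly discovered parent
 * bytenr is fed back in as the next search target until no new tree blocks
 * turn up and only root ids remain in *roots. This mirrors the shape of
 * btrfs_find_all_roots() from the same file; the name
 * find_all_roots_sketch() is hypothetical and details are elided.
 */
static int find_all_roots_sketch(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info, u64 bytenr,
				 u64 time_seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		/* collect parents into tmp and root ids into *roots */
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		/* the ulist supports appending while iterating */
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}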