/*
 * Call iterate() for every inode that references the extent identified
 * by @extent_item_objectid.
 *
 * Every leaf referencing the extent is resolved, then every root
 * referencing each leaf is resolved, and iterate() is invoked once per
 * (root, inode-ref list) pair.  When the iterator function returns a
 * non-zero value, iteration stops and that value is returned.
 *
 * When @search_commit_root is zero the resolution runs against the
 * live tree state: a transaction is joined and a tree mod sequence
 * element is held for the duration so in-flight modifications are
 * accounted for consistently.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	pr_debug("resolving all inodes for extent %llu\n",
		 extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	}

	/* collect every leaf that references this extent */
	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		/* resolve every root that references this leaf */
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
					   tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#llx\n", root_node->val, ref_node->val,
				 (long long)ref_node->aux);
			/*
			 * ref_node->aux carries the extent_inode_elem list
			 * built by btrfs_find_all_leafs; a non-zero return
			 * from the user iterator ends both loops.
			 */
			ret = iterate_leaf_refs((struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	/* drop the seq element and transaction acquired above, if any */
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}
/*
 * After copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * This also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 *
 * Returns 0 on success or a negative errno.  The extent range covering
 * the written pages is locked for the duration and always unlocked on
 * return.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct file *file,
					    struct page **pages,
					    size_t num_pages,
					    loff_t pos, size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	/* round the dirtied range out to full sectors */
	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);

	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page, we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	if (err) {
		/*
		 * The original code ignored this return value; a failure
		 * here must not leave the pages marked uptodate/dirty.
		 * End the transaction we joined before dropping the lock.
		 */
		btrfs_end_transaction(trans, root);
		goto out_unlock;
	}
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		/* we've only changed i_size in ram, and we haven't updated
		 * the disk i_size.  There is no need to log the inode
		 * at this time.
		 */
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}
static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info, struct btrfs_pending_snapshot *pending) { int ret; int namelen; u64 index = 0; struct btrfs_trans_handle *trans; struct inode *parent_inode; struct inode *inode; struct btrfs_root *parent_root; parent_inode = pending->dentry->d_parent->d_inode; parent_root = BTRFS_I(parent_inode)->root; trans = btrfs_join_transaction(parent_root, 1); /* * insert the directory item */ namelen = strlen(pending->name); ret = btrfs_set_inode_index(parent_inode, &index); ret = btrfs_insert_dir_item(trans, parent_root, pending->name, namelen, parent_inode->i_ino, &pending->root_key, BTRFS_FT_DIR, index); if (ret) goto fail; btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2); ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root, pending->root_key.objectid, parent_root->root_key.objectid, parent_inode->i_ino, index, pending->name, namelen); BUG_ON(ret); inode = btrfs_lookup_dentry(parent_inode, pending->dentry); d_instantiate(pending->dentry, inode); fail: btrfs_end_transaction(trans, fs_info->fs_root); return ret; }
/*
 * Look up @hash in the in-memory dedup hash pool and, on a hit, add one
 * reference to the matching extent so it can be reused for the data at
 * @file_pos.
 *
 * Returns 1 on a successful hit (hash->bytenr / hash->num_bytes filled
 * in), 0 when the hash is not present, or a negative errno.
 *
 * Locking: dedup_info->lock protects the hash pool; taking a delayed
 * ref head's mutex while holding dedup_info->lock would invert the
 * order used elsewhere (ABBA), so the pool lock is dropped before
 * locking the head and the hash is re-checked afterwards.
 */
static int generic_search(struct inode *inode, u64 file_pos,
			  struct btrfs_dedup_hash *hash)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_dedup_info *dedup_info = fs_info->dedup_info;
	u64 bytenr;
	u64 tmp_bytenr;
	u32 num_bytes;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

again:
	mutex_lock(&dedup_info->lock);
	ret = generic_search_hash(dedup_info, hash->hash, &bytenr, &num_bytes);
	if (ret <= 0)
		goto out;	/* miss or error: out still drops the lock */

	delayed_refs = &trans->transaction->delayed_refs;

	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head) {
		/*
		 * We can safely insert a new delayed_ref as long as we
		 * hold delayed_refs->lock.
		 * Only need to use atomic inc_extent_ref()
		 */
		ret = btrfs_inc_extent_ref_atomic(trans, root, bytenr,
				num_bytes, 0, root->root_key.objectid,
				btrfs_ino(inode), file_pos);
		spin_unlock(&delayed_refs->lock);
		if (ret == 0) {
			hash->bytenr = bytenr;
			hash->num_bytes = num_bytes;
			ret = 1;
		}
		goto out;
	}

	/*
	 * We can't lock ref head with dedup_info->lock hold or we will cause
	 * ABBA dead lock.
	 */
	mutex_unlock(&dedup_info->lock);
	ret = btrfs_delayed_ref_lock(trans, head);
	spin_unlock(&delayed_refs->lock);
	if (ret == -EAGAIN)
		goto again;	/* head went away under us; restart lookup */

	mutex_lock(&dedup_info->lock);
	/*
	 * Search again to ensure the hash is still here and bytenr didn't
	 * change
	 */
	ret = generic_search_hash(dedup_info, hash->hash, &tmp_bytenr,
				  &num_bytes);
	if (ret <= 0) {
		mutex_unlock(&head->mutex);
		goto out;
	}
	if (tmp_bytenr != bytenr) {
		/* hash now points at a different extent; redo from scratch */
		mutex_unlock(&head->mutex);
		mutex_unlock(&dedup_info->lock);
		goto again;
	}
	hash->bytenr = bytenr;
	hash->num_bytes = num_bytes;

	/*
	 * Increase the extent ref right now, to avoid delayed ref run
	 * Or we may increase ref on non-exist extent.
	 *
	 * NOTE(review): the return value of btrfs_inc_extent_ref() is
	 * ignored here while ret stays >0 from the hash search — confirm
	 * a failure to add the ref cannot be reported as a dedup hit.
	 */
	btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
			     root->root_key.objectid,
			     btrfs_ino(inode), file_pos);
	mutex_unlock(&head->mutex);
out:
	mutex_unlock(&dedup_info->lock);
	btrfs_end_transaction(trans, root);
	return ret;
}
/*
 * FS_IOC_SETFLAGS handler: copy the flag word from userspace, validate
 * it, translate the generic FS_*_FL bits into BTRFS_INODE_* flags on
 * the in-memory inode and persist the inode.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -EOPNOTSUPP,
 * -EACCES, -EPERM, or the mnt_want_write error).
 */
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	/* only the flags we know how to store are accepted */
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL))
		return -EOPNOTSUPP;

	if (!is_owner_or_cap(inode))
		return -EACCES;

	mutex_lock(&inode->i_mutex);

	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	/* flipping append/immutable needs CAP_LINUX_IMMUTABLE */
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto out_unlock;
		}
	}

	ret = mnt_want_write(file->f_path.mnt);
	if (ret)
		goto out_unlock;

	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;


	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	ret = btrfs_update_inode(trans, root, inode);
	BUG_ON(ret);

	btrfs_update_iflags(inode);
	inode->i_ctime = CURRENT_TIME;
	btrfs_end_transaction(trans, root);

	mnt_drop_write(file->f_path.mnt);

	/*
	 * Bug fix: the original returned 0 unconditionally, swallowing
	 * every error that jumped to out_unlock.  Return ret so -EPERM
	 * and mnt_want_write failures reach the caller.
	 */
	ret = 0;
 out_unlock:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
/*
 * Set, replace or remove (value == NULL) an xattr on @inode.
 *
 * Honors XATTR_CREATE (-EEXIST if the attr already exists) and
 * XATTR_REPLACE (-ENODATA if it does not).  On any modification the
 * inode's ctime is bumped and the inode is updated within the joined
 * transaction.  Returns 0 on success or a negative errno.
 */
int __btrfs_setxattr(struct inode *inode, const char *name,
		     const void *value, size_t size, int flags)
{
	struct btrfs_dir_item *di;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	int ret = 0, mod = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		/*
		 * The original dereferenced trans unchecked; a failed
		 * join would oops in btrfs_set_trans_block_group().
		 */
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_set_trans_block_group(trans, inode);

	/* first lets see if we already have this xattr */
	di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
				strlen(name), -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}

	/* ok we already have this xattr, lets remove it */
	if (di) {
		/* if we want create only exit */
		if (flags & XATTR_CREATE) {
			ret = -EEXIST;
			goto out;
		}

		ret = btrfs_delete_one_dir_name(trans, root, path, di);
		if (ret)
			goto out;
		btrfs_release_path(root, path);

		/* if we don't have a value then we are removing the xattr */
		if (!value) {
			mod = 1;
			goto out;
		}
	} else {
		btrfs_release_path(root, path);

		if (flags & XATTR_REPLACE) {
			/* we couldn't find the attr to replace */
			ret = -ENODATA;
			goto out;
		}
	}

	/* ok we have to create a completely new xattr */
	ret = btrfs_insert_xattr_item(trans, root, name, strlen(name),
				      value, size, inode->i_ino);
	if (ret)
		goto out;
	mod = 1;

out:
	if (mod) {
		inode->i_ctime = CURRENT_TIME;
		ret = btrfs_update_inode(trans, root, inode);
	}

	btrfs_end_transaction(trans, root);
	btrfs_free_path(path);
	return ret;
}