/* * get a page into the pagecache */ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index, struct key *key) { struct page *page; struct file file = { .private_data = key, }; _enter("{%lu},%lu", dir->i_ino, index); page = read_mapping_page(dir->i_mapping, index, &file); if (!IS_ERR(page)) { kmap(page); if (!PageChecked(page)) afs_dir_check_page(dir, page); if (PageError(page)) goto fail; } return page; fail: afs_dir_put_page(page); _leave(" = -EIO"); return ERR_PTR(-EIO); }
/**
 * ecryptfs_get_locked_page
 *
 * Get one page from cache or lower f/s, return error otherwise.
 *
 * Returns locked and up-to-date page (if ok), with increased
 * refcnt.
 */
struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
{
	struct page *page;

	page = read_mapping_page(inode->i_mapping, index, NULL);
	if (IS_ERR(page))
		return page;
	lock_page(page);
	return page;
}
/*
 * ext4_encrypted_get_link - ->get_link() for encrypted symlinks.
 *
 * Reads the on-disk (encrypted) symlink target, decrypts it into a
 * freshly allocated buffer and returns that buffer; the buffer is
 * freed later via the delayed call (kfree_link).  RCU-walk is not
 * supported (returns -ECHILD when @dentry is NULL).
 */
static const char *ext4_encrypted_get_link(struct dentry *dentry,
					   struct inode *inode,
					   struct delayed_call *done)
{
	struct page *cpage = NULL;
	char *caddr, *paddr = NULL;
	struct fscrypt_str cstr, pstr;
	struct fscrypt_symlink_data *sd;
	int res;
	u32 max_size = inode->i_sb->s_blocksize;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	res = fscrypt_get_encryption_info(inode);
	if (res)
		return ERR_PTR(res);

	if (ext4_inode_is_fast_symlink(inode)) {
		/* fast symlink: target lives inside the inode itself */
		caddr = (char *) EXT4_I(inode)->i_data;
		max_size = sizeof(EXT4_I(inode)->i_data);
	} else {
		/* regular symlink: target is in the first data page */
		cpage = read_mapping_page(inode->i_mapping, 0, NULL);
		if (IS_ERR(cpage))
			return ERR_CAST(cpage);
		caddr = page_address(cpage);
	}

	/* Symlink is encrypted */
	sd = (struct fscrypt_symlink_data *)caddr;
	cstr.name = sd->encrypted_path;
	cstr.len = le16_to_cpu(sd->len);
	if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
		/* Symlink data on the disk is corrupted */
		res = -EFSCORRUPTED;
		goto errout;
	}

	/* allocate the plaintext buffer and decrypt into it */
	res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
	if (res)
		goto errout;
	paddr = pstr.name;
	res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
	if (res)
		goto errout;

	/* Null-terminate the name */
	paddr[pstr.len] = '\0';

	if (cpage)
		put_page(cpage);
	/* paddr ownership passes to the VFS; freed by kfree_link later */
	set_delayed_call(done, kfree_link, paddr);
	return paddr;
errout:
	if (cpage)
		put_page(cpage);
	kfree(paddr);
	return ERR_PTR(res);
}
/* Read directory page @n into the pagecache and leave it kmapped. */
static struct page *dir_get_page(struct inode *dir, unsigned long n)
{
	struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

	if (IS_ERR(page))
		return page;
	kmap(page);
	return page;
}
/*
 * ttm_tt_swapin - bring a swapped-out TTM's backing pages back in.
 *
 * For user-backed TTMs the user pages are simply re-established.
 * Otherwise each page is read back from the swap file's pagecache and
 * copied into a freshly allocated TTM page.  Clears the SWAPPED flag
 * and (unless the swap storage is persistent) drops the swap file.
 *
 * Returns 0 on success or a negative errno.
 */
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		/* user-backed: re-pin the user pages instead of copying */
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;	/* ret is still -ENOMEM here */

		/*
		 * Copy under disabled preemption: the two legacy
		 * kmap_atomic() slots (KM_USER0/1) are per-CPU.
		 */
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	/* drop the swap file unless it is marked persistent */
	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}
/**
 * ecryptfs_get1page
 *
 * Get one page from cache or lower f/s, return error otherwise.
 *
 * Returns unlocked and up-to-date page (if ok), with increased
 * refcnt.
 */
static struct page *ecryptfs_get1page(struct file *file, int index)
{
	struct inode *inode = file->f_path.dentry->d_inode;

	return read_mapping_page(inode->i_mapping, index, (void *)file);
}
/* get the link contents into pagecache */
static char *ocfs2_page_getlink(struct dentry *dentry, struct page **ppage)
{
	struct page *page;

	page = read_mapping_page(dentry->d_inode->i_mapping, 0, NULL);
	if (IS_ERR(page)) {
		/* hand the ERR_PTR straight back, cast to the return type */
		return (char *)page;
	}
	*ppage = page;
	return kmap(page);
}
/*
 * check a symbolic link to see whether it actually encodes a mountpoint
 * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
 */
int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
{
	struct file file = { .private_data = key, };
	struct page *page;
	size_t size;
	char *buf;
	int ret;

	_enter("{%x:%u,%u}",
	       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);

	/* read the contents of the symlink into the pagecache */
	page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, &file);
	if (IS_ERR(page)) {
		ret = PTR_ERR(page);
		goto out;
	}

	ret = -EIO;
	if (PageError(page))
		goto out_free;

	buf = kmap(page);

	/* examine the symlink's contents */
	size = vnode->status.size;
	_debug("symlink to %*.*s", (int) size, (int) size, buf);

	/*
	 * a mountpoint target starts with '%' or '#' and ends with '.'
	 * (AFS mountpoint syntax)
	 */
	if (size > 2 &&
	    (buf[0] == '%' || buf[0] == '#') &&
	    buf[size - 1] == '.'
	    ) {
		_debug("symlink is a mountpoint");
		spin_lock(&vnode->lock);
		set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
		spin_unlock(&vnode->lock);
	}

	ret = 0;

	kunmap(page);
out_free:
	page_cache_release(page);
out:
	_leave(" = %d", ret);
	return ret;
}
/**
 * ecryptfs_get_locked_page
 *
 * Get one page from cache or lower f/s, return error otherwise.
 *
 * Returns locked and up-to-date page (if ok), with increased
 * refcnt.
 */
struct page *ecryptfs_get_locked_page(struct file *file, loff_t index)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct page *page;

	page = read_mapping_page(inode->i_mapping, index, (void *)file);
	if (IS_ERR(page))
		return page;
	lock_page(page);
	return page;
}
/* * check a symbolic link to see whether it actually encodes a mountpoint * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately */ int afs_mntpt_check_symlink(struct afs_vnode *vnode) { struct page *page; size_t size; char *buf; int ret; _enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique); /* read the contents of the symlink into the pagecache */ page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, NULL); if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } ret = -EIO; wait_on_page_locked(page); buf = kmap(page); if (!PageUptodate(page)) goto out_free; if (PageError(page)) goto out_free; /* examine the symlink's contents */ size = vnode->status.size; _debug("symlink to %*.*s", size, (int) size, buf); if (size > 2 && (buf[0] == '%' || buf[0] == '#') && buf[size - 1] == '.' ) { _debug("symlink is a mountpoint"); spin_lock(&vnode->lock); vnode->flags |= AFS_VNODE_MOUNTPOINT; spin_unlock(&vnode->lock); } ret = 0; out_free: kunmap(page); page_cache_release(page); out: _leave(" = %d", ret); return ret; } /* end afs_mntpt_check_symlink() */
static struct page * dir_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { wait_on_page_locked(page); kmap(page); if (!PageUptodate(page)) goto fail; } return page; fail: dir_put_page(page); return ERR_PTR(-EIO); }
/*
 * Read a page's worth of file data into the page cache.  Return the page
 * locked.
 */
static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
{
	struct page *page;

	page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;

	/*
	 * Take the page lock before checking the page state: checking
	 * PageUptodate() before lock_page() leaves a window in which the
	 * page can be invalidated (e.g. by truncation or a failed re-read)
	 * between the check and the lock, so the caller could end up
	 * comparing stale data.  Checking under the lock closes that race.
	 */
	lock_page(page);
	if (!PageUptodate(page)) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); struct page *page; struct address_space *mapping; __be32 *pptr, *curr, *end; u32 mask, len, pnr; int i; /* is there any actual work to be done? */ if (!count) return 0; hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count); /* are all of the bits in range? */ if ((offset + count) > sbi->total_blocks) return -ENOENT; mutex_lock(&sbi->alloc_mutex); mapping = sbi->alloc_file->i_mapping; pnr = offset / PAGE_CACHE_BITS; page = read_mapping_page(mapping, pnr, NULL); if (IS_ERR(page)) goto kaboom; pptr = kmap(page); curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; end = pptr + PAGE_CACHE_BITS / 32; len = count; /* do any partial u32 at the start */ i = offset % 32; if (i) { int j = 32 - i; mask = 0xffffffffU << j; if (j > count) { mask |= 0xffffffffU >> (i + count); *curr++ &= cpu_to_be32(mask); goto out; } *curr++ &= cpu_to_be32(mask); count -= j; }
/* * get a page into the pagecache */ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index) { struct page *page; _enter("{%lu},%lu", dir->i_ino, index); page = read_mapping_page(dir->i_mapping, index, NULL); if (!IS_ERR(page)) { kmap(page); if (!PageChecked(page)) afs_dir_check_page(dir, page); if (PageError(page)) goto fail; } return page; fail: afs_dir_put_page(page); return ERR_PTR(-EIO); } /* end afs_dir_get_page() */
/* get the link contents into pagecache */ static char *ocfs2_page_getlink(struct dentry * dentry, struct page **ppage) { struct page * page; struct address_space *mapping = dentry->d_inode->i_mapping; page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) goto sync_fail; wait_on_page_locked(page); if (!PageUptodate(page)) goto async_fail; *ppage = page; return kmap(page); async_fail: page_cache_release(page); return ERR_PTR(-EIO); sync_fail: return (char*)page; }
/*
 * ufs_get_locked_page - find or read a page and return it locked.
 *
 * Tries the pagecache first; if the page is absent it is read in and
 * then locked, with a re-check for a racing truncate (page->mapping
 * goes NULL) and for read errors.
 *
 * Returns: the locked page, NULL if truncate removed it meanwhile,
 * or ERR_PTR(-EIO) on a read failure.
 */
struct page *ufs_get_locked_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = find_lock_page(mapping, index);
	if (!page) {
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			printk(KERN_ERR "ufs_change_blocknr: "
			       "read_mapping_page error: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);
			goto out;
		}

		lock_page(page);

		if (unlikely(page->mapping == NULL)) {
			/* Truncate got there first */
			unlock_page(page);
			put_page(page);
			page = NULL;
			goto out;
		}

		/* state must be re-checked now that we hold the lock */
		if (!PageUptodate(page) || PageError(page)) {
			unlock_page(page);
			put_page(page);
			printk(KERN_ERR "ufs_change_blocknr: "
			       "can not read page: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);
			page = ERR_PTR(-EIO);
		}
	}
out:
	return page;
}
/*
 * f2fs_encrypted_get_link - ->get_link() for encrypted symlinks.
 *
 * Reads the encrypted target from the first data page, validates it,
 * decrypts it into a freshly allocated buffer, and schedules that
 * buffer for kfree via the delayed call.  RCU-walk is refused
 * (-ECHILD when @dentry is NULL).
 */
static const char *f2fs_encrypted_get_link(struct dentry *dentry,
					   struct inode *inode,
					   struct delayed_call *done)
{
	struct page *cpage = NULL;
	char *caddr, *paddr = NULL;
	struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
	struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
	struct fscrypt_symlink_data *sd;
	u32 max_size = inode->i_sb->s_blocksize;
	int res;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	res = fscrypt_get_encryption_info(inode);
	if (res)
		return ERR_PTR(res);

	cpage = read_mapping_page(inode->i_mapping, 0, NULL);
	if (IS_ERR(cpage))
		return ERR_CAST(cpage);
	caddr = page_address(cpage);

	/* Symlink is encrypted */
	sd = (struct fscrypt_symlink_data *)caddr;
	cstr.name = sd->encrypted_path;
	cstr.len = le16_to_cpu(sd->len);

	/* this is broken symlink case */
	if (unlikely(cstr.len == 0)) {
		res = -ENOENT;
		goto errout;
	}

	if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
		/* Symlink data on the disk is corrupted */
		res = -EIO;
		goto errout;
	}
	res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
	if (res)
		goto errout;

	res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
	if (res)
		goto errout;

	/* this is broken symlink case */
	if (unlikely(pstr.name[0] == 0)) {
		res = -ENOENT;
		goto errout;
	}

	paddr = pstr.name;

	/* Null-terminate the name */
	paddr[pstr.len] = '\0';

	put_page(cpage);
	/* ownership of paddr passes to the VFS; freed via kfree_link */
	set_delayed_call(done, kfree_link, paddr);
	return paddr;
errout:
	fscrypt_fname_free_buffer(&pstr);
	put_page(cpage);
	return ERR_PTR(res);
}
/* Look up ".." and return (or create) a dentry for the parent inode. */
struct dentry *f2fs_get_parent(struct dentry *child)
{
	struct qstr dotdot = {.len = 2, .name = ".."};
	struct page *page;
	unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page);
	if (!ino) {
		if (IS_ERR(page))
			return ERR_CAST(page);
		return ERR_PTR(-ENOENT);
	}
	return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}

/*
 * Re-create missing "." and ".." entries in a directory that was
 * created with inline dots; clears FI_INLINE_DOTS on success.
 * No-op (returns 0) on a read-only mount.
 */
static int __recover_dot_dentries(struct inode *dir, nid_t pino)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct qstr dot = QSTR_INIT(".", 1);
	struct qstr dotdot = QSTR_INIT("..", 2);
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = 0;

	if (f2fs_readonly(sbi->sb)) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"skip recovering inline_dots inode (ino:%lu, pino:%u) "
			"in readonly mountpoint", dir->i_ino, pino);
		return 0;
	}

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);

	/* add "." if it does not already exist */
	de = f2fs_find_entry(dir, &dot, &page);
	if (de) {
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto out;
	} else {
		err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
		if (err)
			goto out;
	}

	/* add ".." pointing at @pino if it does not already exist */
	de = f2fs_find_entry(dir, &dotdot, &page);
	if (de) {
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
	}
out:
	if (!err)
		clear_inode_flag(dir, FI_INLINE_DOTS);

	f2fs_unlock_op(sbi);
	return err;
}

/* ->lookup(): find @dentry in @dir and splice in the target inode. */
static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct inode *inode = NULL;
	struct f2fs_dir_entry *de;
	struct page *page;
	nid_t ino;
	int err = 0;
	unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));

	if (f2fs_encrypted_inode(dir)) {
		int res = fscrypt_get_encryption_info(dir);

		/*
		 * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
		 * created while the directory was encrypted and we
		 * don't have access to the key.
		 */
		if (fscrypt_has_encryption_key(dir))
			fscrypt_set_encrypted_dentry(dentry);
		fscrypt_set_d_op(dentry);
		if (res && res != -ENOKEY)
			return ERR_PTR(res);
	}

	if (dentry->d_name.len > F2FS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de) {
		if (IS_ERR(page))
			return (struct dentry *)page;
		/* negative dentry: inode is still NULL here */
		return d_splice_alias(inode, dentry);
	}

	ino = le32_to_cpu(de->ino);
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);

	inode = f2fs_iget(dir->i_sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	/* repair inline dots on the root and on the found inode */
	if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
		err = __recover_dot_dentries(dir, root_ino);
		if (err)
			goto err_out;
	}

	if (f2fs_has_inline_dots(inode)) {
		err = __recover_dot_dentries(inode, dir->i_ino);
		if (err)
			goto err_out;
	}
	/* enforce fscrypt policy consistency between parent and child */
	if (!IS_ERR(inode) && f2fs_encrypted_inode(dir) &&
	    (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
	    !fscrypt_has_permitted_context(dir, inode)) {
		bool nokey = f2fs_encrypted_inode(inode) &&
			!fscrypt_has_encryption_key(inode);
		err = nokey ? -ENOKEY : -EPERM;
		goto err_out;
	}
	return d_splice_alias(inode, dentry);

err_out:
	iput(inode);
	return ERR_PTR(err);
}

/* ->unlink(): remove @dentry from @dir, orphan-protecting the inode. */
static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode = d_inode(dentry);
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;

	trace_f2fs_unlink_enter(dir, dentry);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de) {
		if (IS_ERR(page))
			err = PTR_ERR(page);
		goto fail;
	}

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	/* reserve an orphan slot before deleting the entry */
	err = acquire_orphan_inode(sbi);
	if (err) {
		f2fs_unlock_op(sbi);
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
		goto fail;
	}
	f2fs_delete_entry(de, page, dir, inode);
	f2fs_unlock_op(sbi);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
fail:
	trace_f2fs_unlink_exit(inode, err);
	return err;
}

/* ->follow_link(): reject zero-length (broken) symlink targets. */
static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page;
	char *link;

	page = page_follow_link_light(dentry, nd);
	if (IS_ERR(page))
		return page;

	link = nd_get_link(nd);
	if (IS_ERR(link))
		return link;

	/* this is broken symlink case */
	if (*link == 0) {
		kunmap(page);
		page_cache_release(page);
		return ERR_PTR(-ENOENT);
	}
	return page;
}

/*
 * ->symlink(): create a symlink, encrypting the target when the
 * parent directory is encrypted.
 */
static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
			const char *symname)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	size_t len = strlen(symname);
	struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
	struct fscrypt_symlink_data *sd = NULL;
	int err;

	if (f2fs_encrypted_inode(dir)) {
		err = fscrypt_get_encryption_info(dir);
		if (err)
			return err;

		if (!fscrypt_has_encryption_key(dir))
			return -EPERM;

		/* on-disk length grows to ciphertext + header size */
		disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
				 sizeof(struct fscrypt_symlink_data));
	}

	if (disk_link.len > dir->i_sb->s_blocksize)
		return -ENAMETOOLONG;

	inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (f2fs_encrypted_inode(inode))
		inode->i_op = &f2fs_encrypted_symlink_inode_operations;
	else
		inode->i_op = &f2fs_symlink_inode_operations;
	inode_nohighmem(inode);
	inode->i_mapping->a_ops = &f2fs_dblock_aops;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);
	alloc_nid_done(sbi, inode->i_ino);

	if (f2fs_encrypted_inode(inode)) {
		struct qstr istr = QSTR_INIT(symname, len);
		struct fscrypt_str ostr;

		sd = kzalloc(disk_link.len, GFP_NOFS);
		if (!sd) {
			err = -ENOMEM;
			goto err_out;
		}

		err = fscrypt_get_encryption_info(inode);
		if (err)
			goto err_out;

		if (!fscrypt_has_encryption_key(inode)) {
			err = -EPERM;
			goto err_out;
		}

		ostr.name = sd->encrypted_path;
		ostr.len = disk_link.len;
		err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
		if (err < 0)
			goto err_out;

		sd->len = cpu_to_le16(ostr.len);
		disk_link.name = (char *)sd;
	}

	err = page_symlink(inode, disk_link.name, disk_link.len);

err_out:
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	/*
	 * Let's flush symlink data in order to avoid broken symlink as much as
	 * possible. Nevertheless, fsyncing is the best way, but there is no
	 * way to get a file descriptor in order to flush that.
	 *
	 * Note that, it needs to do dir->fsync to make this recoverable.
	 * If the symlink path is stored into inline_data, there is no
	 * performance regression.
	 */
	if (!err) {
		filemap_write_and_wait_range(inode->i_mapping, 0,
					     disk_link.len - 1);

		if (IS_DIRSYNC(dir))
			f2fs_sync_fs(sbi->sb, 1);
	} else {
		/* roll back the link we already added above */
		f2fs_unlink(dir, dentry);
	}

	kfree(sd);
	return err;
out:
	handle_failed_inode(inode);
	return err;
}

/* ->mkdir(): create a directory inode and link it into @dir. */
static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err;

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);

	f2fs_balance_fs(sbi, true);

	set_inode_flag(inode, FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out_fail;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

out_fail:
	clear_inode_flag(inode, FI_INC_LINK);
	handle_failed_inode(inode);
	return err;
}

/* ->rmdir(): only empty directories may be removed. */
static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	if (f2fs_empty_dir(inode))
		return f2fs_unlink(dir, dentry);
	return -ENOTEMPTY;
}

/* ->mknod(): create a special file (device node, fifo, socket). */
static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_op = &f2fs_special_inode_operations;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;
out:
	handle_failed_inode(inode);
	return err;
}

/*
 * ->rename(): move @old_dentry from @old_dir to @new_dentry in
 * @new_dir, replacing any existing target (orphan-protected).
 */
static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
	struct inode *old_inode = d_inode(old_dentry);
	struct inode *new_inode = d_inode(new_dentry);
	struct page *old_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	bool is_old_inline = f2fs_has_inline_dentry(old_dir);
	int err = -ENOENT;

	/* moving into a different encrypted dir needs a permitted context */
	if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
	    !fscrypt_has_permitted_context(new_dir, old_inode)) {
		err = -EPERM;
		goto out;
	}

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry) {
		if (IS_ERR(old_page))
			err = PTR_ERR(old_page);
		goto out;
	}

	if (S_ISDIR(old_inode->i_mode)) {
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry) {
			if (IS_ERR(old_dir_page))
				err = PTR_ERR(old_dir_page);
			goto out_old;
		}
	}

	if (new_inode) {
		/* target exists: it will be replaced */
		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
					    &new_page);
		if (!new_entry) {
			if (IS_ERR(new_page))
				err = PTR_ERR(new_page);
			goto out_dir;
		}

		f2fs_balance_fs(sbi, true);

		f2fs_lock_op(sbi);

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		err = update_dent_inode(old_inode, new_inode,
					&new_dentry->d_name);
		if (err) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		down_write(&F2FS_I(new_inode)->i_sem);
		/* one extra link drop when replacing a directory (its "..") */
		if (old_dir_entry)
			f2fs_i_links_write(new_inode, false);
		f2fs_i_links_write(new_inode, false);
		up_write(&F2FS_I(new_inode)->i_sem);

		if (!new_inode->i_nlink)
			add_orphan_inode(new_inode);
		else
			release_orphan_inode(sbi);
	} else {
		f2fs_balance_fs(sbi, true);

		f2fs_lock_op(sbi);

		err = f2fs_add_link(new_dentry, old_inode);
		if (err) {
			f2fs_unlock_op(sbi);
			goto out_dir;
		}

		if (old_dir_entry)
			f2fs_i_links_write(new_dir, true);

		/*
		 * old entry and new entry can locate in the same inline
		 * dentry in inode, when attaching new entry in inline dentry,
		 * it could force inline dentry conversion, after that,
		 * old_entry and old_page will point to wrong address, in
		 * order to avoid this, let's do the check and update here.
		 */
		if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) {
			f2fs_put_page(old_page, 0);
			old_page = NULL;

			old_entry = f2fs_find_entry(old_dir,
						    &old_dentry->d_name,
						    &old_page);
			if (!old_entry) {
				err = -ENOENT;
				if (IS_ERR(old_page))
					err = PTR_ERR(old_page);
				f2fs_unlock_op(sbi);
				goto out_dir;
			}
		}
	}

	down_write(&F2FS_I(old_inode)->i_sem);
	file_lost_pino(old_inode);
	if (new_inode && file_enc_name(new_inode))
		file_set_enc_name(old_inode);
	up_write(&F2FS_I(old_inode)->i_sem);

	old_inode->i_ctime = CURRENT_TIME;
	f2fs_mark_inode_dirty_sync(old_inode);

	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);

	if (old_dir_entry) {
		/* re-point the moved directory's ".." at its new parent */
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
				      old_dir_page, new_dir);
		} else {
			f2fs_dentry_kunmap(old_inode, old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		f2fs_i_links_write(old_dir, false);
	}

	f2fs_unlock_op(sbi);

	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

put_out_dir:
	f2fs_unlock_op(sbi);
	f2fs_dentry_kunmap(new_dir, new_page);
	f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		f2fs_dentry_kunmap(old_inode, old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
out_old:
	f2fs_dentry_kunmap(old_dir, old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}

/*
 * ->follow_link() for encrypted symlinks: read, validate and decrypt
 * the target, then publish it via nd_set_link().  The decrypted
 * buffer is freed later by kfree_put_link().
 */
static void *f2fs_encrypted_follow_link(struct dentry *dentry,
					struct nameidata *nd)
{
	struct page *cpage = NULL;
	char *caddr, *paddr = NULL;
	struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
	struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
	struct fscrypt_symlink_data *sd;
	struct inode *inode = d_inode(dentry);
	loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
	u32 max_size = inode->i_sb->s_blocksize;
	int res;

	res = fscrypt_get_encryption_info(inode);
	if (res)
		return ERR_PTR(res);

	cpage = read_mapping_page(inode->i_mapping, 0, NULL);
	if (IS_ERR(cpage))
		return cpage;
	caddr = kmap(cpage);
	/* force-terminate the raw page data at i_size */
	caddr[size] = 0;

	/* Symlink is encrypted */
	sd = (struct fscrypt_symlink_data *)caddr;
	cstr.name = sd->encrypted_path;
	cstr.len = le16_to_cpu(sd->len);

	/* this is broken symlink case */
	if (unlikely(cstr.len == 0)) {
		res = -ENOENT;
		goto errout;
	}

	if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
		/* Symlink data on the disk is corrupted */
		res = -EIO;
		goto errout;
	}
	res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
	if (res)
		goto errout;

	res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
	if (res < 0)
		goto errout;

	/* this is broken symlink case */
	if (unlikely(pstr.name[0] == 0)) {
		res = -ENOENT;
		goto errout;
	}

	paddr = pstr.name;

	/* Null-terminate the name */
	paddr[res] = '\0';
	nd_set_link(nd, paddr);

	kunmap(cpage);
	page_cache_release(cpage);
	return NULL;
errout:
	fscrypt_fname_free_buffer(&pstr);
	kunmap(cpage);
	page_cache_release(cpage);
	return ERR_PTR(res);
}

/* ->put_link() counterpart: free the buffer published via nd_set_link. */
void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
		    void *cookie)
{
	char *s = nd_get_link(nd);
	if (!IS_ERR(s))
		kfree(s);
}
/*
 * ext4_encrypted_get_link - ->get_link() for encrypted symlinks
 * (older ext4-private crypto variant using struct ext4_str).
 *
 * Reads and decrypts the on-disk target into a kmalloc'd buffer
 * handed to the VFS via the delayed call (kfree_link).  RCU-walk is
 * refused (-ECHILD when @dentry is NULL).
 */
static const char *ext4_encrypted_get_link(struct dentry *dentry,
					   struct inode *inode,
					   struct delayed_call *done)
{
	struct page *cpage = NULL;
	char *caddr, *paddr = NULL;
	struct ext4_str cstr, pstr;
	struct ext4_encrypted_symlink_data *sd;
	loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
	int res;
	u32 plen, max_size = inode->i_sb->s_blocksize;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	res = ext4_get_encryption_info(inode);
	if (res)
		return ERR_PTR(res);

	if (ext4_inode_is_fast_symlink(inode)) {
		/* fast symlink: target stored inside the inode */
		caddr = (char *) EXT4_I(inode)->i_data;
		max_size = sizeof(EXT4_I(inode)->i_data);
	} else {
		cpage = read_mapping_page(inode->i_mapping, 0, NULL);
		if (IS_ERR(cpage))
			return ERR_CAST(cpage);
		caddr = page_address(cpage);
		/* force-terminate the raw page data at i_size */
		caddr[size] = 0;
	}

	/* Symlink is encrypted */
	sd = (struct ext4_encrypted_symlink_data *)caddr;
	cstr.name = sd->encrypted_path;
	cstr.len = le16_to_cpu(sd->len);
	if ((cstr.len +
	     sizeof(struct ext4_encrypted_symlink_data) - 1) > max_size) {
		/* Symlink data on the disk is corrupted */
		res = -EFSCORRUPTED;
		goto errout;
	}
	/* plaintext buffer must be at least the crypto digest size */
	plen = (cstr.len < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) ?
		EXT4_FNAME_CRYPTO_DIGEST_SIZE*2 : cstr.len;
	paddr = kmalloc(plen + 1, GFP_NOFS);
	if (!paddr) {
		res = -ENOMEM;
		goto errout;
	}
	pstr.name = paddr;
	pstr.len = plen;
	res = _ext4_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
	if (res < 0)
		goto errout;
	/* Null-terminate the name */
	if (res <= plen)
		paddr[res] = '\0';
	if (cpage)
		put_page(cpage);
	/* paddr ownership passes to the VFS; freed by kfree_link */
	set_delayed_call(done, kfree_link, paddr);
	return paddr;
errout:
	if (cpage)
		put_page(cpage);
	kfree(paddr);
	return ERR_PTR(res);
}
/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 *
 * Maintains a small rotating set of decompression buffers
 * (read_buffers[] / buffer_blocknr[] / buffer_dev[]); a hit in one of
 * them is returned directly, otherwise BLKS_PER_BUF device pages are
 * read and copied into the next buffer in rotation.  Pages that fail
 * to read are zero-filled in the buffer.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset,
			 unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer, unread;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_CACHE_SHIFT;
	offset &= PAGE_CACHE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
		blk_offset += offset;
		if (blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	unread = 0;
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page(mapping, blocknr + i, NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	/* second pass: wait for outstanding I/O and drop failed pages */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				/* asynchronous error */
				page_cache_release(page);
				pages[i] = NULL;
			}
		}
	}

	/* claim the next buffer in rotation for this block range */
	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	/* copy the pages into the buffer, zero-filling missing ones */
	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
			kunmap(page);
			page_cache_release(page);
		} else
			memset(data, 0, PAGE_CACHE_SIZE);
		data += PAGE_CACHE_SIZE;
	}
	return read_buffers[buffer] + offset;
}
/*
 * create a vfsmount to be automounted
 *
 * Reads the AFS mountpoint symlink body as the device name, builds a
 * "cell=<name>[,rwpath]" option string and mounts afs_fs_type with
 * them.  Returns the new vfsmount or an ERR_PTR.
 */
static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
{
	struct afs_super_info *super;
	struct vfsmount *mnt;
	struct page *page = NULL;
	size_t size;
	char *buf, *devname = NULL, *options = NULL;
	int ret;

	kenter("{%s}", mntpt->d_name.name);

	BUG_ON(!mntpt->d_inode);

	/* the symlink body must fit in one page (with a NUL) */
	ret = -EINVAL;
	size = mntpt->d_inode->i_size;
	if (size > PAGE_SIZE - 1)
		goto error;

	ret = -ENOMEM;
	devname = (char *) get_zeroed_page(GFP_KERNEL);
	if (!devname)
		goto error;

	options = (char *) get_zeroed_page(GFP_KERNEL);
	if (!options)
		goto error;

	/* read the contents of the AFS special symlink */
	page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
	if (IS_ERR(page)) {
		ret = PTR_ERR(page);
		goto error;
	}

	ret = -EIO;
	wait_on_page_locked(page);
	if (!PageUptodate(page) || PageError(page))
		goto error;

	buf = kmap(page);
	memcpy(devname, buf, size);
	kunmap(page);
	page_cache_release(page);
	page = NULL;

	/* work out what options we want */
	super = AFS_FS_S(mntpt->d_sb);
	memcpy(options, "cell=", 5);
	strcpy(options + 5, super->volume->cell->name);
	if (super->volume->type == AFSVL_RWVOL)
		strcat(options, ",rwpath");

	/* try and do the mount */
	kdebug("--- attempting mount %s -o %s ---", devname, options);
	mnt = vfs_kern_mount(&afs_fs_type, 0, devname, options);
	kdebug("--- mount result %p ---", mnt);

	free_page((unsigned long) devname);
	free_page((unsigned long) options);
	kleave(" = %p", mnt);
	return mnt;

error:
	if (page)
		page_cache_release(page);
	if (devname)
		free_page((unsigned long) devname);
	if (options)
		free_page((unsigned long) options);
	kleave(" = %d", ret);
	return ERR_PTR(ret);
} /* end afs_mntpt_do_automount() */
/*
 * get_read_data_page - read one data page of an f2fs inode.
 *
 * Encrypted regular files go straight through read_mapping_page()
 * (the a_ops readpage handles decryption).  Otherwise the block
 * address is resolved via the extent cache or the dnode, unwritten
 * NEW_ADDR blocks are zero-filled, and a bio is submitted for real
 * blocks.  Returns the page (locked unless returned via the uptodate
 * paths that unlock it) or an ERR_PTR.
 */
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* fast path: block address already known from the extent cache */
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such the case, its blkaddr can be remained as NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * hfsplus_block_allocate - find and mark a run of free blocks in the
 * allocation bitmap.
 *
 * @sb:     superblock of the volume
 * @size:   total number of allocation blocks (upper bound for the scan)
 * @offset: block number at which to start scanning
 * @max:    in: maximum run length wanted; out: length actually allocated
 *
 * Scans the allocation-file bitmap (big-endian, one bit per block, MSB
 * first within each 32-bit word) from @offset for the first zero bit,
 * then sets up to *@max consecutive bits starting there.  Pages of the
 * bitmap are read through the pagecache and kmapped one at a time.
 *
 * Returns the first allocated block number, or @size if the bitmap is
 * full or a bitmap page could not be read.  Serialized by alloc_mutex.
 *
 * Fix: the inner scan initialized the mask with "1 << 31", which
 * left-shifts into the sign bit of a signed int — undefined behavior in
 * C.  Use 1U << 31 as the other two mask initializations in this
 * function already do; the resulting value is identical.
 */
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct page *page;
        struct address_space *mapping;
        __be32 *pptr, *curr, *end;
        u32 mask, start, len, n;
        __be32 val;
        int i;

        len = *max;
        if (!len)
                return size;

        dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
        mutex_lock(&sbi->alloc_mutex);
        mapping = sbi->alloc_file->i_mapping;
        page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
        if (IS_ERR(page)) {
                start = size;
                goto out;
        }
        pptr = kmap(page);
        curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
        i = offset % 32;
        offset &= ~(PAGE_CACHE_BITS - 1);
        /* end points past the last word to scan in this page: a full page
         * unless this is the final, partial bitmap page */
        if ((size ^ offset) / PAGE_CACHE_BITS)
                end = pptr + PAGE_CACHE_BITS / 32;
        else
                end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

        /* scan the first partial u32 for zero bits */
        val = *curr;
        if (~val) {
                n = be32_to_cpu(val);
                mask = (1U << 31) >> i;
                for (; i < 32; mask >>= 1, i++) {
                        if (!(n & mask))
                                goto found;
                }
        }
        curr++;

        /* scan complete u32s for the first zero bit */
        while (1) {
                while (curr < end) {
                        val = *curr;
                        if (~val) {
                                n = be32_to_cpu(val);
                                mask = 1U << 31;        /* was "1 << 31": UB */
                                for (i = 0; i < 32; mask >>= 1, i++) {
                                        if (!(n & mask))
                                                goto found;
                                }
                        }
                        curr++;
                }
                kunmap(page);
                offset += PAGE_CACHE_BITS;
                if (offset >= size)
                        break;
                page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
                                         NULL);
                if (IS_ERR(page)) {
                        start = size;
                        goto out;
                }
                curr = pptr = kmap(page);
                if ((size ^ offset) / PAGE_CACHE_BITS)
                        end = pptr + PAGE_CACHE_BITS / 32;
                else
                        end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
        }
        dprint(DBG_BITMAP, "bitmap full\n");
        start = size;
        goto out;

found:
        start = offset + (curr - pptr) * 32 + i;
        if (start >= size) {
                dprint(DBG_BITMAP, "bitmap full\n");
                goto out;
        }
        /* do any partial u32 at the start */
        len = min(size - start, len);
        while (1) {
                n |= mask;
                if (++i >= 32)
                        break;
                mask >>= 1;
                if (!--len || n & mask)
                        goto done;
        }
        if (!--len)
                goto done;
        *curr++ = cpu_to_be32(n);
        /* do full u32s */
        while (1) {
                while (curr < end) {
                        n = be32_to_cpu(*curr);
                        if (len < 32)
                                goto last;
                        if (n) {
                                /* ran into an allocated bit: stop here and
                                 * finish the word in the "last" handler */
                                len = 32;
                                goto last;
                        }
                        *curr++ = cpu_to_be32(0xffffffff);
                        len -= 32;
                }
                set_page_dirty(page);
                kunmap(page);
                offset += PAGE_CACHE_BITS;
                page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
                                         NULL);
                if (IS_ERR(page)) {
                        start = size;
                        goto out;
                }
                pptr = kmap(page);
                curr = pptr;
                end = pptr + PAGE_CACHE_BITS / 32;
        }
last:
        /* do any partial u32 at end */
        mask = 1U << 31;
        for (i = 0; i < len; i++) {
                if (n & mask)
                        break;
                n |= mask;
                mask >>= 1;
        }
done:
        *curr = cpu_to_be32(n);
        set_page_dirty(page);
        kunmap(page);
        *max = offset + (curr - pptr) * 32 + i - start;
        sbi->free_blocks -= *max;
        sb_mark_dirty(sb);
        dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
        mutex_unlock(&sbi->alloc_mutex);
        return start;
}
/*
 * f2fs_get_parent - find the parent directory ("..") of @child and return a
 * connected dentry for it.  Used by exportfs / NFS-style lookup.
 */
struct dentry *f2fs_get_parent(struct dentry *child)
{
        struct qstr dotdot = {.len = 2, .name = ".."};
        unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);
        if (!ino)
                return ERR_PTR(-ENOENT);
        return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
}

/*
 * __recover_dot_dentries - make sure @dir has on-disk "." and ".." entries,
 * adding any missing one ("." -> @dir itself, ".." -> @pino).  On success
 * the FI_INLINE_DOTS flag is cleared.  Runs under f2fs_lock_op().
 */
static int __recover_dot_dentries(struct inode *dir, nid_t pino)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct qstr dot = {.len = 1, .name = "."};
        struct qstr dotdot = {.len = 2, .name = ".."};
        struct f2fs_dir_entry *de;
        struct page *page;
        int err = 0;

        f2fs_lock_op(sbi);

        de = f2fs_find_entry(dir, &dot, &page, 0);
        if (de) {
                /* "." already exists: just release the looked-up page */
                f2fs_dentry_kunmap(dir, page);
                f2fs_put_page(page, 0);
        } else {
                err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
                if (err)
                        goto out;
        }

        de = f2fs_find_entry(dir, &dotdot, &page, 0);
        if (de) {
                f2fs_dentry_kunmap(dir, page);
                f2fs_put_page(page, 0);
        } else {
                err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
        }
out:
        if (!err) {
                clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS);
                mark_inode_dirty(dir);
        }

        f2fs_unlock_op(sbi);
        return err;
}

/*
 * f2fs_lookup - ->lookup(): resolve @dentry in @dir.  A missing entry is a
 * negative dentry (d_splice_alias with NULL inode).  Directories flagged
 * with inline dots get their "."/".." entries recovered first.
 */
static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
                struct nameidata *nd)
{
        struct inode *inode = NULL;
        struct f2fs_dir_entry *de;
        struct page *page;
        nid_t ino;
        int err = 0;

        if (dentry->d_name.len > F2FS_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);

        de = f2fs_find_entry(dir, &dentry->d_name, &page, nd ? nd->flags : 0);
        if (!de)
                /* inode is NULL here: install a negative dentry */
                return d_splice_alias(inode, dentry);

        ino = le32_to_cpu(de->ino);
        f2fs_dentry_kunmap(dir, page);
        f2fs_put_page(page, 0);

        inode = f2fs_iget(dir->i_sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);

        if (f2fs_has_inline_dots(inode)) {
                err = __recover_dot_dentries(inode, dir->i_ino);
                if (err)
                        goto err_out;
        }
        return d_splice_alias(inode, dentry);

err_out:
        iget_failed(inode);
        return ERR_PTR(err);
}

/*
 * f2fs_unlink - ->unlink(): remove @dentry's directory entry from @dir.
 * An orphan slot is reserved under f2fs_lock_op() before the entry is
 * deleted, so a crash cannot leave an unreferenced inode unrecorded.
 */
static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct inode *inode = dentry->d_inode;
        struct f2fs_dir_entry *de;
        struct page *page;
        int err = -ENOENT;

        trace_f2fs_unlink_enter(dir, dentry);
        f2fs_balance_fs(sbi);

        de = f2fs_find_entry(dir, &dentry->d_name, &page, 0);
        if (!de)
                goto fail;

        f2fs_lock_op(sbi);
        err = acquire_orphan_inode(sbi);
        if (err) {
                f2fs_unlock_op(sbi);
                f2fs_dentry_kunmap(dir, page);
                f2fs_put_page(page, 0);
                goto fail;
        }
        /* f2fs_delete_entry() consumes the entry page reference */
        f2fs_delete_entry(de, page, dir, inode);
        f2fs_unlock_op(sbi);

        /* In order to evict this inode, we set it dirty */
        mark_inode_dirty(inode);

        if (IS_DIRSYNC(dir))
                f2fs_sync_fs(sbi->sb, 1);
fail:
        trace_f2fs_unlink_exit(inode, err);
        return err;
}

/*
 * f2fs_follow_link - ->follow_link() for plain (unencrypted) symlinks.
 * Delegates to page_follow_link_light() but treats an empty target as a
 * broken symlink (-ENOENT), releasing the page itself in that case.
 */
static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct page *page;

        page = page_follow_link_light(dentry, nd);
        if (IS_ERR(page))
                return page;

        /* this is broken symlink case */
        if (*nd_get_link(nd) == 0) {
                kunmap(page);
                page_cache_release(page);
                return ERR_PTR(-ENOENT);
        }
        return page;
}

/*
 * f2fs_symlink - ->symlink(): create a symlink @dentry in @dir pointing at
 * @symname.  For encrypted directories the target is encrypted and wrapped
 * in a f2fs_encrypted_symlink_data blob before being written with
 * page_symlink().  Note the control flow: failure of f2fs_add_link() goes
 * to "out" (undoing the new inode); later failures fall through "err_out",
 * which still instantiates the dentry and returns the error.
 */
static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
                const char *symname)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct inode *inode;
        size_t len = strlen(symname);
        size_t p_len;
        char *p_str;
        struct f2fs_str disk_link = FSTR_INIT(NULL, 0);
        struct f2fs_encrypted_symlink_data *sd = NULL;
        int err;

        if (len > dir->i_sb->s_blocksize)
                return -ENAMETOOLONG;

        f2fs_balance_fs(sbi);

        inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        if (f2fs_encrypted_inode(inode))
                inode->i_op = &f2fs_encrypted_symlink_inode_operations;
        else
                inode->i_op = &f2fs_symlink_inode_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;

        f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
        if (err)
                goto out;
        f2fs_unlock_op(sbi);
        alloc_nid_done(sbi, inode->i_ino);

        if (f2fs_encrypted_inode(dir)) {
                struct qstr istr = QSTR_INIT(symname, len);

                err = f2fs_get_encryption_info(inode);
                if (err)
                        goto err_out;

                err = f2fs_fname_crypto_alloc_buffer(inode, len, &disk_link);
                if (err)
                        goto err_out;

                err = f2fs_fname_usr_to_disk(inode, &istr, &disk_link);
                if (err < 0)
                        goto err_out;

                /* on-disk blob: header + ciphertext + NUL */
                p_len = encrypted_symlink_data_len(disk_link.len) + 1;

                if (p_len > dir->i_sb->s_blocksize) {
                        err = -ENAMETOOLONG;
                        goto err_out;
                }

                sd = kzalloc(p_len, GFP_NOFS);
                if (!sd) {
                        err = -ENOMEM;
                        goto err_out;
                }
                memcpy(sd->encrypted_path, disk_link.name, disk_link.len);
                sd->len = cpu_to_le16(disk_link.len);
                p_str = (char *)sd;
        } else {
                p_len = len + 1;
                p_str = (char *)symname;
        }

        err = page_symlink(inode, p_str, p_len);

err_out:
        d_instantiate(dentry, inode);
        unlock_new_inode(inode);

        /*
         * Let's flush symlink data in order to avoid broken symlink as much as
         * possible. Nevertheless, fsyncing is the best way, but there is no
         * way to get a file descriptor in order to flush that.
         *
         * Note that, it needs to do dir->fsync to make this recoverable.
         * If the symlink path is stored into inline_data, there is no
         * performance regression.
         */
        if (!err)
                filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1);

        if (IS_DIRSYNC(dir))
                f2fs_sync_fs(sbi->sb, 1);

        kfree(sd);
        f2fs_fname_crypto_free_buffer(&disk_link);
        return err;
out:
        handle_failed_inode(inode);
        return err;
}

/*
 * f2fs_mkdir - ->mkdir(): create directory @dentry in @dir with @mode.
 * FI_INC_LINK makes the new inode carry the extra link for its "." entry;
 * it is cleared again if linking into the parent fails.
 */
static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct inode *inode;
        int err;

        f2fs_balance_fs(sbi);

        inode = f2fs_new_inode(dir, S_IFDIR | mode);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        inode->i_op = &f2fs_dir_inode_operations;
        inode->i_fop = &f2fs_dir_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);

        set_inode_flag(F2FS_I(inode), FI_INC_LINK);
        f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
        if (err)
                goto out_fail;
        f2fs_unlock_op(sbi);

        alloc_nid_done(sbi, inode->i_ino);

        d_instantiate(dentry, inode);
        unlock_new_inode(inode);

        if (IS_DIRSYNC(dir))
                f2fs_sync_fs(sbi->sb, 1);
        return 0;

out_fail:
        clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
        handle_failed_inode(inode);
        return err;
}

/*
 * f2fs_rmdir - ->rmdir(): remove directory @dentry from @dir; only an
 * empty directory may be removed, otherwise -ENOTEMPTY.
 */
static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;
        if (f2fs_empty_dir(inode))
                return f2fs_unlink(dir, dentry);
        return -ENOTEMPTY;
}

/*
 * f2fs_mknod - ->mknod(): create a special file (device node, fifo,
 * socket) @dentry in @dir.
 */
static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
                                        int mode, dev_t rdev)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct inode *inode;
        int err = 0;

        if (!new_valid_dev(rdev))
                return -EINVAL;

        f2fs_balance_fs(sbi);

        inode = f2fs_new_inode(dir, mode);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        init_special_inode(inode, inode->i_mode, rdev);
        inode->i_op = &f2fs_special_inode_operations;

        f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
        if (err)
                goto out;
        f2fs_unlock_op(sbi);

        alloc_nid_done(sbi, inode->i_ino);
        d_instantiate(dentry, inode);
        unlock_new_inode(inode);

        if (IS_DIRSYNC(dir))
                f2fs_sync_fs(sbi->sb, 1);
        return 0;
out:
        handle_failed_inode(inode);
        return err;
}

/*
 * f2fs_rename - ->rename(): move @old_dentry from @old_dir to @new_dentry
 * in @new_dir, replacing an existing target if present.  Cross-directory
 * moves into an encrypted directory require a consistent encryption
 * context.  When a target exists, an orphan slot is reserved so its inode
 * can be reclaimed if its link count drops to zero.  Page references
 * (old_page, old_dir_page, new_page) are released on the labeled unwind
 * paths in reverse order of acquisition.
 */
static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                struct inode *new_dir, struct dentry *new_dentry)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
        struct inode *old_inode = old_dentry->d_inode;
        struct inode *new_inode = new_dentry->d_inode;
        struct page *old_dir_page;
        struct page *old_page, *new_page;
        struct f2fs_dir_entry *old_dir_entry = NULL;
        struct f2fs_dir_entry *old_entry;
        struct f2fs_dir_entry *new_entry;
        int err = -ENOENT;

        if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
                !f2fs_is_child_context_consistent_with_parent(new_dir,
                                                        old_inode)) {
                err = -EPERM;
                goto out;
        }

        f2fs_balance_fs(sbi);

        old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page, 0);
        if (!old_entry)
                goto out;

        if (S_ISDIR(old_inode->i_mode)) {
                err = -EIO;
                old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
                if (!old_dir_entry)
                        goto out_old;
        }

        if (new_inode) {
                /* target exists: it will be overwritten */
                err = -ENOTEMPTY;
                if (old_dir_entry && !f2fs_empty_dir(new_inode))
                        goto out_dir;

                err = -ENOENT;
                new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
                                                &new_page, 0);
                if (!new_entry)
                        goto out_dir;

                f2fs_lock_op(sbi);

                err = acquire_orphan_inode(sbi);
                if (err)
                        goto put_out_dir;

                if (update_dent_inode(old_inode, new_inode,
                                                &new_dentry->d_name)) {
                        release_orphan_inode(sbi);
                        goto put_out_dir;
                }

                f2fs_set_link(new_dir, new_entry, new_page, old_inode);

                new_inode->i_ctime = CURRENT_TIME;
                down_write(&F2FS_I(new_inode)->i_sem);
                /* a replaced directory loses its ".." back-link too */
                if (old_dir_entry)
                        drop_nlink(new_inode);
                drop_nlink(new_inode);
                up_write(&F2FS_I(new_inode)->i_sem);

                mark_inode_dirty(new_inode);

                if (!new_inode->i_nlink)
                        add_orphan_inode(sbi, new_inode->i_ino);
                else
                        release_orphan_inode(sbi);

                update_inode_page(old_inode);
                update_inode_page(new_inode);
        } else {
                f2fs_lock_op(sbi);

                err = f2fs_add_link(new_dentry, old_inode);
                if (err) {
                        f2fs_unlock_op(sbi);
                        goto out_dir;
                }

                /* moving a directory in adds a ".." link to new_dir */
                if (old_dir_entry) {
                        inc_nlink(new_dir);
                        update_inode_page(new_dir);
                }
        }

        down_write(&F2FS_I(old_inode)->i_sem);
        file_lost_pino(old_inode);
        if (new_inode && file_enc_name(new_inode))
                file_set_enc_name(old_inode);
        up_write(&F2FS_I(old_inode)->i_sem);

        old_inode->i_ctime = CURRENT_TIME;
        mark_inode_dirty(old_inode);

        f2fs_delete_entry(old_entry, old_page, old_dir, NULL);

        if (old_dir_entry) {
                if (old_dir != new_dir) {
                        /* repoint the moved directory's ".." at new_dir */
                        f2fs_set_link(old_inode, old_dir_entry,
                                                old_dir_page, new_dir);
                        update_inode_page(old_inode);
                } else {
                        f2fs_dentry_kunmap(old_inode, old_dir_page);
                        f2fs_put_page(old_dir_page, 0);
                }
                drop_nlink(old_dir);
                mark_inode_dirty(old_dir);
                update_inode_page(old_dir);
        }

        f2fs_unlock_op(sbi);

        if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
                f2fs_sync_fs(sbi->sb, 1);
        return 0;

put_out_dir:
        f2fs_unlock_op(sbi);
        f2fs_dentry_kunmap(new_dir, new_page);
        f2fs_put_page(new_page, 0);
out_dir:
        if (old_dir_entry) {
                f2fs_dentry_kunmap(old_inode, old_dir_page);
                f2fs_put_page(old_dir_page, 0);
        }
out_old:
        f2fs_dentry_kunmap(old_dir, old_page);
        f2fs_put_page(old_page, 0);
out:
        return err;
}

#ifdef CONFIG_F2FS_FS_ENCRYPTION
/*
 * f2fs_encrypted_follow_link - ->follow_link() for encrypted symlinks.
 * Reads the on-disk f2fs_encrypted_symlink_data blob from page 0,
 * validates it, decrypts it into a freshly allocated buffer (handed to
 * nd_set_link and later freed by kfree_put_link), and releases the page.
 */
static void *f2fs_encrypted_follow_link(struct dentry *dentry,
                                                struct nameidata *nd)
{
        struct page *cpage = NULL;
        char *caddr, *paddr = NULL;
        struct f2fs_str cstr;
        struct f2fs_str pstr = FSTR_INIT(NULL, 0);
        struct inode *inode = dentry->d_inode;
        struct f2fs_encrypted_symlink_data *sd;
        loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
        u32 max_size = inode->i_sb->s_blocksize;
        int res;

        res = f2fs_get_encryption_info(inode);
        if (res)
                return ERR_PTR(res);

        cpage = read_mapping_page(inode->i_mapping, 0, NULL);
        if (IS_ERR(cpage))
                return cpage;
        caddr = kmap(cpage);
        /* size is capped at PAGE_SIZE - 1 so this NUL stays in the page */
        caddr[size] = 0;

        /* Symlink is encrypted */
        sd = (struct f2fs_encrypted_symlink_data *)caddr;
        cstr.name = sd->encrypted_path;
        cstr.len = le16_to_cpu(sd->len);

        /* this is broken symlink case */
        if (cstr.name[0] == 0 && cstr.len == 0) {
                res = -ENOENT;
                goto errout;
        }

        if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
                                                                max_size) {
                /* Symlink data on the disk is corrupted */
                res = -EIO;
                goto errout;
        }
        res = f2fs_fname_crypto_alloc_buffer(inode, cstr.len, &pstr);
        if (res)
                goto errout;

        res = f2fs_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
        if (res < 0)
                goto errout;

        paddr = pstr.name;

        /* Null-terminate the name */
        paddr[res] = '\0';
        nd_set_link(nd, paddr);

        kunmap(cpage);
        page_cache_release(cpage);
        return NULL;
errout:
        f2fs_fname_crypto_free_buffer(&pstr);
        kunmap(cpage);
        page_cache_release(cpage);
        return ERR_PTR(res);
}

/*
 * kfree_put_link - ->put_link() companion for f2fs_encrypted_follow_link:
 * frees the kmalloc'ed decrypted path stashed via nd_set_link().
 */
void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
                void *cookie)
{
        char *s = nd_get_link(nd);
        if (!IS_ERR(s))
                kfree(s);
}

/* inode operations for encrypted symlinks */
const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
        .readlink = generic_readlink,
        .follow_link = f2fs_encrypted_follow_link,
        .put_link = kfree_put_link,
        .getattr = f2fs_getattr,
        .setattr = f2fs_setattr,
        .setxattr = generic_setxattr,
        .getxattr = generic_getxattr,
        .listxattr = f2fs_listxattr,
        .removexattr = generic_removexattr,
};
#endif

/* inode operations for directories */
const struct inode_operations f2fs_dir_inode_operations = {
        .create = f2fs_create,
        .lookup = f2fs_lookup,
        .link = f2fs_link,
        .unlink = f2fs_unlink,
        .symlink = f2fs_symlink,
        .mkdir = f2fs_mkdir,
        .rmdir = f2fs_rmdir,
        .mknod = f2fs_mknod,
        .rename = f2fs_rename,
        .getattr = f2fs_getattr,
        .setattr = f2fs_setattr,
        .check_acl = f2fs_check_acl,
#ifdef CONFIG_F2FS_FS_XATTR
        .setxattr = generic_setxattr,
        .getxattr = generic_getxattr,
        .listxattr = f2fs_listxattr,
        .removexattr = generic_removexattr,
#endif
};

/* inode operations for plain (unencrypted) symlinks */
const struct inode_operations f2fs_symlink_inode_operations = {
        .readlink = generic_readlink,
        .follow_link = f2fs_follow_link,
        .put_link = page_put_link,
        .getattr = f2fs_getattr,
        .setattr = f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
        .setxattr = generic_setxattr,
        .getxattr = generic_getxattr,
        .listxattr = f2fs_listxattr,
        .removexattr = generic_removexattr,
#endif
};

/* inode operations for special files (device nodes, fifos, sockets) */
const struct inode_operations f2fs_special_inode_operations = {
        .getattr = f2fs_getattr,
        .setattr = f2fs_setattr,
        .check_acl = f2fs_check_acl,
#ifdef CONFIG_F2FS_FS_XATTR
        .setxattr = generic_setxattr,
        .getxattr = generic_getxattr,
        .listxattr = f2fs_listxattr,
        .removexattr = generic_removexattr,
#endif
};
/*
 * ttm_tt_swapout - copy a TTM's backing pages out to swap storage.
 *
 * @ttm: the TTM whose pages are to be swapped out; must be unbound (or
 *       unpopulated) and cached, enforced by the BUG_ONs below.
 * @persistant_swap_storage: optional caller-provided swap file; if NULL,
 *       an anonymous shmem file is created to hold the pages.
 *
 * Each allocated page is copied into the corresponding page of the swap
 * file's mapping, the TTM's own pages are freed, and the TTM is marked
 * TTM_PAGE_FLAG_SWAPPED with ttm->swap_storage recording where the data
 * went.  Returns 0 on success or a negative errno; on failure a
 * locally-created shmem file is released again.
 */
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        /*
         * For user buffers, just unpin the pages, as there should be
         * vma references.
         */

        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ttm_tt_free_user_pages(ttm);
                ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
                ttm->swap_storage = NULL;
                return 0;
        }

        if (!persistant_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (unlikely(IS_ERR(swap_storage))) {
                        printk(KERN_ERR "Failed allocating swap storage.\n");
                        return PTR_ERR(swap_storage);
                }
        } else
                swap_storage = persistant_swap_storage;

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
                /* holes in the page array are simply skipped */
                if (unlikely(from_page == NULL))
                        continue;
                to_page = read_mapping_page(swap_space, i, NULL);
                if (unlikely(IS_ERR(to_page))) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                /* atomic kmaps require preemption disabled for the copy */
                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual, KM_USER1);
                kunmap_atomic(from_virtual, KM_USER0);
                preempt_enable();
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                page_cache_release(to_page);
        }

        ttm_tt_free_alloced_pages(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistant_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

        return 0;
out_err:
        /* only drop the file if we created it ourselves */
        if (!persistant_swap_storage)
                fput(swap_storage);

        return ret;
}
/*
 * create a vfsmount to be automounted
 *
 * Builds an AFS device name either from the mountpoint's own name (for
 * pseudo directories, prefixing '%' for a dotted name / '#' otherwise and
 * appending ":root.cell.") or from the mountpoint symlink body read via
 * the pagecache, then submounts afs_fs_type with cell/rwpath options
 * derived from the superblock.  Returns the vfsmount or an ERR_PTR.
 */
static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
{
        struct afs_super_info *as;
        struct vfsmount *mnt;
        struct afs_vnode *vnode;
        struct page *page;
        char *devname, *options;
        bool rwpath = false;
        int ret;

        _enter("{%pd}", mntpt);

        BUG_ON(!d_inode(mntpt));

        /* get_zeroed_page() guarantees NUL-terminated string buffers */
        ret = -ENOMEM;
        devname = (char *) get_zeroed_page(GFP_KERNEL);
        if (!devname)
                goto error_no_devname;

        options = (char *) get_zeroed_page(GFP_KERNEL);
        if (!options)
                goto error_no_options;

        vnode = AFS_FS_I(d_inode(mntpt));
        if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
                /* if the directory is a pseudo directory, use the d_name */
                static const char afs_root_cell[] = ":root.cell.";
                unsigned size = mntpt->d_name.len;

                ret = -ENOENT;
                if (size < 2 || size > AFS_MAXCELLNAME)
                        goto error_no_page;

                if (mntpt->d_name.name[0] == '.') {
                        /* leading '.' means a read-write cell path */
                        devname[0] = '%';
                        memcpy(devname + 1, mntpt->d_name.name + 1, size - 1);
                        memcpy(devname + size, afs_root_cell,
                               sizeof(afs_root_cell));
                        rwpath = true;
                } else {
                        devname[0] = '#';
                        memcpy(devname + 1, mntpt->d_name.name, size);
                        memcpy(devname + size + 1, afs_root_cell,
                               sizeof(afs_root_cell));
                }
        } else {
                /* read the contents of the AFS special symlink */
                loff_t size = i_size_read(d_inode(mntpt));
                char *buf;

                /* body plus NUL must fit in the devname page */
                ret = -EINVAL;
                if (size > PAGE_SIZE - 1)
                        goto error_no_page;

                page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto error_no_page;
                }

                if (PageError(page)) {
                        ret = afs_bad(AFS_FS_I(d_inode(mntpt)),
                                      afs_file_error_mntpt);
                        goto error;
                }

                buf = kmap_atomic(page);
                memcpy(devname, buf, size);
                kunmap_atomic(buf);
                put_page(page);
                page = NULL;
        }

        /* work out what options we want */
        as = AFS_FS_S(mntpt->d_sb);
        if (as->cell) {
                memcpy(options, "cell=", 5);
                strcpy(options + 5, as->cell->name);
                if ((as->volume && as->volume->type == AFSVL_RWVOL) || rwpath)
                        strcat(options, ",rwpath");
        }

        /* try and do the mount */
        _debug("--- attempting mount %s -o %s ---", devname, options);
        mnt = vfs_submount(mntpt, &afs_fs_type, devname, options);
        _debug("--- mount result %p ---", mnt);

        free_page((unsigned long) devname);
        free_page((unsigned long) options);
        _leave(" = %p", mnt);
        return mnt;

        /* unwind labels release only what was acquired before the jump */
error:
        put_page(page);
error_no_page:
        free_page((unsigned long) options);
error_no_options:
        free_page((unsigned long) devname);
error_no_devname:
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}
/* for every page of file: read page, cut part of extent pointing to this page, put data of page tree by tail item */ int extent2tail(struct file * file, struct unix_file_info *uf_info) { int result; struct inode *inode; struct page *page; unsigned long num_pages, i; unsigned long start_page; reiser4_key from; reiser4_key to; unsigned count; __u64 offset; assert("nikita-3362", ea_obtained(uf_info)); inode = unix_file_info_to_inode(uf_info); assert("nikita-3412", !IS_RDONLY(inode)); assert("vs-1649", uf_info->container != UF_CONTAINER_TAILS); assert("", !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)); offset = 0; if (reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) { /* * file is marked on disk as there was a conversion which did * not complete due to either crash or some error. Find which * offset tail conversion stopped at */ result = find_start(inode, EXTENT_POINTER_ID, &offset); if (result == -ENOENT) { /* no extent found, everything is converted */ uf_info->container = UF_CONTAINER_TAILS; complete_conversion(inode); return 0; } else if (result != 0) /* some other error */ return result; } reiser4_inode_set_flag(inode, REISER4_PART_IN_CONV); /* number of pages in the file */ num_pages = (inode->i_size + - offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; start_page = offset >> PAGE_CACHE_SHIFT; inode_file_plugin(inode)->key_by_inode(inode, offset, &from); to = from; result = 0; for (i = 0; i < num_pages; i++) { __u64 start_byte; result = reserve_extent2tail_iteration(inode); if (result != 0) break; if (i == 0 && offset == 0) { reiser4_inode_set_flag(inode, REISER4_PART_MIXED); reiser4_update_sd(inode); } page = read_mapping_page(inode->i_mapping, (unsigned)(i + start_page), NULL); if (IS_ERR(page)) { result = PTR_ERR(page); break; } wait_on_page_locked(page); if (!PageUptodate(page)) { page_cache_release(page); result = RETERR(-EIO); break; } /* cut part of file we have read */ start_byte = (__u64) ((i + start_page) << PAGE_CACHE_SHIFT); 
set_key_offset(&from, start_byte); set_key_offset(&to, start_byte + PAGE_CACHE_SIZE - 1); /* * reiser4_cut_tree_object() returns -E_REPEAT to allow atom * commits during over-long truncates. But * extent->tail conversion should be performed in one * transaction. */ result = reiser4_cut_tree(reiser4_tree_by_inode(inode), &from, &to, inode, 0); if (result) { page_cache_release(page); break; } /* put page data into tree via tail_write */ count = PAGE_CACHE_SIZE; if ((i == (num_pages - 1)) && (inode->i_size & ~PAGE_CACHE_MASK)) /* last page can be incompleted */ count = (inode->i_size & ~PAGE_CACHE_MASK); while (count) { loff_t pos = start_byte; assert("edward-1537", file != NULL && file->f_dentry != NULL); assert("edward-1538", file->f_dentry->d_inode == inode); result = reiser4_write_tail(file, inode, (char __user *)kmap(page), count, &pos); reiser4_free_file_fsdata(file); if (result <= 0) { warning("", "reiser4_write_tail failed"); page_cache_release(page); reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); return result; } count -= result; } /* release page */ lock_page(page); /* page is already detached from jnode and mapping. */ assert("vs-1086", page->mapping == NULL); assert("nikita-2690", (!PagePrivate(page) && jprivate(page) == 0)); /* waiting for writeback completion with page lock held is * perfectly valid. */ wait_on_page_writeback(page); reiser4_drop_page(page); /* release reference taken by read_cache_page() above */ page_cache_release(page); drop_exclusive_access(uf_info); /* * throttle the conversion. 
* FIXME-EDWARD: Calculate and pass the precise number * of pages that was dirtied */ reiser4_throttle_write(inode, 1); get_exclusive_access(uf_info); /* * nobody is allowed to complete conversion but a process which * started it */ assert("", reiser4_inode_get_flag(inode, REISER4_PART_MIXED)); } reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV); if (i == num_pages) { /* file is converted to formatted items */ assert("vs-1698", reiser4_inode_get_flag(inode, REISER4_PART_MIXED)); assert("vs-1260", inode_has_no_jnodes(reiser4_inode_data(inode))); uf_info->container = UF_CONTAINER_TAILS; complete_conversion(inode); return 0; } /* * conversion is not complete. Inode was already marked as * REISER4_PART_MIXED and stat-data were updated at the first * iteration of the loop above. */ warning("nikita-2282", "Partial conversion of %llu: %lu of %lu: %i", (unsigned long long)get_inode_oid(inode), i, num_pages, result); /* this flag should be cleared, otherwise get_exclusive_access_careful() will fall into infinite loop */ assert("edward-1550", !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV)); return result; }
/*
 * exfat_parse_upcase_table - read and validate the on-disk upper-case table.
 *
 * @inode:    inode of the upcase-table file
 * @checksum: expected table checksum from the directory entry
 *
 * The on-disk table is a sequence of little-endian u16 values where
 * UPCASE_HOLE introduces a run-length "skip" (the next u16 is the number
 * of identity-mapped code points).  The parser flattens the mapped runs
 * into @tbl and temporarily stashes each run's (start, len) pair at the
 * END of @tbl (positions tbl_end, tbl_end-1, ... downward) — the holes
 * themselves are never stored, so that space is free.  Afterwards the
 * ranges are materialized into an exfat upcase descriptor published in
 * sbi->upcase.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the three exfat_fs_panic() paths below jump to "error"
 * with err == 0 (set just before the checks), so a corrupt table frees
 * @tbl yet reports success while sbi->upcase stays unset — confirm
 * whether callers tolerate that or whether err should be negative there.
 */
static int exfat_parse_upcase_table(struct inode *inode, u32 checksum)
{
        struct super_block *sb = inode->i_sb;
        struct exfat_sb_info *sbi = EXFAT_SB(sb);
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        struct upcase *upcase;
        u16 *tbl;
        loff_t left;
        u32 csum;
        u16 uc, src_start, src_len, tbl_pos, tbl_end;
        int i, err, size, overflow, is_hole, range_cnt;

        tbl = exfat_upcase_table_alloc(inode->i_size);
        if (!tbl)
                return -ENOMEM;

        /* FIXME: readahead all data of upper-case table */
        csum = 0;
        uc = src_start = src_len = 0;
        overflow = tbl_pos = is_hole = range_cnt = 0;
        /* last u16 slot of the table, used as base of the temp range stack;
         * NOTE(review): tbl_end is u16 — assumes i_size / 2 fits in 16 bits,
         * verify against the size validation done by the caller */
        tbl_end = (inode->i_size / sizeof(u16)) - 1;
        index = 0;
        left = inode->i_size;
        while (left > 0 && !overflow) {
                struct page *page;
                void *kaddr;

                page = read_mapping_page(mapping, index, NULL);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto error;
                }
                kaddr = kmap_atomic(page, KM_USER0);

                size = min_t(loff_t, left, PAGE_CACHE_SIZE);
                for (i = 0; i < size; i += sizeof(u16)) {
                        uc = le16_to_cpu(*(__le16 *)(kaddr + i));
                        left -= sizeof(u16);

                        if (is_hole) {
                                /* this u16 is the skip length that follows
                                 * an UPCASE_HOLE marker */
                                u16 skiplen = uc;
                                u16 end_pos = tbl_end - range_cnt * 2;

                                if (src_start + src_len + skiplen >=
                                                                MAX_CODE) {
                                        overflow = 1;
                                        break;
                                }

                                /*
                                 * Save range temporarily at end of table.
                                 * UPCASE_HOLE and skiplen aren't saved into
                                 * tbl, so there is space for those.
                                 */
                                tbl[end_pos] = src_start;
                                tbl[end_pos - 1] = src_len;
                                /* If it's bogus empty range, ignore it. */
                                if (src_len)
                                        range_cnt++;

                                is_hole = 0;
                                src_start += src_len + skiplen;
                                src_len = 0;
                        } else if (uc == UPCASE_HOLE && left >= sizeof(u16))
                                is_hole = 1;
                        else {
                                tbl[tbl_pos] = uc;
                                tbl_pos++;
                                src_len++;
                        }
                }
                csum = exfat_checksum32(csum, kaddr, size);

                kunmap_atomic(kaddr, KM_USER0);
                page_cache_release(page);
                index++;
        }

        err = 0;
        if (overflow) {
                exfat_fs_panic(sb, "invalid upper-case table");
                goto error;
        } else if (checksum != csum) {
                exfat_fs_panic(sb, "checksum failed for upper-case table"
                               " (0x%08x != 0x%08x)", checksum, csum);
                goto error;
        } else if (!range_cnt && !src_len) {
                exfat_fs_panic(sb, "upper-case table is empty");
                goto error;
        }

        err = -ENOMEM;
        /* a still-open trailing run (src_len != 0) adds one more range */
        upcase = exfat_upcase_alloc(range_cnt + !!src_len);
        if (!upcase)
                goto error;

        upcase->table = tbl;
        upcase->table_size = inode->i_size;
        upcase->nr_range = range_cnt + !!src_len;

        /* rebuild the range descriptors from the temp stack at table end */
        tbl_pos = 0;
        for (i = 0; i < range_cnt; i++) {
                u16 end_pos = tbl_end - i * 2;
                u16 start = upcase->table[end_pos];
                u16 len = upcase->table[end_pos - 1];

                upcase->ranges[i].tbl = tbl + tbl_pos;
                upcase->ranges[i].start = start;
                upcase->ranges[i].end = start + len - 1;
                tbl_pos += len;
        }
        if (src_len) {
                upcase->ranges[i].tbl = tbl + tbl_pos;
                upcase->ranges[i].start = src_start;
                upcase->ranges[i].end = src_start + src_len - 1;
        }

        sbi->upcase = upcase;

        return 0;

error:
        exfat_upcase_table_free(tbl, inode->i_size);
        return err;
}