/*
 * hmfs_make_dentry - allocate a new inode and link it under @dir.
 * @dir:    parent directory inode
 * @dentry: dentry carrying the new name
 * @mode:   file mode (type bits + permissions) for the new inode
 *
 * Returns the new (still-locked, I_NEW) inode on success, or an
 * ERR_PTR() on failure.  On failure the partially-created inode is
 * torn down and its nid reservation is released.
 */
struct inode *hmfs_make_dentry(struct inode *dir, struct dentry *dentry,
			       umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct hmfs_sb_info *sbi = HMFS_SB(sb);
	struct inode *inode;
	nid_t ino;
	int err = 0, ilock;

	ilock = mutex_lock_op(sbi);
	inode = hmfs_new_inode(dir, mode);
	if (IS_ERR(inode)) {
		mutex_unlock_op(sbi, ilock);
		return inode;
	}
	err = hmfs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);
	if (err)
		goto out;
	return inode;
out:
	/*
	 * Fix: capture the inode number BEFORE iput() — iput() may free
	 * the inode, so the old `alloc_nid_failed(sbi, inode->i_ino)`
	 * after it was a use-after-free.
	 */
	ino = inode->i_ino;
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, ino);
	return ERR_PTR(err);
}
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); int ret, ilock; if (inode->i_ino == F2FS_NODE_INO(sbi) || inode->i_ino == F2FS_META_INO(sbi)) return 0; if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE)) return 0; /* * We need to lock here to prevent from producing dirty node pages * during the urgent cleaning time when runing out of free sections. */ ilock = mutex_lock_op(sbi); ret = update_inode_page(inode); mutex_unlock_op(sbi, ilock); if (wbc) f2fs_balance_fs(sbi); return ret; }
int hmfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *inode; struct hmfs_sb_info *sbi = HMFS_SB(dir->i_sb); void *data_blk; size_t symlen = strlen(symname) + 1; int ilock; if (symlen > HMFS_MAX_SYMLINK_NAME_LEN) return -ENAMETOOLONG; ilock = mutex_lock_op(sbi); inode = hmfs_make_dentry(dir, dentry, S_IFLNK | S_IRWXUGO); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &hmfs_symlink_inode_operations; inode->i_mapping->a_ops = &hmfs_dblock_aops; data_blk = alloc_new_data_block(inode, 0); if (IS_ERR(data_blk)) return PTR_ERR(data_blk); hmfs_memcpy(data_blk, (void *)symname, symlen); mutex_unlock_op(sbi, ilock); d_instantiate(dentry, inode); unlock_new_inode(inode); return 0; }
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int ilock;

	trace_f2fs_evict_inode(inode);
	/* Drop all page-cache pages still attached to this inode. */
	truncate_inode_pages(&inode->i_data, 0);

	/* The internal node/meta inodes are never truly deleted. */
	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto no_delete;

	BUG_ON(atomic_read(&F2FS_I(inode)->dirty_dents));
	remove_dirty_dir_inode(inode);

	/* Still linked (or already bad): keep the on-disk inode. */
	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	/* Forbid further block allocation while the inode is torn down. */
	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	ilock = mutex_lock_op(sbi);
	remove_inode_page(inode);
	mutex_unlock_op(sbi, ilock);

no_delete:
	end_writeback(inode);
}
/*
 * hmfs_xip_file_write - write(2) path for XIP (execute-in-place) files.
 * @filp: file being written
 * @buf:  user buffer to copy from
 * @len:  number of bytes requested
 * @ppos: file position, updated on success
 *
 * Returns the number of bytes written or a negative errno.
 *
 * Fix: @ret was declared size_t (unsigned); negative errnos stored in
 * it relied on implementation-defined round-trips. It is now ssize_t,
 * matching the function's return type.
 */
ssize_t hmfs_xip_file_write(struct file *filp, const char __user *buf,
			    size_t len, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_inode;
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);
	size_t count = 0;
	ssize_t ret;
	loff_t pos;
	int ilock;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	current->backing_dev_info = mapping->backing_dev_info;

	/* May shrink @count (rlimits, max file size, O_APPEND, ...). */
	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;
	ret = file_update_time(filp);
	if (ret)
		goto out_backing;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;

	ilock = mutex_lock_op(sbi);
	ret = __hmfs_xip_file_write(filp, buf, count, pos, ppos);
	mutex_unlock_op(sbi, ilock);
	mark_inode_dirty(inode);

out_backing:
	current->backing_dev_info = NULL;
out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
/*
 * Zero @len bytes of the page at @index, starting at byte @start, by
 * allocating a fresh partial data block for that range.
 */
static void fill_zero(struct inode *inode, pgoff_t index, loff_t start,
		      loff_t len)
{
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);
	int lock_token;

	/* Zero-length fills are a no-op. */
	if (len == 0)
		return;

	lock_token = mutex_lock_op(sbi);
	alloc_new_data_partial_block(inode, index, start, start + len, true);
	mutex_unlock_op(sbi, lock_token);
}
/*
 * Set an extended attribute on @inode, serialised against both the
 * filesystem operation lock and the inode's write lock.
 * Returns the result of __hmfs_setxattr().
 */
static int hmfs_setxattr(struct inode *inode, int index, const char *name,
			 const void *value, size_t size, int flags)
{
	struct hmfs_sb_info *sbi = HMFS_I_SB(inode);
	int lock_token;
	int ret;

	lock_token = mutex_lock_op(sbi);
	inode_write_lock(inode);

	ret = __hmfs_setxattr(inode, index, name, value, size, flags);

	inode_write_unlock(inode);
	mutex_unlock_op(sbi, lock_token);

	return ret;
}
/*
 * expand_inode_data - fallocate() extend path: reserve blocks for the
 * byte range [@offset, @offset+@len) and, unless FALLOC_FL_KEEP_SIZE,
 * grow i_size to cover it.
 *
 * Returns 0 on success or a negative errno from the allocation loop.
 *
 * Fix: the page-index shifts are now done in loff_t — `pgoff_t <<
 * HMFS_PAGE_SIZE_BITS` truncates on 32-bit builds for offsets > 4 GiB.
 */
static int expand_inode_data(struct inode *inode, loff_t offset, loff_t len,
			     int mode)
{
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	struct dnode_of_data dn;
	int ret, ilock;
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	pg_start = ((u64) offset) >> HMFS_PAGE_SIZE_BITS;
	pg_end = ((u64) offset + len) >> HMFS_PAGE_SIZE_BITS;

	off_start = offset & (HMFS_PAGE_SIZE - 1);
	off_end = (offset + len) & (HMFS_PAGE_SIZE - 1);

	for (index = pg_start; index <= pg_end; index++) {
		/* Reserve (or find) the dnode covering this page. */
		ilock = mutex_lock_op(sbi);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
		mutex_unlock_op(sbi, ilock);
		if (ret)
			break;

		/* Track how far the file will extend after this page. */
		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (loff_t)(index + 1) << HMFS_PAGE_SIZE_BITS;
		else if (index == pg_end)
			new_size = ((loff_t)index << HMFS_PAGE_SIZE_BITS)
					+ off_end;
		else
			new_size += HMFS_PAGE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		mark_size_dirty(inode, new_size);

	return ret;
}
int hmfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; struct hmfs_inode_info *fi = HMFS_I(inode); struct posix_acl *acl; int err = 0, ilock; struct hmfs_sb_info *sbi = HMFS_I_SB(inode); err = inode_change_ok(inode, attr); if (err) return err; ilock = mutex_lock_op(sbi); inode_write_lock(inode); if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { truncate_setsize(inode, attr->ia_size); hmfs_truncate(inode); } __setattr_copy(inode, attr); if (attr->ia_valid & ATTR_MODE) { acl = hmfs_get_acl(inode, ACL_TYPE_ACCESS); if (!acl || IS_ERR(acl)) { err = PTR_ERR(acl); goto out; } err = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); err = hmfs_set_acl(inode, acl, ACL_TYPE_ACCESS); if (err || is_inode_flag_set(fi, FI_ACL_MODE)) { inode->i_mode = fi->i_acl_mode; clear_inode_flag(fi, FI_ACL_MODE); } } out: inode_write_unlock(inode); mutex_unlock_op(sbi, ilock); mark_inode_dirty(inode); return err; }
/*
 * punch_hole - fallocate() hole-punch path: zero the partial head/tail
 * pages of [@offset, @offset+@len) and deallocate the whole pages in
 * between.
 *
 * Returns 0 on success or a negative errno from truncate_hole().
 *
 * Fix: blk_start/blk_end are now computed with a loff_t cast —
 * `pgoff_t << HMFS_PAGE_SIZE_BITS` truncates on 32-bit builds.
 */
static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	loff_t blk_start, blk_end;
	int ret = 0, ilock;
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);

	pg_start = ((u64) offset) >> HMFS_PAGE_SIZE_BITS;
	pg_end = ((u64) offset + len) >> HMFS_PAGE_SIZE_BITS;

	off_start = offset & (HMFS_PAGE_SIZE - 1);
	off_end = (offset + len) & (HMFS_PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		/* Hole lies entirely within a single page. */
		fill_zero(inode, pg_start, off_start, off_end - off_start);
	} else {
		/* Zero the partial head and tail pages. */
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
				  HMFS_PAGE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		/* Deallocate the whole pages in between. */
		if (pg_start < pg_end) {
			blk_start = (loff_t)pg_start << HMFS_PAGE_SIZE_BITS;
			blk_end = (loff_t)pg_end << HMFS_PAGE_SIZE_BITS;
			//FIXME: need this in mmap?
			//truncate_inode_pages_range(inode,blk_start,blk_end);
			ilock = mutex_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			mutex_unlock_op(sbi, ilock);
		}
	}

	/*
	 * NOTE(review): growing i_size on a punch looks suspicious —
	 * FALLOC_FL_PUNCH_HOLE normally never changes the file size;
	 * confirm against the caller's mode validation.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
			i_size_read(inode) <= (offset + len)) {
		mark_size_dirty(inode, offset + len);
	}

	return ret;
}
/*
 * Repoint an existing directory entry @de (in dentry page @page, still
 * mapped by a prior f2fs_find_entry()) at @inode, and dirty both the
 * page and the parent directory.  Consumes the page reference.
 */
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
		   struct page *page, struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);

	mutex_lock_op(sbi, DENTRY_OPS);
	lock_page(page);
	wait_on_page_writeback(page);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	kunmap(page);	/* pairs with the kmap() done by f2fs_find_entry() */
	set_page_dirty(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	mark_inode_dirty(dir);

	/* update parent inode number before releasing dentry page */
	F2FS_I(inode)->i_pino = dir->i_ino;

	f2fs_put_page(page, 1);
	mutex_unlock_op(sbi, DENTRY_OPS);
}
/*
 * Create a hard link @dentry to the inode behind @old_dentry.
 * Returns 0 on success or a negative errno; on failure the pending
 * link-count increment is rolled back and the extra reference dropped.
 */
static int hmfs_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct hmfs_sb_info *sbi = HMFS_I_SB(inode);
	int lock_token;
	int ret;

	inode->i_ctime = CURRENT_TIME;
	/* Extra reference + deferred nlink bump for the new name. */
	ihold(inode);
	set_inode_flag(HMFS_I(inode), FI_INC_LINK);

	lock_token = mutex_lock_op(sbi);
	ret = hmfs_add_link(dentry, inode);
	mutex_unlock_op(sbi, lock_token);

	if (ret) {
		/* Roll back the flag and drop the extra reference. */
		clear_inode_flag(HMFS_I(inode), FI_INC_LINK);
		iput(inode);
		return ret;
	}

	d_instantiate(dentry, inode);
	return 0;
}
/*
 * Free all data blocks of @inode at and beyond byte offset @from.
 * First frees the tail of the direct node that straddles @from, then
 * drops every whole node block after it, and finally zeroes the
 * remainder of the partial page containing @from.
 */
static int truncate_blocks(struct inode *inode, u64 from)
{
	struct dnode_of_data dn;
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);
	int count, err;
	u64 free_from;
	int ilock;

	/* Index of the first page that lies entirely beyond @from. */
	free_from = (from + HMFS_PAGE_SIZE - 1) >> HMFS_PAGE_SIZE_BITS;

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		/*
		 * Lookup failed (e.g. a hole): skip the partial-node step
		 * and free whole node blocks from free_from onward.  Note
		 * err is overwritten below by truncate_inode_blocks().
		 */
		goto free_next;
	}
	/* Addresses per node: the inode itself vs. a direct node block. */
	if (!dn.level)
		count = NORMAL_ADDRS_PER_INODE;
	else
		count = ADDRS_PER_BLOCK;
	count -= dn.ofs_in_node;
	BUG_ON(count < 0);
	/* Free the tail of the node that straddles the truncation point. */
	if (dn.ofs_in_node || !dn.level) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}
free_next:
	err = truncate_inode_blocks(inode, free_from);
	/* Zero what remains of the page containing @from. */
	truncate_partial_data_page(inode, from);
	mutex_unlock_op(sbi, ilock);
	return err;
}
static int hmfs_unlink(struct inode *dir, struct dentry *dentry) { struct super_block *sb = dir->i_sb; struct hmfs_sb_info *sbi = HMFS_SB(sb); struct inode *inode = dentry->d_inode; struct hmfs_dir_entry *de; struct hmfs_dentry_block *res_blk; int err = -ENOENT; int bidx, ofs_in_blk, ilock; de = hmfs_find_entry(dir, &dentry->d_name, &bidx, &ofs_in_blk); if (!de) goto fail; err = check_orphan_space(sbi); if (err) goto fail; ilock = mutex_lock_op(sbi); inode_write_lock(dir); res_blk = get_dentry_block_for_write(dir, bidx); if (IS_ERR(res_blk)) { err = PTR_ERR(res_blk); mutex_unlock_op(sbi, ilock); goto fail; } de = &res_blk->dentry[ofs_in_blk]; hmfs_delete_entry(de, res_blk, dir, inode, bidx); inode_write_unlock(dir); mutex_unlock_op(sbi, ilock); mark_inode_dirty(dir); fail: return err; }
/*
 * It only removes the dentry from the dentry page,corresponding name
 * entry in name page does not need to be touched during deletion.
 *
 * @dentry: entry inside the mapped dentry page (mapped by
 *          f2fs_find_entry(); this function does the matching kunmap)
 * @page:   the dentry page holding @dentry; its reference is consumed
 * @inode:  the inode the entry pointed at, or NULL when the caller
 *          only wants the name removed (e.g. rename source)
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
		       struct inode *inode)
{
	struct f2fs_dentry_block *dentry_blk;
	unsigned int bit_pos;
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	void *kaddr = page_address(page);
	int i;

	mutex_lock_op(sbi, DENTRY_OPS);

	lock_page(page);
	wait_on_page_writeback(page);

	dentry_blk = (struct f2fs_dentry_block *)kaddr;
	bit_pos = dentry - (struct f2fs_dir_entry *)dentry_blk->dentry;
	/* Clear every bitmap slot this (possibly multi-slot) name used. */
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

	/* Let's check and deallocate this dentry page */
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
			NR_DENTRY_IN_BLOCK, 0);
	kunmap(page); /* kunmap - pair of f2fs_find_entry */
	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode && S_ISDIR(inode->i_mode)) {
		/* Removing a subdirectory drops the parent's ".." link. */
		drop_nlink(dir);
		f2fs_write_inode(dir, NULL);
	} else {
		mark_inode_dirty(dir);
	}

	if (inode) {
		inode->i_ctime = CURRENT_TIME;
		drop_nlink(inode);
		if (S_ISDIR(inode->i_mode)) {
			/* A directory also loses its own "." link. */
			drop_nlink(inode);
			i_size_write(inode, 0);
		}
		f2fs_write_inode(inode, NULL);
		if (inode->i_nlink == 0)
			add_orphan_inode(sbi, inode->i_ino);
	}

	/* The block became empty: punch it out of the directory file. */
	if (bit_pos == NR_DENTRY_IN_BLOCK) {
		truncate_hole(dir, page->index, page->index + 1);
		clear_page_dirty_for_io(page);
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(dir);
	}

	f2fs_put_page(page, 1);
	mutex_unlock_op(sbi, DENTRY_OPS);
}
/*
 * f2fs_setxattr - create, replace or remove an extended attribute.
 * @inode:      inode whose xattr node block is modified
 * @name_index: xattr namespace index
 * @name:       attribute name (without the namespace prefix)
 * @value:      new value, or NULL to remove the attribute
 * @value_len:  length of @value in bytes (forced to 0 when removing)
 * @ipage:      caller-held inode page, or NULL to look it up
 *
 * Allocates the xattr node block on first use.  Returns 0 on success
 * or a negative errno.
 */
int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
		  const void *value, size_t value_len, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_xattr_header *header = NULL;
	struct f2fs_xattr_entry *here, *last;
	struct page *page;
	void *base_addr;
	int error, found, free, newsize;
	size_t name_len;
	char *pval;
	int ilock;

	if (name == NULL)
		return -EINVAL;

	/* A NULL value means "remove this attribute". */
	if (value == NULL)
		value_len = 0;

	name_len = strlen(name);

	if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN)
		return -ERANGE;

	f2fs_balance_fs(sbi);

	ilock = mutex_lock_op(sbi);

	if (!fi->i_xattr_nid) {
		/* Allocate new attribute block */
		struct dnode_of_data dn;

		if (!alloc_nid(sbi, &fi->i_xattr_nid)) {
			error = -ENOSPC;
			goto exit;
		}
		set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
		mark_inode_dirty(inode);

		page = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
		if (IS_ERR(page)) {
			/* Roll back the nid reservation on failure. */
			alloc_nid_failed(sbi, fi->i_xattr_nid);
			fi->i_xattr_nid = 0;
			error = PTR_ERR(page);
			goto exit;
		}

		alloc_nid_done(sbi, fi->i_xattr_nid);
		base_addr = page_address(page);
		header = XATTR_HDR(base_addr);
		header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
		header->h_refcount = cpu_to_le32(1);
	} else {
		/* The inode already has an extended attribute block. */
		page = get_node_page(sbi, fi->i_xattr_nid);
		if (IS_ERR(page)) {
			error = PTR_ERR(page);
			goto exit;
		}
		base_addr = page_address(page);
		header = XATTR_HDR(base_addr);
	}

	if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
		error = -EIO;
		goto cleanup;
	}

	/* find entry with wanted name. */
	found = 0;
	list_for_each_xattr(here, base_addr) {
		if (here->e_name_index != name_index)
			continue;
		if (here->e_name_len != name_len)
			continue;
		if (!memcmp(here->e_name, name, name_len)) {
			found = 1;
			break;
		}
	}

	/* Advance @last to the list terminator (end of used space). */
	last = here;
	while (!IS_XATTR_LAST_ENTRY(last))
		last = XATTR_NEXT_ENTRY(last);

	newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) +
			name_len + value_len);

	/* 1. Check space */
	if (value) {
		/* If value is NULL, it is remove operation.
		 * In case of update operation, we caculate free.
		 */
		free = MIN_OFFSET - ((char *)last - (char *)header);
		if (found)
			free = free - ENTRY_SIZE(here);

		if (free < newsize) {
			error = -ENOSPC;
			goto cleanup;
		}
	}

	/* 2. Remove old entry */
	if (found) {
		/* If entry is found, remove old entry.
		 * If not found, remove operation is not needed.
		 */
		struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here);
		int oldsize = ENTRY_SIZE(here);

		memmove(here, next, (char *)last - (char *)next);
		last = (struct f2fs_xattr_entry *)((char *)last - oldsize);
		memset(last, 0, oldsize);
	}

	/* 3. Write new entry */
	if (value) {
		/* Before we come here, old entry is removed.
		 * We just write new entry. */
		memset(last, 0, newsize);
		last->e_name_index = name_index;
		last->e_name_len = name_len;
		memcpy(last->e_name, name, name_len);
		pval = last->e_name + name_len;
		memcpy(pval, value, value_len);
		last->e_value_size = cpu_to_le16(value_len);
	}

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	/* Commit the mode saved by a pending ACL-driven chmod. */
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		inode->i_mode = fi->i_acl_mode;
		inode->i_ctime = CURRENT_TIME;
		clear_inode_flag(fi, FI_ACL_MODE);
	}

	if (ipage)
		update_inode(inode, ipage);
	else
		update_inode_page(inode);
	mutex_unlock_op(sbi, ilock);

	return 0;
cleanup:
	f2fs_put_page(page, 1);
exit:
	mutex_unlock_op(sbi, ilock);
	return error;
}
/*
 * f2fs_setxattr - create, replace or remove an extended attribute.
 * @inode:      inode whose xattr storage is modified
 * @name_index: xattr namespace index
 * @name:       attribute name (without the namespace prefix)
 * @value:      new value, or NULL to remove the attribute
 * @value_len:  length of @value in bytes (forced to 0 when removing)
 * @ipage:      caller-held inode page, or NULL to look it up
 *
 * Works on a flat copy of all xattr storage obtained from
 * read_all_xattrs() and writes it back via write_all_xattrs().
 * Returns 0 on success or a negative errno.
 */
int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
		  const void *value, size_t value_len, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_xattr_entry *here, *last;
	void *base_addr;
	int found, newsize;
	size_t name_len;
	int ilock;
	__u32 new_hsize;
	int error = -ENOMEM;	/* default when read_all_xattrs() fails */

	if (name == NULL)
		return -EINVAL;

	/* A NULL value means "remove this attribute". */
	if (value == NULL)
		value_len = 0;

	name_len = strlen(name);

	if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN(inode))
		return -ERANGE;

	f2fs_balance_fs(sbi);

	ilock = mutex_lock_op(sbi);

	base_addr = read_all_xattrs(inode, ipage);
	if (!base_addr)
		goto exit;

	/* find entry with wanted name. */
	here = __find_xattr(base_addr, name_index, name_len, name);

	/* __find_xattr() returns the terminator when nothing matched. */
	found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
	last = here;

	/* Advance @last to the list terminator (end of used space). */
	while (!IS_XATTR_LAST_ENTRY(last))
		last = XATTR_NEXT_ENTRY(last);

	newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) +
			name_len + value_len);

	/* 1. Check space */
	if (value) {
		int free;
		/*
		 * If value is NULL, it is remove operation.
		 * In case of update operation, we caculate free.
		 */
		free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
		if (found)
			free = free - ENTRY_SIZE(here);

		if (free < newsize) {
			error = -ENOSPC;
			goto exit;
		}
	}

	/* 2. Remove old entry */
	if (found) {
		/*
		 * If entry is found, remove old entry.
		 * If not found, remove operation is not needed.
		 */
		struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here);
		int oldsize = ENTRY_SIZE(here);

		memmove(here, next, (char *)last - (char *)next);
		last = (struct f2fs_xattr_entry *)((char *)last - oldsize);
		memset(last, 0, oldsize);
	}

	new_hsize = (char *)last - (char *)base_addr;

	/* 3. Write new entry */
	if (value) {
		char *pval;
		/*
		 * Before we come here, old entry is removed.
		 * We just write new entry.
		 */
		memset(last, 0, newsize);
		last->e_name_index = name_index;
		last->e_name_len = name_len;
		memcpy(last->e_name, name, name_len);
		pval = last->e_name + name_len;
		memcpy(pval, value, value_len);
		last->e_value_size = cpu_to_le16(value_len);
		new_hsize += newsize;
	}

	error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
	if (error)
		goto exit;

	/* Commit the mode saved by a pending ACL-driven chmod. */
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		inode->i_mode = fi->i_acl_mode;
		inode->i_ctime = CURRENT_TIME;
		clear_inode_flag(fi, FI_ACL_MODE);
	}

	if (ipage)
		update_inode(inode, ipage);
	else
		update_inode_page(inode);
exit:
	mutex_unlock_op(sbi, ilock);
	kzfree(base_addr);
	return error;
}
static int hmfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct super_block *sb = old_dir->i_sb; struct hmfs_sb_info *sbi = HMFS_SB(sb); struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; struct hmfs_dentry_block *old_dentry_blk, *new_dentry_blk; struct hmfs_dir_entry *old_dir_entry = NULL, *old_entry, *new_entry; int err = -ENOENT, ilock; int new_ofs, new_bidx, old_bidx, old_ofs; old_entry = hmfs_find_entry(old_dir, &old_dentry->d_name, &old_bidx, &old_ofs); if (!old_entry) goto out; ilock = mutex_lock_op(sbi); if (S_ISDIR(old_inode->i_mode)) { err = -EIO; // .. in hmfs_dentry_block of old_inode old_dir_entry = hmfs_parent_dir(old_inode); if (!old_dir_entry) goto out_k; } inode_write_lock(new_dir); if (new_inode) { err = -ENOTEMPTY; if (old_dir_entry && !hmfs_empty_dir(new_inode)) goto out_k; err = -ENOENT; new_entry = hmfs_find_entry(new_dir, &new_dentry->d_name, &new_bidx, &new_ofs); if (!new_entry) goto out_k; new_dentry_blk = get_dentry_block_for_write(new_dir, new_bidx); if (IS_ERR(new_dentry_blk)) { err = PTR_ERR(new_dentry_blk); goto out_k; } new_entry = &new_dentry_blk->dentry[new_ofs]; hmfs_set_link(new_dir, new_entry, old_inode); new_inode->i_ctime = CURRENT_TIME; if (old_dir_entry) drop_nlink(new_inode); drop_nlink(new_inode); if (!new_inode->i_nlink) { err = check_orphan_space(sbi); if (err) goto out_k; add_orphan_inode(sbi, new_inode->i_ino); } mark_inode_dirty(new_inode); } else { err = hmfs_add_link(new_dentry, old_inode); if (err) goto out_k; if (old_dir_entry) { inc_nlink(new_dir); mark_inode_dirty(new_dir); } } old_inode->i_ctime = CURRENT_TIME; mark_inode_dirty(old_inode); if (old_dir != new_dir) inode_write_lock(old_dir); old_dentry_blk = get_dentry_block_for_write(old_dir, old_bidx); if (IS_ERR(old_dentry_blk)) { err = PTR_ERR(old_dentry_blk); goto unlock_old; } old_entry = &old_dentry_blk->dentry[old_ofs]; 
hmfs_delete_entry(old_entry, old_dentry_blk, old_dir, NULL, old_bidx); if (old_dir_entry) { if (old_dir != new_dir) { hmfs_set_link(old_inode, old_dir_entry, new_dir); } drop_nlink(old_dir); mark_inode_dirty(old_dir); } unlock_old: if (old_dir != new_dir) inode_write_unlock(old_dir); out_k: inode_write_unlock(new_dir); mutex_unlock_op(sbi, ilock); out: return err; }
/*
 * __f2fs_add_link - insert a directory entry for @inode named @name
 * under @dir, growing the directory's hash-table depth as needed.
 * Returns 0 on success or a negative errno.
 */
int __f2fs_add_link(struct inode *dir, const struct qstr *name,
		    struct inode *inode)
{
	unsigned int bit_pos;
	unsigned int level;
	unsigned int current_depth;
	unsigned long bidx, block;
	f2fs_hash_t dentry_hash;
	struct f2fs_dir_entry *de;
	unsigned int nbucket, nblock;
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	size_t namelen = name->len;
	struct page *dentry_page = NULL;
	struct f2fs_dentry_block *dentry_blk = NULL;
	int slots = GET_DENTRY_SLOTS(namelen);
	int err = 0;
	int i;

	dentry_hash = f2fs_dentry_hash(name->name, name->len);
	level = 0;
	current_depth = F2FS_I(dir)->i_current_depth;
	/* Reuse the level cached by a preceding failed lookup. */
	if (F2FS_I(dir)->chash == dentry_hash) {
		level = F2FS_I(dir)->clevel;
		F2FS_I(dir)->chash = 0;
	}

start:
	if (current_depth == MAX_DIR_HASH_DEPTH)
		return -ENOSPC;

	/* Increase the depth, if required */
	if (level == current_depth)
		++current_depth;

	nbucket = dir_buckets(level);
	nblock = bucket_blocks(level);

	bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));

	/* Scan each block of the hashed bucket for a large-enough slot. */
	for (block = bidx; block <= (bidx + nblock - 1); block++) {
		mutex_lock_op(sbi, DENTRY_OPS);
		dentry_page = get_new_data_page(dir, block, true);
		if (IS_ERR(dentry_page)) {
			mutex_unlock_op(sbi, DENTRY_OPS);
			return PTR_ERR(dentry_page);
		}

		dentry_blk = kmap(dentry_page);
		bit_pos = room_for_filename(dentry_blk, slots);
		/* Found room: fall through to add_dentry still locked. */
		if (bit_pos < NR_DENTRY_IN_BLOCK)
			goto add_dentry;

		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
		mutex_unlock_op(sbi, DENTRY_OPS);
	}

	/* Move to next level to find the empty slot for new dentry */
	++level;
	goto start;

add_dentry:
	err = init_inode_metadata(inode, dir, name);
	if (err)
		goto fail;

	wait_on_page_writeback(dentry_page);

	de = &dentry_blk->dentry[bit_pos];
	de->hash_code = dentry_hash;
	de->name_len = cpu_to_le16(namelen);
	memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	/* Mark every slot a (possibly multi-slot) long name occupies. */
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
	set_page_dirty(dentry_page);

	update_parent_metadata(dir, inode, current_depth);

	/* update parent inode number before releasing dentry page */
	F2FS_I(inode)->i_pino = dir->i_ino;

fail:
	kunmap(dentry_page);
	f2fs_put_page(dentry_page, 1);
	mutex_unlock_op(sbi, DENTRY_OPS);
	return err;
}
/*
 * f2fs_vm_page_mkwrite - make an mmapped page writable on first store:
 * reserve its on-disk block, validate the page against the current
 * i_size/mapping, and zero any part beyond EOF.
 */
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr;
	struct dnode_of_data dn;
	int err, ilock;

	f2fs_balance_fs(sbi);

	/* F2FS backport: We replace in old kernels sb_start_pagefault(inode->i_sb) with vfs_check_frozen()
	 * and remove the original sb_end_pagefault(inode->i_sb) after the out label
	 *
	 * The introduction of sb_{start,end}_pagefault() was made post-3.2 kernels by Jan Kara
	 * and merged in commit a0e881b7c189fa2bd76c024dbff91e79511c971d.
	 * Discussed at https://lkml.org/lkml/2012/3/5/278
	 *
	 * - Alex
	 */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* block allocation */
	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		goto out;
	}

	old_blk_addr = dn.data_blkaddr;

	/* Page has no block yet (hole): reserve one now. */
	if (old_blk_addr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, ilock);
			goto out;
		}
	}
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);

	file_update_time(vma->vm_file);
	lock_page(page);
	/* The page may have been truncated/reclaimed while unlocked. */
	if (page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page)) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		/* Zero the part of the page that lies beyond EOF. */
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

mapped:
	/* fill the page */
	wait_on_page_writeback(page);
out:
	return block_page_mkwrite_return(err);
}