int hmfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	struct hmfs_sb_info *sbi = HMFS_SB(dir->i_sb);
	void *data_blk;
	size_t symlen = strlen(symname) + 1;
	int ilock;

	if (symlen > HMFS_MAX_SYMLINK_NAME_LEN)
		return -ENAMETOOLONG;

	ilock = mutex_lock_op(sbi);
	inode = hmfs_make_dentry(dir, dentry, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode)) {
		mutex_unlock_op(sbi, ilock);
		return PTR_ERR(inode);
	}

	inode->i_op = &hmfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &hmfs_dblock_aops;

	data_blk = alloc_new_data_block(inode, 0);
	if (IS_ERR(data_blk)) {
		mutex_unlock_op(sbi, ilock);
		/* the directory entry already exists; expose the inode before failing */
		d_instantiate(dentry, inode);
		unlock_new_inode(inode);
		return PTR_ERR(data_blk);
	}
	/* store the target path (including the trailing NUL) in the data block */
	hmfs_memcpy(data_blk, (void *)symname, symlen);
	mutex_unlock_op(sbi, ilock);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;
}
struct inode *hmfs_make_dentry(struct inode *dir, struct dentry *dentry,
			       umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct hmfs_sb_info *sbi = HMFS_SB(sb);
	struct inode *inode;
	nid_t ino;
	int err = 0, ilock;

	ilock = mutex_lock_op(sbi);
	inode = hmfs_new_inode(dir, mode);
	if (IS_ERR(inode)) {
		mutex_unlock_op(sbi, ilock);
		return inode;
	}

	err = hmfs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);
	if (err)
		goto out;

	return inode;

out:
	ino = inode->i_ino;	/* remember the nid before iput() releases the inode */
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, ino);
	return ERR_PTR(err);
}
static size_t hmfs_xattr_generic_list(struct dentry *dentry, char *list,
				      size_t list_size, const char *name,
				      size_t len, int flags)
{
	struct hmfs_sb_info *sbi = HMFS_SB(dentry->d_sb);
	int total_len, prefix_len;
	const struct xattr_handler *handler;

	switch (flags) {
	case HMFS_XATTR_INDEX_USER:
		if (!test_opt(sbi, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case HMFS_XATTR_INDEX_TRUSTED:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		break;
	case HMFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	handler = hmfs_xattr_handler_map[flags];
	prefix_len = strlen(handler->prefix);
	total_len = prefix_len + len + 1;

	if (list && total_len <= list_size) {
		memcpy(list, handler->prefix, prefix_len);
		memcpy(list + prefix_len, name, len);
		list[prefix_len + len] = '\0';
	}
	return total_len;
}
ssize_t hmfs_xip_file_write(struct file *filp, const char __user *buf,
			    size_t len, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_inode;
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);
	size_t count = 0;
	ssize_t ret;
	loff_t pos;
	int ilock;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	ret = file_update_time(filp);
	if (ret)
		goto out_backing;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;

	ilock = mutex_lock_op(sbi);
	ret = __hmfs_xip_file_write(filp, buf, count, pos, ppos);
	mutex_unlock_op(sbi, ilock);

	mark_inode_dirty(inode);

out_backing:
	current->backing_dev_info = NULL;
out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
static void fill_zero(struct inode *inode, pgoff_t index, loff_t start,
		      loff_t len)
{
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);
	int ilock;

	if (!len)
		return;

	ilock = mutex_lock_op(sbi);
	alloc_new_data_partial_block(inode, index, start, start + len, true);
	mutex_unlock_op(sbi, ilock);
}
static int expand_inode_data(struct inode *inode, loff_t offset, loff_t len,
			     int mode)
{
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	struct dnode_of_data dn;
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);
	int ret, ilock;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	pg_start = ((u64) offset) >> HMFS_PAGE_SIZE_BITS;
	pg_end = ((u64) offset + len) >> HMFS_PAGE_SIZE_BITS;

	off_start = offset & (HMFS_PAGE_SIZE - 1);
	off_end = (offset + len) & (HMFS_PAGE_SIZE - 1);

	/* make sure a dnode exists for every page touched by the range */
	for (index = pg_start; index <= pg_end; index++) {
		ilock = mutex_lock_op(sbi);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
		mutex_unlock_op(sbi, ilock);
		if (ret)
			break;

		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (index + 1) << HMFS_PAGE_SIZE_BITS;
		else if (index == pg_end)
			new_size = (index << HMFS_PAGE_SIZE_BITS) + off_end;
		else
			new_size += HMFS_PAGE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		mark_size_dirty(inode, new_size);

	return ret;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	loff_t blk_start, blk_end;
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);
	int ret = 0, ilock;

	pg_start = ((u64) offset) >> HMFS_PAGE_SIZE_BITS;
	pg_end = ((u64) offset + len) >> HMFS_PAGE_SIZE_BITS;

	off_start = offset & (HMFS_PAGE_SIZE - 1);
	off_end = (offset + len) & (HMFS_PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		/* the hole lies within a single page */
		fill_zero(inode, pg_start, off_start, off_end - off_start);
	} else {
		/* zero the partial pages at both ends of the hole */
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
				  HMFS_PAGE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		/* drop the whole pages in between */
		if (pg_start < pg_end) {
			blk_start = pg_start << HMFS_PAGE_SIZE_BITS;
			blk_end = pg_end << HMFS_PAGE_SIZE_BITS;

			//FIXME: need this in mmap?
			//truncate_inode_pages_range(inode, blk_start, blk_end);

			ilock = mutex_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			mutex_unlock_op(sbi, ilock);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    i_size_read(inode) <= (offset + len))
		mark_size_dirty(inode, offset + len);

	return ret;
}
/* dn->node_block should be writable */
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct hmfs_sb_info *sbi = HMFS_SB(dn->inode->i_sb);
	struct hmfs_node *raw_node = (struct hmfs_node *)dn->node_block;
	struct hmfs_node *new_node = NULL;
	block_t addr;
	nid_t nid;
	char sum_type;

	nid = le32_to_cpu(raw_node->footer.nid);
	sum_type = dn->level ? SUM_TYPE_DN : SUM_TYPE_INODE;

	/* obtain a writable version of the node block */
	new_node = alloc_new_node(sbi, nid, dn->inode, sum_type);
	if (IS_ERR(new_node))
		return PTR_ERR(new_node);

	for (; count > 0; count--, ofs++) {
		/* at level 0 the data addresses live in the inode block itself */
		if (dn->level)
			addr = raw_node->dn.addr[ofs];
		else
			addr = raw_node->i.i_addr[ofs];

		if (addr == NULL_ADDR)
			continue;

		if (dn->level)
			new_node->dn.addr[ofs] = NULL_ADDR;
		else
			new_node->i.i_addr[ofs] = NULL_ADDR;

		setup_summary_of_delete_block(sbi, addr);
		nr_free++;
	}

	if (nr_free) {
		dec_valid_block_count(sbi, dn->inode, nr_free);
		mark_inode_dirty(dn->inode);
	}
	return nr_free;
}
static int hmfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct super_block *sb = dir->i_sb;
	struct hmfs_sb_info *sbi = HMFS_SB(sb);
	struct inode *inode = dentry->d_inode;
	struct hmfs_dir_entry *de;
	struct hmfs_dentry_block *res_blk;
	int err = -ENOENT;
	int bidx, ofs_in_blk, ilock;

	de = hmfs_find_entry(dir, &dentry->d_name, &bidx, &ofs_in_blk);
	if (!de)
		goto fail;

	err = check_orphan_space(sbi);
	if (err)
		goto fail;

	ilock = mutex_lock_op(sbi);
	inode_write_lock(dir);

	res_blk = get_dentry_block_for_write(dir, bidx);
	if (IS_ERR(res_blk)) {
		err = PTR_ERR(res_blk);
		inode_write_unlock(dir);
		mutex_unlock_op(sbi, ilock);
		goto fail;
	}

	de = &res_blk->dentry[ofs_in_blk];
	hmfs_delete_entry(de, res_blk, dir, inode, bidx);

	inode_write_unlock(dir);
	mutex_unlock_op(sbi, ilock);

	mark_inode_dirty(dir);
fail:
	return err;
}
static int truncate_blocks(struct inode *inode, u64 from)
{
	struct dnode_of_data dn;
	struct hmfs_sb_info *sbi = HMFS_SB(inode->i_sb);
	int count, err;
	u64 free_from;
	int ilock;

	/* index of the first page that can be freed entirely */
	free_from = (from + HMFS_PAGE_SIZE - 1) >> HMFS_PAGE_SIZE_BITS;

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err)
		goto free_next;

	if (!dn.level)
		count = NORMAL_ADDRS_PER_INODE;
	else
		count = ADDRS_PER_BLOCK;

	count -= dn.ofs_in_node;
	BUG_ON(count < 0);

	/* free the remaining addresses in the node that contains free_from */
	if (dn.ofs_in_node || !dn.level) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

free_next:
	err = truncate_inode_blocks(inode, free_from);
	truncate_partial_data_page(inode, from);
	mutex_unlock_op(sbi, ilock);

	return err;
}
static int hmfs_xattr_generic_set(struct dentry *dentry, const char *name,
				  const void *value, size_t size, int flags,
				  int handler_flags)
{
	struct hmfs_sb_info *sbi = HMFS_SB(dentry->d_sb);

	switch (handler_flags) {
	case HMFS_XATTR_INDEX_USER:
		if (!test_opt(sbi, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case HMFS_XATTR_INDEX_TRUSTED:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		break;
	case HMFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	if (strcmp(name, "") == 0)
		return -EINVAL;

	return hmfs_setxattr(dentry->d_inode, handler_flags, name, value,
			     size, flags);
}
static int hmfs_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct super_block *sb = old_dir->i_sb;
	struct hmfs_sb_info *sbi = HMFS_SB(sb);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct hmfs_dentry_block *old_dentry_blk, *new_dentry_blk;
	struct hmfs_dir_entry *old_dir_entry = NULL, *old_entry, *new_entry;
	int err = -ENOENT, ilock;
	int new_ofs, new_bidx, old_bidx, old_ofs;

	old_entry = hmfs_find_entry(old_dir, &old_dentry->d_name, &old_bidx,
				    &old_ofs);
	if (!old_entry)
		goto out;

	ilock = mutex_lock_op(sbi);

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		/* the ".." entry in the dentry block of old_inode */
		old_dir_entry = hmfs_parent_dir(old_inode);
		if (!old_dir_entry)
			goto out_unlock;
	}

	inode_write_lock(new_dir);

	if (new_inode) {
		err = -ENOTEMPTY;
		if (old_dir_entry && !hmfs_empty_dir(new_inode))
			goto out_k;

		err = -ENOENT;
		new_entry = hmfs_find_entry(new_dir, &new_dentry->d_name,
					    &new_bidx, &new_ofs);
		if (!new_entry)
			goto out_k;

		new_dentry_blk = get_dentry_block_for_write(new_dir, new_bidx);
		if (IS_ERR(new_dentry_blk)) {
			err = PTR_ERR(new_dentry_blk);
			goto out_k;
		}
		new_entry = &new_dentry_blk->dentry[new_ofs];

		/* point the existing target entry at old_inode */
		hmfs_set_link(new_dir, new_entry, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);

		if (!new_inode->i_nlink) {
			err = check_orphan_space(sbi);
			if (err)
				goto out_k;
			add_orphan_inode(sbi, new_inode->i_ino);
		}
		mark_inode_dirty(new_inode);
	} else {
		err = hmfs_add_link(new_dentry, old_inode);
		if (err)
			goto out_k;

		if (old_dir_entry) {
			inc_nlink(new_dir);
			mark_inode_dirty(new_dir);
		}
	}

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	if (old_dir != new_dir)
		inode_write_lock(old_dir);

	old_dentry_blk = get_dentry_block_for_write(old_dir, old_bidx);
	if (IS_ERR(old_dentry_blk)) {
		err = PTR_ERR(old_dentry_blk);
		goto unlock_old;
	}
	old_entry = &old_dentry_blk->dentry[old_ofs];
	hmfs_delete_entry(old_entry, old_dentry_blk, old_dir, NULL, old_bidx);

	if (old_dir_entry) {
		/* a moved directory gets a new ".." parent */
		if (old_dir != new_dir)
			hmfs_set_link(old_inode, old_dir_entry, new_dir);
		drop_nlink(old_dir);
		mark_inode_dirty(old_dir);
	}
	err = 0;

unlock_old:
	if (old_dir != new_dir)
		inode_write_unlock(old_dir);
out_k:
	inode_write_unlock(new_dir);
out_unlock:
	mutex_unlock_op(sbi, ilock);
out:
	return err;
}
static struct inode *hmfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct hmfs_sb_info *sbi = HMFS_SB(sb);
	struct hmfs_inode_info *i_info;
	struct inode *inode;
	nid_t ino;
	int err;
	bool nid_free = false;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!alloc_nid(sbi, &ino)) {
		err = -ENOSPC;
		goto fail;
	}

	inode->i_uid = current_fsuid();
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}

	inode->i_ino = ino;
	inode->i_mode = mode | HMFS_DEF_FILE_MODE;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISDIR(mode)) {
		/* mark that the new directory's link count must be bumped */
		set_inode_flag(HMFS_I(inode), FI_INC_LINK);
		inode->i_size = HMFS_PAGE_SIZE;
	} else if (S_ISLNK(mode)) {
		inode->i_size = HMFS_PAGE_SIZE;
	} else {
		inode->i_size = 0;
	}

	err = insert_inode_locked(inode);
	if (err) {
		err = -EINVAL;
		nid_free = true;
		goto out;
	}

	i_info = HMFS_I(inode);
	i_info->i_pino = dir->i_ino;
	if (hmfs_may_set_inline_data(dir))
		set_inode_flag(i_info, FI_INLINE_DATA);

	/* the nid was just allocated, so no node block should exist for it yet */
	hmfs_bug_on(sbi, !IS_ERR(get_node(sbi, ino)));

	err = sync_hmfs_inode(inode, false);
	if (!err) {
		inc_valid_inode_count(sbi);
		return inode;
	}

out:
	clear_nlink(inode);
	clear_inode_flag(HMFS_I(inode), FI_INC_LINK);
	unlock_new_inode(inode);
fail:
	make_bad_inode(inode);
	iput(inode);
	if (nid_free)
		alloc_nid_failed(sbi, ino);
	return ERR_PTR(err);
}