/*
 * Create a hard link @dentry in directory @dir to the inode behind
 * @old_dentry.  Takes an extra inode reference for the new link and
 * rolls both the reference and the pending-link flag back on failure.
 * Returns 0 on success or a negative errno from hmfs_add_link().
 */
static int hmfs_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct hmfs_sb_info *sbi = HMFS_I_SB(inode);
	int ilock;
	int ret;

	/* Bump ctime and hold a reference on behalf of the new link. */
	inode->i_ctime = CURRENT_TIME;
	ihold(inode);

	/* Tell the add-link path that i_nlink is about to be incremented. */
	set_inode_flag(HMFS_I(inode), FI_INC_LINK);

	ilock = mutex_lock_op(sbi);
	ret = hmfs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);

	if (!ret) {
		d_instantiate(dentry, inode);
		return 0;
	}

	/* Roll back: clear the pending-link flag and drop our reference. */
	clear_inode_flag(HMFS_I(inode), FI_INC_LINK);
	iput(inode);
	return ret;
}
/*
 * xattr handler: read the one-byte file-advise hint of the inode behind
 * @dentry.  Only the bare (empty) attribute name is accepted.  When
 * @buffer is non-NULL the hint byte is copied into it; either way the
 * attribute's size (one byte) is returned, or -EINVAL for a bad name.
 */
static int hmfs_xattr_advise_get(struct dentry *dentry, const char *name,
				 void *buffer, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;

	/* Reject any non-empty suffix after the handler prefix. */
	if (*name != '\0')
		return -EINVAL;

	inode_read_lock(inode);
	if (buffer != NULL)
		*(char *)buffer = HMFS_I(inode)->i_advise;
	inode_read_unlock(inode);

	return sizeof(char);
}
/*
 * xattr handler: store a new one-byte file-advise hint on the inode
 * behind @dentry.  Only the bare (empty) attribute name is accepted,
 * the caller must own the inode or be capable, and @value must be
 * non-NULL.  Marks the inode dirty on success.  Returns 0, -EINVAL,
 * or -EPERM.
 */
static int hmfs_xattr_advise_set(struct dentry *dentry, const char *name,
				 const void *value, size_t size, int flags,
				 int handler_flag)
{
	struct inode *inode = dentry->d_inode;

	/* Reject any non-empty suffix after the handler prefix. */
	if (*name != '\0')
		return -EINVAL;
	if (!inode_owner_or_capable(inode))
		return -EPERM;
	if (!value)
		return -EINVAL;

	inode_write_lock(inode);
	HMFS_I(inode)->i_advise = *(const char *)value;
	inode_write_unlock(inode);

	mark_inode_dirty(inode);
	return 0;
}
int hmfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; struct hmfs_inode_info *fi = HMFS_I(inode); struct posix_acl *acl; int err = 0, ilock; struct hmfs_sb_info *sbi = HMFS_I_SB(inode); err = inode_change_ok(inode, attr); if (err) return err; ilock = mutex_lock_op(sbi); inode_write_lock(inode); if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { truncate_setsize(inode, attr->ia_size); hmfs_truncate(inode); } __setattr_copy(inode, attr); if (attr->ia_valid & ATTR_MODE) { acl = hmfs_get_acl(inode, ACL_TYPE_ACCESS); if (!acl || IS_ERR(acl)) { err = PTR_ERR(acl); goto out; } err = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); err = hmfs_set_acl(inode, acl, ACL_TYPE_ACCESS); if (err || is_inode_flag_set(fi, FI_ACL_MODE)) { inode->i_mode = fi->i_acl_mode; clear_inode_flag(fi, FI_ACL_MODE); } } out: inode_write_unlock(inode); mutex_unlock_op(sbi, ilock); mark_inode_dirty(inode); return err; }
/*
 * fsync()/fdatasync() entry point for HMFS files.
 *
 * Still a scaffold: every persistence step is recorded as a TODO below
 * and the function currently reports success without writing anything.
 */
int hmfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct hmfs_inode_info *fi = HMFS_I(inode);
	int err = 0;

	/*
	 * TODO: in-place update — when the dirty-page count is below a
	 * threshold, commit random writes through the page cache instead.
	 * [Inode Flag] hmfs_inode_info should track the number of dirty
	 * pages and sbi should hold the minimum count that forces an
	 * inode writeback.
	 */

	/* A dirty inode must be written even for plain fsync(). */
	if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
		/* TODO: [inode] update the inode page */
		goto go_write;
	}

	/* TODO: [CP] if neither inode nor data changed, skip straight out. */

go_write:
	/* TODO: [Segment] balance — ensure enough free space (GC if not) */
	/*
	 * TODO: [CP] decide whether a checkpoint is needed; each inode
	 * should carry a boolean indicating that need.
	 */
	/* TODO: [Node] bring every node of the inode up to date */
	/* TODO: [Node] write back all dirty nodes of the inode */
	/* XXX: writeback support is required before this function works */
	/* TODO: [CP] remove this inode from sbi's dirty-inode list */
	/* TODO: [Inode Flag] clear inode flags as appropriate */
	/* TODO: [Segment] flush sbi */

	return err;
}
static void __setattr_copy(struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) inode->i_atime = timespec_trunc(attr->ia_atime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MTIME) inode->i_mtime = timespec_trunc(attr->ia_mtime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_CTIME) inode->i_ctime = timespec_trunc(attr->ia_ctime, inode->i_sb->s_time_gran); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; set_acl_inode(HMFS_I(inode), mode); } }
/*
 * Allocate and initialise a new in-memory inode under directory @dir
 * with the given @mode: reserve a node id, set ownership (honouring the
 * parent's setgid bit), size it by file type, hash it into the inode
 * cache, and persist it via sync_hmfs_inode().  Returns the new inode
 * (still locked/I_NEW for the caller to unlock) or an ERR_PTR.
 */
static struct inode *hmfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct hmfs_sb_info *sbi = HMFS_SB(sb);
	struct hmfs_inode_info *i_info;
	struct inode *inode;
	nid_t ino;
	int err;
	bool nid_free = false;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* Reserve a node id for the new inode. */
	if (!alloc_nid(sbi, &ino)) {
		err = -ENOSPC;
		goto fail;
	}

	inode->i_uid = current_fsuid();
	/* Setgid directory: inherit group, propagate S_ISGID to subdirs. */
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}

	inode->i_ino = ino;
	inode->i_mode = mode | HMFS_DEF_FILE_MODE;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISDIR(mode)) {
		/* Directories start with a pending extra link ("..") */
		set_inode_flag(HMFS_I(inode), FI_INC_LINK);
		inode->i_size = HMFS_PAGE_SIZE;
	} else if (S_ISLNK(mode)) {
		inode->i_size = HMFS_PAGE_SIZE;
	} else {
		inode->i_size = 0;
	}

	err = insert_inode_locked(inode);
	if (err) {
		err = -EINVAL;
		nid_free = true;
		/*
		 * NOTE(review): on this path insert_inode_locked() failed,
		 * yet "out" still calls unlock_new_inode() — the inode was
		 * never locked/hashed; iget_failed() may be the correct
		 * teardown here.  Confirm against the VFS contract.
		 */
		goto out;
	}

	i_info = HMFS_I(inode);
	i_info->i_pino = dir->i_ino;

	if (hmfs_may_set_inline_data(dir)) {
		set_inode_flag(i_info, FI_INLINE_DATA);
	}

	/* The freshly reserved nid must not already have a node block. */
	hmfs_bug_on(sbi, !IS_ERR(get_node(sbi, ino)));

	err = sync_hmfs_inode(inode, false);
	if (!err) {
		inc_valid_inode_count(sbi);
		return inode;
	}
	/*
	 * NOTE(review): when sync_hmfs_inode() fails, nid_free stays false,
	 * so the reserved nid is not handed back via alloc_nid_failed() —
	 * possible nid leak; confirm intended ownership of the nid here.
	 */
out:
	clear_nlink(inode);
	clear_inode_flag(HMFS_I(inode), FI_INC_LINK);
	unlock_new_inode(inode);
fail:
	make_bad_inode(inode);
	iput(inode);
	if (nid_free)
		alloc_nid_failed(sbi, ino);
	return ERR_PTR(err);
}