struct inode *myfs_get_inode(struct super_block *sb, const struct inode *dir,
			     umode_t mode, dev_t dev)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_mapping->a_ops = &myfs_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
		mapping_set_unevictable(inode->i_mapping);
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &myfs_file_inode_operations;
			inode->i_fop = &myfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &myfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
	}
	return inode;
}
void jfs_read_inode(struct inode *inode)
{
	if (diRead(inode)) {
		make_bad_inode(inode);
		return;
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &jfs_file_inode_operations;
		inode->i_fop = &jfs_file_operations;
		inode->i_mapping->a_ops = &jfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &jfs_dir_inode_operations;
		inode->i_fop = &jfs_dir_operations;
		inode->i_mapping->a_ops = &jfs_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_size >= IDATASIZE) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &jfs_aops;
		} else
			inode->i_op = &jfs_symlink_inode_operations;
	} else {
		inode->i_op = &jfs_file_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	}
}
struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_mapping->a_ops = &ramfs_aops;
		inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
		mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
		mapping_set_unevictable(inode->i_mapping);
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &ramfs_file_inode_operations;
			inode->i_fop = &ramfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &ramfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
	}
	return inode;
}
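/*
 * A plausible consumer of a get_inode helper like the ones above, modeled on
 * the ramfs mknod callback of the same kernel era. This is a hedged sketch
 * for illustration; "example_mknod" is not from any of the sources collected
 * here:
 */
static int example_mknod(struct inode *dir, struct dentry *dentry,
			 int mode, dev_t dev)
{
	struct inode *inode = ramfs_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		d_instantiate(dentry, inode);
		dget(dentry);	/* extra pin on the dentry, as ramfs takes */
		error = 0;
		dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	}
	return error;
}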
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
struct inode *janfs_get_inode(struct super_block *sb, const struct inode *dir,
			      umode_t mode, dev_t dev)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_mapping->a_ops = &janfs_aops;
		inode->i_mapping->backing_dev_info = &janfs_backing_dev_info;
		mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
		//mapping_set_unevictable(inode->i_mapping);
		inode->i_flags |= S_NOATIME|S_NOCMTIME;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &janfs_file_inode_operations;
			inode->i_fop = &janfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &janfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			// directory inodes start off with i_nlink == 2 (for "." entry)
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
	}
	return inode;
}
struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
			      umode_t mode, dev_t dev)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_mapping->a_ops = &ramfs_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
		mapping_set_unevictable(inode->i_mapping);
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &ramfs_file_inode_operations;
			inode->i_fop = &ramfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &ramfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
	}
	return inode;
}
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->a_ops = &empty_aops;
}
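/*
 * Aside (not from nilfs): GFP_NOFS is GFP_KERNEL with __GFP_FS removed, so
 * reclaim triggered by these page-cache allocations may still start I/O but
 * will not re-enter filesystem code while fs locks are held. The identity,
 * as a compile-time check (illustrative sketch):
 */
static void example_gfp_nofs_identity(void)
{
	BUILD_BUG_ON(GFP_NOFS != (GFP_KERNEL & ~__GFP_FS));
}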
/*
 * Initialize the Linux inode.
 *
 * When reading existing inodes from disk this is called directly from xfs_iget,
 * when creating a new inode it is called from xfs_ialloc after setting up the
 * inode. These callers have different criteria for clearing XFS_INEW, so leave
 * it up to the caller to deal with unlocking the inode appropriately.
 */
void
xfs_setup_inode(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;
	gfp_t			gfp_mask;

	inode->i_ino = ip->i_ino;
	inode->i_state = I_NEW;

	inode_sb_list_add(inode);

	/* make the inode look hashed for the writeback code */
	hlist_add_fake(&inode->i_hash);

	inode->i_uid = xfs_uid_to_kuid(ip->i_d.di_uid);
	inode->i_gid = xfs_gid_to_kgid(ip->i_d.di_gid);

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	i_size_write(inode, ip->i_d.di_size);
	xfs_diflags_to_iflags(inode, ip);

	if (S_ISDIR(inode->i_mode)) {
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
		ip->d_ops = ip->i_mount->m_dir_inode_ops;
	} else {
		ip->d_ops = ip->i_mount->m_nondir_inode_ops;
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
	}

	/*
	 * Ensure all page cache allocations are done from GFP_NOFS context to
	 * prevent direct reclaim recursion back into the filesystem and blowing
	 * stacks or deadlocking.
	 */
	gfp_mask = mapping_gfp_mask(inode->i_mapping);
	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));

	/*
	 * If there is no attribute fork no ACL can exist on this inode,
	 * and it can't have any file capabilities attached to it either.
	 */
	if (!XFS_IFORK_Q(ip)) {
		inode_has_no_xattr(inode);
		cache_no_acl(inode);
	}
}
void nilfs_btnode_cache_init(struct address_space *btnc)
{
	btnc->host = NULL;  /* can safely set to host inode ? */
	btnc->flags = 0;
	mapping_set_gfp_mask(btnc, GFP_NOFS);
	btnc->assoc_mapping = NULL;
	btnc->backing_dev_info = &default_backing_dev_info;
	btnc->a_ops = &def_btnode_aops;
}
/*
 * NAME:	jfs_mount_rw(sb, remount)
 *
 * FUNCTION:	Completes read-write mount, or remounts read-only volume
 *		as read-write
 */
int jfs_mount_rw(struct super_block *sb, int remount)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	/*
	 * If we are re-mounting a previously read-only volume, we want to
	 * re-read the inode and block maps, since fsck.jfs may have updated
	 * them.
	 */
	if (remount) {
		if (chkSuper(sb) || (sbi->state != FM_CLEAN))
			return -EINVAL;

		truncate_inode_pages(sbi->ipimap->i_mapping, 0);
		truncate_inode_pages(sbi->ipbmap->i_mapping, 0);
		diUnmount(sbi->ipimap, 1);
		if ((rc = diMount(sbi->ipimap))) {
			jfs_err("jfs_mount_rw: diMount failed!");
			return rc;
		}

		dbUnmount(sbi->ipbmap, 1);
		if ((rc = dbMount(sbi->ipbmap))) {
			jfs_err("jfs_mount_rw: dbMount failed!");
			return rc;
		}
	}

	/*
	 * open/initialize log
	 */
	if ((rc = lmLogOpen(sb)))
		return rc;

	/*
	 * update file system superblock;
	 */
	if ((rc = updateSuper(sb, FM_MOUNT))) {
		jfs_err("jfs_mount: updateSuper failed w/rc = %d", rc);
		lmLogClose(sb);
		return rc;
	}

	/*
	 * write MOUNT log record of the file system
	 */
	logMOUNT(sb);

	/*
	 * Set page cache allocation policy
	 */
	mapping_set_gfp_mask(sb->s_bdev->bd_inode->i_mapping, GFP_NOFS);

	return rc;
}
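/*
 * The final mapping_set_gfp_mask() above is notable because its target is the
 * block device's page cache (sb->s_bdev->bd_inode->i_mapping) rather than a
 * regular file's: once the volume is writable, metadata read through the bdev
 * mapping must not recurse into the filesystem during reclaim. The idiom in
 * isolation (hedged sketch; the helper name is invented):
 */
static void example_bdev_nofs(struct super_block *sb)
{
	mapping_set_gfp_mask(sb->s_bdev->bd_inode->i_mapping, GFP_NOFS);
}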
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode,
			struct backing_dev_info *bdi)
{
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = bdi;
	mapping->a_ops = &empty_aops;
}
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}
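/*
 * For context, roughly how nilfs2 drives the reader above from its iget path;
 * reconstructed from memory of the call pattern, so treat it as a sketch
 * rather than a verbatim quote:
 */
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* already in cache and initialized */

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}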
void nilfs_mapping_init(struct address_space *mapping,
			struct backing_dev_info *bdi,
			const struct address_space_operations *aops)
{
	mapping->host = NULL;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = bdi;
	mapping->a_ops = aops;
}
struct inode *ramfs2_get_inode(struct super_block *sb, const struct inode *dir,
			       umode_t mode, dev_t dev)
{
	struct inode *inode;

	if (dir) {
		printk("ramfs2_get_inode(sb, dir.ino:%lu, m:%#0hx, dev)\n",
		       dir->i_ino, (unsigned int) mode);
	} else {
		printk("ramfs2_get_inode(sb, dir.ino:NULL, m:%#0hx, dev)\n",
		       (unsigned int) mode);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		printk("--- ret: %lu\n", inode->i_ino);
		inode_init_owner(inode, dir, mode);
		inode->i_mapping->a_ops = &ramfs2_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
		mapping_set_unevictable(inode->i_mapping);
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &sjfs_file_inode_operations;
			inode->i_fop = &sjfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &sjfs_dir_inode_operations;
			inode->i_fop = &sjfs_dir_operations;
			inc_nlink(inode); // directory inodes start off with i_nlink == 2 (for "." entry)
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
	}
	return inode;
}
int nilfs_init_gcinode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	inode->i_mode = S_IFREG;
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	inode->i_mapping->a_ops = &empty_aops;

	ii->i_flags = 0;
	nilfs_bmap_init_gc(ii->i_bmap);

	return 0;
}
/*
 * In logfs inodes are written to an inode file. The inode file, like any
 * other file, is managed with an inode. The inode file's inode, aka master
 * inode, requires special handling in several respects. First, it cannot be
 * written to the inode file, so it is stored in the journal instead.
 *
 * Secondly, this inode cannot be written back and destroyed before all other
 * inodes have been written. The ordering is important. Linux' VFS is happily
 * unaware of the ordering constraint and would ordinarily destroy the master
 * inode at umount time while other inodes are still in use and dirty. Not
 * good.
 *
 * So logfs makes sure the master inode is not written until all other inodes
 * have been destroyed. Sadly, this method has another side-effect. The VFS
 * will notice one remaining inode and print a frightening warning message.
 * Worse, it is impossible to judge whether such a warning was caused by the
 * master inode or any other inodes have leaked as well.
 *
 * Our attempt at solving this is with logfs_new_meta_inode() below. Its
 * purpose is to create a new inode that will not trigger the warning if such
 * an inode is still in use. An ugly hack, no doubt. Suggestions for
 * improvement are welcome.
 *
 * AV: that's what ->put_super() is for...
 */
struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->i_mode = S_IFREG;
	inode->i_ino = ino;
	inode->i_data.a_ops = &logfs_reg_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_NOFS);

	return inode;
}
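/*
 * Callers of logfs_new_meta_inode() are expected to check the result with
 * IS_ERR(); a hypothetical caller sketch (the function and variable names
 * here are illustrative, not taken from logfs):
 */
static int example_open_meta_inode(struct super_block *sb, u64 ino,
				   struct inode **result)
{
	struct inode *inode = logfs_new_meta_inode(sb, ino);

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	*result = inode;
	return 0;
}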
struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
{
	struct inode *aspace;
	struct gfs2_inode *ip;

	aspace = new_inode(sdp->sd_vfs);
	if (aspace) {
		mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
		aspace->i_mapping->a_ops = &aspace_aops;
		aspace->i_size = ~0ULL;
		ip = GFS2_I(aspace);
		clear_bit(GIF_USER, &ip->i_flags);
		insert_inode_hash(aspace);
	}
	return aspace;
}
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		goto free;

	/* Basically we want to disable the OOM killer and handle ENOMEM
	 * ourselves by sacrificing pages from cached buffers.
	 * XXX shmem_file_[gs]et_gfp_mask()
	 */
	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
			     GFP_HIGHUSER |
			     __GFP_COLD |
			     __GFP_FS |
			     __GFP_RECLAIMABLE |
			     __GFP_NORETRY |
			     __GFP_NOWARN |
			     __GFP_NOMEMALLOC);

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);
	return obj;
fput:
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
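/*
 * The dentry walk above predates widespread use of the f_mapping shortcut;
 * obj->filp->f_path.dentry->d_inode->i_mapping is the same address_space as
 * obj->filp->f_mapping, so an equivalent (hedged) spelling would be:
 */
static void example_set_gem_gfp(struct drm_gem_object *obj)
{
	mapping_set_gfp_mask(obj->filp->f_mapping,
			     GFP_HIGHUSER | __GFP_COLD | __GFP_FS |
			     __GFP_RECLAIMABLE | __GFP_NORETRY |
			     __GFP_NOWARN | __GFP_NOMEMALLOC);
}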
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
static struct page *reiserfs_get_page(struct inode *dir, size_t n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page;
	/* We can deadlock if we try to free dentries,
	   and an unlink/rmdir has just occurred - GFP_NOFS avoids this */
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (PageError(page))
			goto fail;
	}
	return page;

fail:
	reiserfs_put_page(page);
	return ERR_PTR(-EIO);
}
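/*
 * The matching put helper pairs the kmap() above with kunmap() and drops the
 * page reference. In reiserfs of this era it looks like the following
 * (quoted from memory; treat as a sketch, not a verbatim excerpt):
 */
static inline void reiserfs_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}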
static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err;

	err = dquot_initialize(dir);
	if (err)
		return err;

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);

	set_inode_flag(inode, FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out_fail;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);

	f2fs_balance_fs(sbi, true);
	return 0;

out_fail:
	clear_inode_flag(inode, FI_INC_LINK);
	handle_failed_inode(inode);
	return err;
}
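/*
 * For reference, the f2fs GFP shorthands used in these snippets expand, in
 * f2fs.h of kernels from this era (quoted from memory; verify against your
 * tree), to GFP_NOFS plus zeroing and optional highmem placement:
 *
 *	#define GFP_F2FS_ZERO		(GFP_NOFS | __GFP_ZERO)
 *	#define GFP_F2FS_HIGH_ZERO	(GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO)
 */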
int nilfs_init_gcinode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	inode->i_mode = S_IFREG;
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	inode->i_mapping->a_ops = &empty_aops;
#if HAVE_MAPPING_BACKING_DEV_INFO
	inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
#endif

	ii->i_flags = 0;
	nilfs_bmap_init_gc(ii->i_bmap);

#if defined(YANQIN)
	INIT_RADIX_TREE(&(ii->i_gc_blocks), GFP_NOFS);
#endif

	return 0;
}
static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct inode *inode;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	f2fs_unlock_op(sbi);
	if (err)
		goto out_fail;

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;

out_fail:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}
/**
 * psb_gem_create - create a mappable object
 * @file: the DRM file of the client
 * @dev: our device
 * @size: the size requested
 * @handlep: returned handle (opaque number)
 *
 * Create a GEM object, fill in the boilerplate and attach a handle to
 * it so that userspace can speak about it. This does the core work
 * for the various methods that do/will create GEM objects for things
 */
static int psb_gem_create(struct drm_file *file, struct drm_device *dev,
			  uint64_t size, uint32_t *handlep)
{
	struct gtt_range *r;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	/* Allocate our object - for now a direct gtt range which is not
	   stolen memory backed */
	r = psb_gtt_alloc_range(dev, size, "gem", 0);
	if (r == NULL) {
		dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
		return -ENOSPC;
	}
	/* Initialize the extra goodies GEM needs to do all the hard work */
	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
		psb_gtt_free_range(dev, r);
		/* GEM doesn't give an error code so use -ENOMEM */
		dev_err(dev->dev, "GEM init failed for %lld\n", size);
		return -ENOMEM;
	}
	/* Limit the object to 32bit mappings */
	mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
	/* Give the object a handle so we can carry it more easily */
	ret = drm_gem_handle_create(file, &r->gem, &handle);
	if (ret) {
		dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
			&r->gem, size);
		drm_gem_object_release(&r->gem);
		psb_gtt_free_range(dev, r);
		return ret;
	}
	/* We have the initial and handle reference but need only one now */
	drm_gem_object_unreference(&r->gem);
	*handlep = handle;
	return 0;
}
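/*
 * __GFP_DMA32 above is what enforces the "32bit mappings" comment: every page
 * subsequently faulted into the object's shmem mapping is allocated below
 * 4GB physical. The idiom in isolation (illustrative helper name, not from
 * the driver above):
 */
static void example_limit_mapping_to_dma32(struct address_space *mapping)
{
	mapping_set_gfp_mask(mapping, GFP_KERNEL | __GFP_DMA32);
}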
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
/*
 * Initialize the Linux inode, set up the operation vectors and
 * unlock the inode.
 *
 * When reading existing inodes from disk this is called directly
 * from xfs_iget, when creating a new inode it is called from
 * xfs_ialloc after setting up the inode.
 *
 * We are always called with an uninitialised linux inode here.
 * We need to initialise the necessary fields and take a reference
 * on it.
 */
void
xfs_setup_inode(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;
	gfp_t			gfp_mask;

	inode->i_ino = ip->i_ino;
	inode->i_state = I_NEW;

	inode_sb_list_add(inode);

	/* make the inode look hashed for the writeback code */
	hlist_add_fake(&inode->i_hash);

	inode->i_mode	= ip->i_d.di_mode;
	set_nlink(inode, ip->i_d.di_nlink);
	inode->i_uid	= xfs_uid_to_kuid(ip->i_d.di_uid);
	inode->i_gid	= xfs_gid_to_kgid(ip->i_d.di_gid);

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	xfs_diflags_to_iflags(inode, ip);

	ip->d_ops = ip->i_mount->m_nondir_inode_ops;
	lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
			inode->i_op = &xfs_dir_ci_inode_operations;
		else
			inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		ip->d_ops = ip->i_mount->m_dir_inode_ops;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		if (!(ip->i_df.if_flags & XFS_IFINLINE))
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}

	/*
	 * Ensure all page cache allocations are done from GFP_NOFS context to
	 * prevent direct reclaim recursion back into the filesystem and blowing
	 * stacks or deadlocking.
	 */
	gfp_mask = mapping_gfp_mask(inode->i_mapping);
	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));

	/*
	 * If there is no attribute fork no ACL can exist on this inode,
	 * and it can't have any file capabilities attached to it either.
	 */
	if (!XFS_IFORK_Q(ip)) {
		inode_has_no_xattr(inode);
		cache_no_acl(inode);
	}

	xfs_iflags_clear(ip, XFS_INEW);
	barrier();

	unlock_new_inode(inode);
}
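/*
 * On kernels that provide mapping_gfp_constraint() (used by the nilfs
 * snippets in this collection), the read-modify-write above collapses into a
 * single expression; a hedged equivalent of the XFS idiom:
 */
static void example_clear_gfp_fs(struct address_space *mapping)
{
	mapping_set_gfp_mask(mapping,
			     mapping_gfp_constraint(mapping, ~__GFP_FS));
}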
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
	struct gfs2_sbd *sdp;
	struct address_space *mapping;

	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
	if (!sdp)
		return NULL;

	sb->s_fs_info = sdp;
	sdp->sd_vfs = sb;
	sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
	if (!sdp->sd_lkstats) {
		kfree(sdp);
		return NULL;
	}

	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	gfs2_tune_init(&sdp->sd_tune);

	init_waitqueue_head(&sdp->sd_glock_wait);
	atomic_set(&sdp->sd_glock_disposal, 0);
	init_completion(&sdp->sd_locking_init);
	init_completion(&sdp->sd_wdack);
	spin_lock_init(&sdp->sd_statfs_spin);

	spin_lock_init(&sdp->sd_rindex_spin);
	sdp->sd_rindex_tree.rb_node = NULL;

	INIT_LIST_HEAD(&sdp->sd_jindex_list);
	spin_lock_init(&sdp->sd_jindex_spin);
	mutex_init(&sdp->sd_jindex_mutex);
	init_completion(&sdp->sd_journal_ready);

	INIT_LIST_HEAD(&sdp->sd_quota_list);
	mutex_init(&sdp->sd_quota_mutex);
	mutex_init(&sdp->sd_quota_sync_mutex);
	init_waitqueue_head(&sdp->sd_quota_wait);
	INIT_LIST_HEAD(&sdp->sd_trunc_list);
	spin_lock_init(&sdp->sd_trunc_lock);
	spin_lock_init(&sdp->sd_bitmap_lock);

	mapping = &sdp->sd_aspace;

	address_space_init_once(mapping);
	mapping->a_ops = &gfs2_rgrp_aops;
	mapping->host = sb->s_bdev->bd_inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->backing_dev_info = sb->s_bdi;
	mapping->writeback_index = 0;

	spin_lock_init(&sdp->sd_log_lock);
	atomic_set(&sdp->sd_log_pinned, 0);
	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
	INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
	spin_lock_init(&sdp->sd_ordered_lock);

	init_waitqueue_head(&sdp->sd_log_waitq);
	init_waitqueue_head(&sdp->sd_logd_waitq);
	spin_lock_init(&sdp->sd_ail_lock);
	INIT_LIST_HEAD(&sdp->sd_ail1_list);
	INIT_LIST_HEAD(&sdp->sd_ail2_list);

	init_rwsem(&sdp->sd_log_flush_lock);
	atomic_set(&sdp->sd_log_in_flight, 0);
	init_waitqueue_head(&sdp->sd_log_flush_wait);
	init_waitqueue_head(&sdp->sd_log_frozen_wait);
	atomic_set(&sdp->sd_log_freeze, 0);
	atomic_set(&sdp->sd_frozen_root, 0);
	init_waitqueue_head(&sdp->sd_frozen_root_wait);

	return sdp;
}
static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = BIT(NILFS_I_NEW);
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occur. When supporting nilfs_init_acl(),
		 * proper cancellation of above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}
struct inode *f2fs_iget_nowait(struct super_block *sb, unsigned long ino)
{
	struct f2fs_iget_args args = {
		.ino = ino,
		.on_free = 0
	};
	struct inode *inode = ilookup5(sb, ino, f2fs_iget_test, &args);

	if (inode)
		return inode;
	if (!args.on_free)
		return f2fs_iget(sb, ino);
	return ERR_PTR(-ENOENT);
}

static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_node *rn;
	struct f2fs_inode *ri;

	/* Check if ino is within scope */
	check_nid_range(sbi, inode->i_ino);

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	rn = page_address(node_page);
	ri = &(rn->i);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = le64_to_cpu(ri->i_blocks);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->data_version = le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver) - 1;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	get_extent_info(&fi->ext, ri->i_ext);

	f2fs_put_page(node_page, 1);
	return 0;
}

struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;

	if (!sbi->por_doing && inode->i_nlink == 0) {
		ret = -ENOENT;
		goto bad_inode;
	}

make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping,
				GFP_HIGHUSER_MOVABLE | __GFP_ZERO);
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &f2fs_symlink_inode_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}

void update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_node *rn;
	struct f2fs_inode *ri;

	wait_on_page_writeback(node_page);

	rn = page_address(node_page);
	ri = &(rn->i);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(inode->i_blocks);
	set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);

	set_cold_node(inode, node_page);
	set_page_dirty(node_page);
}

int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *node_page;
	bool need_lock = false;

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (wbc)
		f2fs_balance_fs(sbi);

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	if (!PageDirty(node_page)) {
		need_lock = true;
		f2fs_put_page(node_page, 1);
		mutex_lock(&sbi->write_inode);
		node_page = get_node_page(sbi, inode->i_ino);
		if (IS_ERR(node_page)) {
			mutex_unlock(&sbi->write_inode);
			return PTR_ERR(node_page);
		}
	}
	update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
	if (need_lock)
		mutex_unlock(&sbi->write_inode);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto no_delete;

	BUG_ON(atomic_read(&F2FS_I(inode)->dirty_dents));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	remove_inode_page(inode);
no_delete:
	clear_inode(inode);
}
struct dentry *f2fs_get_parent(struct dentry *child)
{
	struct qstr dotdot = {.len = 2, .name = ".."};
	unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);

	if (!ino)
		return ERR_PTR(-ENOENT);
	return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
}

static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
		struct nameidata *nd)
{
	struct inode *inode = NULL;
	struct f2fs_dir_entry *de;
	struct page *page;

	if (dentry->d_name.len > F2FS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (de) {
		nid_t ino = le32_to_cpu(de->ino);
		kunmap(page);
		f2fs_put_page(page, 0);

		inode = f2fs_iget(dir->i_sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);

		stat_inc_inline_inode(inode);
	}

	return d_splice_alias(inode, dentry);
}

static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode = dentry->d_inode;
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;

	trace_f2fs_unlink_enter(dir, dentry);
	f2fs_balance_fs(sbi);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de)
		goto fail;

	f2fs_lock_op(sbi);
	err = acquire_orphan_inode(sbi);
	if (err) {
		f2fs_unlock_op(sbi);
		kunmap(page);
		f2fs_put_page(page, 0);
		goto fail;
	}
	f2fs_delete_entry(de, page, inode);
	f2fs_unlock_op(sbi);

	/* In order to evict this inode, we set it dirty */
	mark_inode_dirty(inode);
fail:
	trace_f2fs_unlink_exit(inode, err);
	return err;
}

static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
					const char *symname)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	size_t symlen = strlen(symname) + 1;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_symlink_inode_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	f2fs_unlock_op(sbi);
	if (err)
		goto out;

	err = page_symlink(inode, symname, symlen);
	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return err;
out:
	clear_nlink(inode);
	iget_failed(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}

static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	f2fs_unlock_op(sbi);
	if (err)
		goto out_fail;

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;

out_fail:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	clear_nlink(inode);
	iget_failed(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}

static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (f2fs_empty_dir(inode))
		return f2fs_unlink(dir, dentry);
	return -ENOTEMPTY;
}

static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
				umode_t mode, dev_t rdev)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_op = &f2fs_special_inode_operations;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	f2fs_unlock_op(sbi);
	if (err)
		goto out;

	alloc_nid_done(sbi, inode->i_ino);
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;
out:
	clear_nlink(inode);
	iget_failed(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}

static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT;

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	if (new_inode) {
		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page);
		if (!new_entry)
			goto out_dir;

		f2fs_lock_op(sbi);

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		if (update_dent_inode(old_inode, &new_dentry->d_name)) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		down_write(&F2FS_I(new_inode)->i_sem);
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);
		up_write(&F2FS_I(new_inode)->i_sem);

		mark_inode_dirty(new_inode);

		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		else
			release_orphan_inode(sbi);

		update_inode_page(old_inode);
		update_inode_page(new_inode);
	} else {
		f2fs_lock_op(sbi);

		err = f2fs_add_link(new_dentry, old_inode);
		if (err) {
			f2fs_unlock_op(sbi);
			goto out_dir;
		}

		if (old_dir_entry) {
			inc_nlink(new_dir);
			update_inode_page(new_dir);
		}
	}

	down_write(&F2FS_I(old_inode)->i_sem);
	file_lost_pino(old_inode);
	up_write(&F2FS_I(old_inode)->i_sem);

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, NULL);

	if (old_dir_entry) {
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
			update_inode_page(old_inode);
		} else {
			kunmap(old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		mark_inode_dirty(old_dir);
		update_inode_page(old_dir);
	}

	f2fs_unlock_op(sbi);
	return 0;

put_out_dir:
	f2fs_unlock_op(sbi);
	kunmap(new_page);
	f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		kunmap(old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
out_old:
	kunmap(old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}

const struct inode_operations f2fs_dir_inode_operations = {
	.create		= f2fs_create,
	.lookup		= f2fs_lookup,
	.link		= f2fs_link,
	.unlink		= f2fs_unlink,
	.symlink	= f2fs_symlink,
	.mkdir		= f2fs_mkdir,
	.rmdir		= f2fs_rmdir,
	.mknod		= f2fs_mknod,
	.rename		= f2fs_rename,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_special_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};
struct dentry *f2fs_get_parent(struct dentry *child)
{
	struct qstr dotdot = {.len = 2, .name = ".."};
	unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);

	if (!ino)
		return ERR_PTR(-ENOENT);
	return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
}

static int __recover_dot_dentries(struct inode *dir, nid_t pino)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct qstr dot = {.len = 1, .name = "."};
	struct qstr dotdot = {.len = 2, .name = ".."};
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = 0;

	f2fs_lock_op(sbi);

	de = f2fs_find_entry(dir, &dot, &page, 0);
	if (de) {
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
	} else {
		err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
		if (err)
			goto out;
	}

	de = f2fs_find_entry(dir, &dotdot, &page, 0);
	if (de) {
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
	} else {
		err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
	}
out:
	if (!err) {
		clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS);
		mark_inode_dirty(dir);
	}

	f2fs_unlock_op(sbi);
	return err;
}

static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
		struct nameidata *nd)
{
	struct inode *inode = NULL;
	struct f2fs_dir_entry *de;
	struct page *page;
	nid_t ino;
	int err = 0;

	if (dentry->d_name.len > F2FS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	de = f2fs_find_entry(dir, &dentry->d_name, &page, nd ? nd->flags : 0);
	if (!de)
		return d_splice_alias(inode, dentry);

	ino = le32_to_cpu(de->ino);
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);

	inode = f2fs_iget(dir->i_sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (f2fs_has_inline_dots(inode)) {
		err = __recover_dot_dentries(inode, dir->i_ino);
		if (err)
			goto err_out;
	}
	return d_splice_alias(inode, dentry);

err_out:
	iget_failed(inode);
	return ERR_PTR(err);
}

static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode = dentry->d_inode;
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;

	trace_f2fs_unlink_enter(dir, dentry);
	f2fs_balance_fs(sbi);

	de = f2fs_find_entry(dir, &dentry->d_name, &page, 0);
	if (!de)
		goto fail;

	f2fs_lock_op(sbi);
	err = acquire_orphan_inode(sbi);
	if (err) {
		f2fs_unlock_op(sbi);
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
		goto fail;
	}
	f2fs_delete_entry(de, page, dir, inode);
	f2fs_unlock_op(sbi);

	/* In order to evict this inode, we set it dirty */
	mark_inode_dirty(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
fail:
	trace_f2fs_unlink_exit(inode, err);
	return err;
}

static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page;

	page = page_follow_link_light(dentry, nd);
	if (IS_ERR(page))
		return page;

	/* this is broken symlink case */
	if (*nd_get_link(nd) == 0) {
		kunmap(page);
		page_cache_release(page);
		return ERR_PTR(-ENOENT);
	}
	return page;
}

static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
					const char *symname)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	size_t len = strlen(symname);
	size_t p_len;
	char *p_str;
	struct f2fs_str disk_link = FSTR_INIT(NULL, 0);
	struct f2fs_encrypted_symlink_data *sd = NULL;
	int err;

	if (len > dir->i_sb->s_blocksize)
		return -ENAMETOOLONG;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (f2fs_encrypted_inode(inode))
		inode->i_op = &f2fs_encrypted_symlink_inode_operations;
	else
		inode->i_op = &f2fs_symlink_inode_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);
	alloc_nid_done(sbi, inode->i_ino);

	if (f2fs_encrypted_inode(dir)) {
		struct qstr istr = QSTR_INIT(symname, len);

		err = f2fs_get_encryption_info(inode);
		if (err)
			goto err_out;

		err = f2fs_fname_crypto_alloc_buffer(inode, len, &disk_link);
		if (err)
			goto err_out;

		err = f2fs_fname_usr_to_disk(inode, &istr, &disk_link);
		if (err < 0)
			goto err_out;

		p_len = encrypted_symlink_data_len(disk_link.len) + 1;

		if (p_len > dir->i_sb->s_blocksize) {
			err = -ENAMETOOLONG;
			goto err_out;
		}

		sd = kzalloc(p_len, GFP_NOFS);
		if (!sd) {
			err = -ENOMEM;
			goto err_out;
		}
		memcpy(sd->encrypted_path, disk_link.name, disk_link.len);
		sd->len = cpu_to_le16(disk_link.len);
		p_str = (char *)sd;
	} else {
		p_len = len + 1;
		p_str = (char *)symname;
	}

	err = page_symlink(inode, p_str, p_len);

err_out:
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	/*
	 * Let's flush symlink data in order to avoid broken symlink as much as
	 * possible. Nevertheless, fsyncing is the best way, but there is no
	 * way to get a file descriptor in order to flush that.
	 *
	 * Note that, it needs to do dir->fsync to make this recoverable.
	 * If the symlink path is stored into inline_data, there is no
	 * performance regression.
	 */
	if (!err)
		filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);

	kfree(sd);
	f2fs_fname_crypto_free_buffer(&disk_link);
	return err;
out:
	handle_failed_inode(inode);
	return err;
}

static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out_fail;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

out_fail:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	handle_failed_inode(inode);
	return err;
}

static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (f2fs_empty_dir(inode))
		return f2fs_unlink(dir, dentry);
	return -ENOTEMPTY;
}

static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
				int mode, dev_t rdev)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_op = &f2fs_special_inode_operations;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;
out:
	handle_failed_inode(inode);
	return err;
}

static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT;

	if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
		!f2fs_is_child_context_consistent_with_parent(new_dir,
							old_inode)) {
		err = -EPERM;
		goto out;
	}

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page, 0);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	if (new_inode) {
		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page, 0);
		if (!new_entry)
			goto out_dir;

		f2fs_lock_op(sbi);

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		if (update_dent_inode(old_inode, new_inode,
						&new_dentry->d_name)) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		down_write(&F2FS_I(new_inode)->i_sem);
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);
		up_write(&F2FS_I(new_inode)->i_sem);

		mark_inode_dirty(new_inode);

		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		else
			release_orphan_inode(sbi);

		update_inode_page(old_inode);
		update_inode_page(new_inode);
	} else {
		f2fs_lock_op(sbi);

		err = f2fs_add_link(new_dentry, old_inode);
		if (err) {
			f2fs_unlock_op(sbi);
			goto out_dir;
		}

		if (old_dir_entry) {
			inc_nlink(new_dir);
			update_inode_page(new_dir);
		}
	}

	down_write(&F2FS_I(old_inode)->i_sem);
	file_lost_pino(old_inode);
	if (new_inode && file_enc_name(new_inode))
		file_set_enc_name(old_inode);
	up_write(&F2FS_I(old_inode)->i_sem);

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);

	if (old_dir_entry) {
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
			update_inode_page(old_inode);
		} else {
			f2fs_dentry_kunmap(old_inode, old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		mark_inode_dirty(old_dir);
		update_inode_page(old_dir);
	}

	f2fs_unlock_op(sbi);

	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

put_out_dir:
	f2fs_unlock_op(sbi);
	f2fs_dentry_kunmap(new_dir, new_page);
	f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		f2fs_dentry_kunmap(old_inode, old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
out_old:
	f2fs_dentry_kunmap(old_dir, old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}

#ifdef CONFIG_F2FS_FS_ENCRYPTION
static void *f2fs_encrypted_follow_link(struct dentry *dentry,
						struct nameidata *nd)
{
	struct page *cpage = NULL;
	char *caddr, *paddr = NULL;
	struct f2fs_str cstr;
	struct f2fs_str pstr = FSTR_INIT(NULL, 0);
	struct inode *inode = dentry->d_inode;
	struct f2fs_encrypted_symlink_data *sd;
	loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
	u32 max_size = inode->i_sb->s_blocksize;
	int res;

	res = f2fs_get_encryption_info(inode);
	if (res)
		return ERR_PTR(res);

	cpage = read_mapping_page(inode->i_mapping, 0, NULL);
	if (IS_ERR(cpage))
		return cpage;
	caddr = kmap(cpage);
	caddr[size] = 0;

	/* Symlink is encrypted */
	sd = (struct f2fs_encrypted_symlink_data *)caddr;
	cstr.name = sd->encrypted_path;
	cstr.len = le16_to_cpu(sd->len);

	/* this is broken symlink case */
	if (cstr.name[0] == 0 && cstr.len == 0) {
		res = -ENOENT;
		goto errout;
	}

	if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
								max_size) {
		/* Symlink data on the disk is corrupted */
		res = -EIO;
		goto errout;
	}
	res = f2fs_fname_crypto_alloc_buffer(inode, cstr.len, &pstr);
	if (res)
		goto errout;

	res = f2fs_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
	if (res < 0)
		goto errout;

	paddr = pstr.name;

	/* Null-terminate the name */
	paddr[res] = '\0';
	nd_set_link(nd, paddr);

	kunmap(cpage);
	page_cache_release(cpage);
	return NULL;
errout:
	f2fs_fname_crypto_free_buffer(&pstr);
	kunmap(cpage);
	page_cache_release(cpage);
	return ERR_PTR(res);
}

void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
		void *cookie)
{
	char *s = nd_get_link(nd);

	if (!IS_ERR(s))
		kfree(s);
}

const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= f2fs_encrypted_follow_link,
	.put_link	= kfree_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
};
#endif

const struct inode_operations f2fs_dir_inode_operations = {
	.create		= f2fs_create,
	.lookup		= f2fs_lookup,
	.link		= f2fs_link,
	.unlink		= f2fs_unlink,
	.symlink	= f2fs_symlink,
	.mkdir		= f2fs_mkdir,
	.rmdir		= f2fs_rmdir,
	.mknod		= f2fs_mknod,
	.rename		= f2fs_rename,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.check_acl	= f2fs_check_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= f2fs_follow_link,
	.put_link	= page_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_special_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.check_acl	= f2fs_check_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};