/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb;
	struct ufs_sb_info *sbi;
	struct ufs_sb_private_info *uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cg_private_info *ucpi;
	struct ufs_cylinder_group *ucg;
	struct inode *inode;
	unsigned cg, bit, i, j, start;
	struct ufs_inode_info *ufsi;
	int err = -ENOSPC;

	UFSD("ENTER\n");

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ufsi = UFS_I(inode);
	sbi = UFS_SB(sb);
	uspi = sbi->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	lock_super(sb);

	/*
	 * Try to place the inode in its parent directory
	 */
	i = ufs_inotocg(dir->i_ino);
	if (sbi->fs_cs(i).cs_nifree) {
		cg = i;
		goto cg_found;
	}

	/*
	 * Use a quadratic hash to find a group with a free inode
	 */
	for (j = 1; j < uspi->s_ncg; j <<= 1) {
		i += j;
		if (i >= uspi->s_ncg)
			i -= uspi->s_ncg;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	/*
	 * That failed: try linear search for a free inode
	 */
	i = ufs_inotocg(dir->i_ino) + 1;
	for (j = 2; j < uspi->s_ncg; j++) {
		i++;
		if (i >= uspi->s_ncg)
			i = 0;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	goto failed;

cg_found:
	ucpi = ufs_load_cylinder(sb, cg);
	if (!ucpi) {
		err = -EIO;
		goto failed;
	}
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic(sb, "ufs_new_inode",
			  "internal error, bad cg magic number");

	start = ucpi->c_irotor;
	bit = ubh_find_next_zero_bit(UCPI_UBH(ucpi), ucpi->c_iusedoff,
				     uspi->s_ipg, start);
	if (!(bit < uspi->s_ipg)) {
		bit = ubh_find_first_zero_bit(UCPI_UBH(ucpi),
					      ucpi->c_iusedoff, start);
		if (!(bit < start)) {
			ufs_error(sb, "ufs_new_inode",
				  "cylinder group %u corrupted - error in inode bitmap\n",
				  cg);
			err = -EIO;
			goto failed;
		}
	}
	UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg);
	if (ubh_isclr(UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
	else {
		ufs_panic(sb, "ufs_new_inode", "internal error");
		err = -EIO;
		goto failed;
	}

	if (uspi->fs_magic == UFS2_MAGIC) {
		u32 initediblk = fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk);

		if (bit + uspi->s_inopb > initediblk &&
		    initediblk < fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_niblk))
			ufs2_init_inodes_chunk(sb, ucpi, ucg);
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
	uspi->cs_total.cs_nifree--;
	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);

	if (S_ISDIR(mode)) {
		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
		uspi->cs_total.cs_ndir++;
		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
	}
	ubh_mark_buffer_dirty(USPI_UBH(uspi));
	ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	sb->s_dirt = 1;

	inode->i_ino = cg * uspi->s_ipg + bit;
	inode_init_owner(inode, dir, mode);
	inode->i_blocks = 0;
	inode->i_generation = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_flags = UFS_I(dir)->i_flags;
	ufsi->i_lastfrag = 0;
	ufsi->i_shadow = 0;
	ufsi->i_osync = 0;
	ufsi->i_oeftflag = 0;
	ufsi->i_dir_start_lookup = 0;
	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	if (uspi->fs_magic == UFS2_MAGIC) {
		struct buffer_head *bh;
		struct ufs2_inode *ufs2_inode;

		/*
		 * Set up the birth date here; there is no sense in holding it
		 * in struct ufs_inode_info and losing the 64-bit value.
		 */
		bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
		if (!bh) {
			ufs_warning(sb, "ufs_read_inode",
				    "unable to read inode %lu\n",
				    inode->i_ino);
			err = -EIO;
			goto fail_remove_inode;
		}
		lock_buffer(bh);
		ufs2_inode = (struct ufs2_inode *)bh->b_data;
		ufs2_inode += ufs_inotofsbo(inode->i_ino);
		ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
		ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (sb->s_flags & MS_SYNCHRONOUS)
			sync_dirty_buffer(bh);
		brelse(bh);
	}

	unlock_super(sb);

	UFSD("allocating inode %lu\n", inode->i_ino);
	UFSD("EXIT\n");
	return inode;

fail_remove_inode:
	unlock_super(sb);
	clear_nlink(inode);
	iput(inode);
	UFSD("EXIT (FAILED): err %d\n", err);
	return ERR_PTR(err);
failed:
	unlock_super(sb);
	make_bad_inode(inode);
	iput(inode);
	UFSD("EXIT (FAILED): err %d\n", err);
	return ERR_PTR(err);
}
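/*
 * Illustrative sketch (not part of the UFS sources): the cylinder-group
 * probe order used above, factored out on its own.  'nifree' and 'ncg' are
 * hypothetical stand-ins for sbi->fs_cs(i).cs_nifree and uspi->s_ncg; the
 * function name is made up.
 */
static int probe_cylinder_group(const unsigned *nifree, unsigned ncg,
				unsigned start)
{
	unsigned i = start, j;

	/* 1. the parent directory's own group */
	if (nifree[i])
		return i;

	/* 2. quadratic hash: offsets 1, 2, 4, 8, ... from the start group */
	for (j = 1; j < ncg; j <<= 1) {
		i += j;
		if (i >= ncg)
			i -= ncg;
		if (nifree[i])
			return i;
	}

	/* 3. fall back to a linear scan of the remaining groups */
	i = start + 1;
	for (j = 2; j < ncg; j++) {
		i++;
		if (i >= ncg)
			i = 0;
		if (nifree[i])
			return i;
	}

	return -1;	/* no group with a free inode */
}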
int v9fs_init_inode(struct v9fs_session_info *v9ses,
		    struct inode *inode, int mode)
{
	int err = 0;

	inode_init_owner(inode, NULL, mode);
	inode->i_blocks = 0;
	inode->i_rdev = 0;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_mapping->a_ops = &v9fs_addr_operations;

	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		if (v9fs_proto_dotl(v9ses)) {
			inode->i_op = &v9fs_file_inode_operations_dotl;
			inode->i_fop = &v9fs_file_operations_dotl;
		} else if (v9fs_proto_dotu(v9ses)) {
			inode->i_op = &v9fs_file_inode_operations;
			inode->i_fop = &v9fs_file_operations;
		} else {
			P9_DPRINTK(P9_DEBUG_ERROR,
				   "special files without extended mode\n");
			err = -EINVAL;
			goto error;
		}
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	case S_IFREG:
		if (v9fs_proto_dotl(v9ses)) {
			inode->i_op = &v9fs_file_inode_operations_dotl;
			if (v9ses->cache)
				inode->i_fop =
					&v9fs_cached_file_operations_dotl;
			else
				inode->i_fop = &v9fs_file_operations_dotl;
		} else {
			inode->i_op = &v9fs_file_inode_operations;
			if (v9ses->cache)
				inode->i_fop = &v9fs_cached_file_operations;
			else
				inode->i_fop = &v9fs_file_operations;
		}
		break;
	case S_IFLNK:
		if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				   "extended modes used with legacy protocol.\n");
			err = -EINVAL;
			goto error;
		}
		if (v9fs_proto_dotl(v9ses))
			inode->i_op = &v9fs_symlink_inode_operations_dotl;
		else
			inode->i_op = &v9fs_symlink_inode_operations;
		break;
	case S_IFDIR:
		inc_nlink(inode);
		if (v9fs_proto_dotl(v9ses))
			inode->i_op = &v9fs_dir_inode_operations_dotl;
		else if (v9fs_proto_dotu(v9ses))
			inode->i_op = &v9fs_dir_inode_operations_dotu;
		else
			inode->i_op = &v9fs_dir_inode_operations;
		if (v9fs_proto_dotl(v9ses))
			inode->i_fop = &v9fs_dir_operations_dotl;
		else
			inode->i_fop = &v9fs_dir_operations;
		break;
	default:
		P9_DPRINTK(P9_DEBUG_ERROR, "BAD mode 0x%x S_IFMT 0x%x\n",
			   mode, mode & S_IFMT);
		err = -EINVAL;
		goto error;
	}
error:
	return err;
}
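/*
 * Hedged usage sketch (not taken from fs/9p): unlike the *_new_inode()
 * helpers elsewhere in this collection, v9fs_init_inode() only fills in an
 * already allocated inode, so a caller pairs it with new_inode()/iput().
 * The helper name 'example_get_inode' is made up.
 */
static struct inode *example_get_inode(struct v9fs_session_info *v9ses,
				       struct super_block *sb, int mode)
{
	struct inode *inode;
	int err;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	err = v9fs_init_inode(v9ses, inode, mode);
	if (err) {
		iput(inode);		/* undo new_inode() on failure */
		return ERR_PTR(err);
	}
	return inode;
}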
struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
			      int mode)
{
	struct inode *inode;
	struct ubifs_inode *ui;

	inode = new_inode(c->vfs_sb);
	ui = ubifs_inode(inode);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	/*
	 * Set 'S_NOCMTIME' to prevent VFS from updating [mc]time of inodes and
	 * marking them dirty in file write path (see 'file_update_time()').
	 * UBIFS has to fully control "clean <-> dirty" transitions of inodes
	 * to make budgeting work.
	 */
	inode->i_flags |= (S_NOCMTIME);

	inode_init_owner(inode, dir, mode);
	inode->i_mtime = inode->i_atime = inode->i_ctime =
			 ubifs_current_time(inode);
	inode->i_mapping->nrpages = 0;
	/* Disable readahead */
	inode->i_mapping->backing_dev_info = &c->bdi;

	switch (mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &ubifs_file_address_operations;
		inode->i_op = &ubifs_file_inode_operations;
		inode->i_fop = &ubifs_file_operations;
		break;
	case S_IFDIR:
		inode->i_op = &ubifs_dir_inode_operations;
		inode->i_fop = &ubifs_dir_operations;
		inode->i_size = ui->ui_size = UBIFS_INO_NODE_SZ;
		break;
	case S_IFLNK:
		inode->i_op = &ubifs_symlink_inode_operations;
		break;
	case S_IFSOCK:
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
		inode->i_op = &ubifs_file_inode_operations;
		break;
	default:
		BUG();
	}

	ui->flags = inherit_flags(dir, mode);
	ubifs_set_inode_flags(inode);
	if (S_ISREG(mode))
		ui->compr_type = c->default_compr;
	else
		ui->compr_type = UBIFS_COMPR_NONE;
	ui->synced_i_size = 0;

	spin_lock(&c->cnt_lock);
	/* Inode number overflow is currently not supported */
	if (c->highest_inum >= INUM_WARN_WATERMARK) {
		if (c->highest_inum >= INUM_WATERMARK) {
			spin_unlock(&c->cnt_lock);
			ubifs_err("out of inode numbers");
			make_bad_inode(inode);
			iput(inode);
			return ERR_PTR(-EINVAL);
		}
		ubifs_warn("running out of inode numbers (current %lu, max %d)",
			   (unsigned long)c->highest_inum, INUM_WATERMARK);
	}

	inode->i_ino = ++c->highest_inum;
	/*
	 * The creation sequence number remains with this inode for its
	 * lifetime. All nodes for this inode have a greater sequence number,
	 * and so it is possible to distinguish obsolete nodes belonging to a
	 * previous incarnation of the same inode number - for example, for the
	 * purpose of rebuilding the index.
	 */
	ui->creat_sqnum = ++c->max_sqnum;
	spin_unlock(&c->cnt_lock);

	return inode;
}
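/*
 * Illustrative sketch of the watermark logic above, pulled out of the UBIFS
 * context.  The struct, function name and parameters are hypothetical;
 * 'warn_at' and 'max' stand in for INUM_WARN_WATERMARK and INUM_WATERMARK,
 * with warn_at < max assumed.
 */
struct inum_counter {
	spinlock_t	lock;
	unsigned long	highest;
};

/* Returns the next inode number, or 0 once the space is exhausted. */
static unsigned long take_inum(struct inum_counter *c,
			       unsigned long warn_at, unsigned long max)
{
	unsigned long inum;

	spin_lock(&c->lock);
	if (c->highest >= max) {
		spin_unlock(&c->lock);
		return 0;		/* out of inode numbers */
	}
	if (c->highest >= warn_at)
		pr_warn("running out of inode numbers (current %lu, max %lu)\n",
			c->highest, max);
	inum = ++c->highest;		/* numbers are never reused */
	spin_unlock(&c->lock);
	return inum;
}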
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs; once nilfs_init_acl() does
				    real work, proper cancellation of the
				    steps above must be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}
/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext3_new_inode(handle_t *handle, struct inode *dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	int group;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext3_group_desc *gdp = NULL;
	struct ext3_super_block *es;
	struct ext3_inode_info *ei;
	struct ext3_sb_info *sbi;
	int err = 0;
	struct inode *ret;
	int i;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT3_I(inode);

	sbi = EXT3_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			group = find_group_dir(sb, dir);
		else
			group = find_group_orlov(sb, dir);
	} else
		group = find_group_other(sb, dir);

	err = -ENOSPC;
	if (group == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext3_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext3_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data,
				EXT3_INODES_PER_GROUP(sb), ino);
		if (ino < EXT3_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext3_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext3_journal_dirty_metadata");
				err = ext3_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT3_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in a concurrent environment.  It is
		 * very rare.  We cannot repeat the find_group_xxx() call
		 * because that will simply return the same blockgroup, since
		 * the group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error(sb, "ext3_new_inode",
			   "reserved inode or inode > inodes count - "
			   "block_group = %d, inode=%lu", group, ino);
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext3_journal_get_write_access(handle, bh2);
	if (err) goto fail;
	spin_lock(sb_bgl_lock(sbi, group));
	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
	if (S_ISDIR(mode)) {
		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
	}
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);

	if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino;
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	ei->i_flags =
		ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
#ifdef EXT3_FRAGMENTS
	ei->i_faddr = 0;
	ei->i_frag_no = 0;
	ei->i_frag_size = 0;
#endif
	ei->i_file_acl = 0;
	ei->i_dir_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext3_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	if (insert_inode_locked(inode) < 0) {
		err = -EINVAL;
		goto fail_drop;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state_flags = 0;
	ext3_set_inode_state(inode, EXT3_STATE_NEW);

	ei->i_extra_isize =
		(EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
		sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext3_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext3_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext3_mark_inode_dirty(handle, inode);
	if (err) {
		ext3_std_error(sb, err);
		goto fail_free_drop;
	}

	ext3_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext3_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	unlock_new_inode(inode);
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
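/*
 * Sketch (not ext3 code) of the claim pattern used in the loop above: find a
 * clear bit, try to take it atomically, and retry from the next bit if
 * another CPU won the race.  The function name is hypothetical, and the
 * generic find_next_zero_bit()/test_and_set_bit() primitives stand in for
 * ext3_find_next_zero_bit() and ext3_set_bit_atomic() under sb_bgl_lock().
 */
static int claim_free_slot(unsigned long *bitmap, unsigned nslots)
{
	unsigned slot = 0;

	while (slot < nslots) {
		slot = find_next_zero_bit(bitmap, nslots, slot);
		if (slot >= nslots)
			break;			/* no free slot left */
		if (!test_and_set_bit(slot, bitmap))
			return slot;		/* we won the bit */
		slot++;				/* lost the race, keep looking */
	}
	return -1;
}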
static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping,
						    ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_after_creation; /* never occurs; once
					       nilfs_init_acl() does real
					       work, proper cancellation of
					       the steps above must be
					       considered */

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /* raw_inode will be deleted through
			 nilfs_evict_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}
static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	nid_t ino;
	struct inode *inode;
	bool nid_free = false;
	int err;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	f2fs_lock_op(sbi);
	if (!alloc_nid(sbi, &ino)) {
		f2fs_unlock_op(sbi);
		err = -ENOSPC;
		goto fail;
	}
	f2fs_unlock_op(sbi);

	inode_init_owner(inode, dir, mode);

	inode->i_ino = ino;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->i_generation = sbi->s_next_generation++;

	err = insert_inode_locked(inode);
	if (err) {
		err = -EINVAL;
		nid_free = true;
		goto fail;
	}

	/* If the directory is encrypted, then we should encrypt the inode. */
	if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
		f2fs_set_encrypted_inode(inode);

	set_inode_flag(inode, FI_NEW_INODE);

	if (test_opt(sbi, INLINE_XATTR))
		set_inode_flag(inode, FI_INLINE_XATTR);
	if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
		set_inode_flag(inode, FI_INLINE_DATA);
	if (f2fs_may_inline_dentry(inode))
		set_inode_flag(inode, FI_INLINE_DENTRY);

	f2fs_init_extent_tree(inode, NULL);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	trace_f2fs_new_inode(inode, 0);
	return inode;

fail:
	trace_f2fs_new_inode(inode, err);
	make_bad_inode(inode);
	if (nid_free)
		set_inode_flag(inode, FI_FREE_NID);
	iput(inode);
	return ERR_PTR(err);
}
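/*
 * Hedged caller sketch, not taken from f2fs itself: the ERR_PTR() error
 * convention the helper above expects from its callers.  'example_create'
 * is made up, and a real create operation would also wire up i_op/i_fop and
 * add the directory entry before instantiating the dentry.
 */
static int example_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode)
{
	struct inode *inode = f2fs_new_inode(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* -ENOMEM, -ENOSPC, -EINVAL, ... */

	/* ... set up operations and link the inode into the directory ... */

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);	/* pairs with insert_inode_locked() */
	return 0;
}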
struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
{
	struct super_block *sb = dir->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct inode *inode;
	int block;
	uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
	struct udf_inode_info *iinfo;
	struct udf_inode_info *dinfo = UDF_I(dir);

	inode = new_inode(sb);

	if (!inode) {
		*err = -ENOMEM;
		return NULL;
	}
	*err = -ENOSPC;

	iinfo = UDF_I(inode);
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
		iinfo->i_efe = 1;
		if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
			sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
		iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
					      sizeof(struct extendedFileEntry),
					      GFP_KERNEL);
	} else {
		iinfo->i_efe = 0;
		iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
					      sizeof(struct fileEntry),
					      GFP_KERNEL);
	}
	if (!iinfo->i_ext.i_data) {
		iput(inode);
		*err = -ENOMEM;
		return NULL;
	}

	block = udf_new_block(dir->i_sb, NULL,
			      dinfo->i_location.partitionReferenceNum,
			      start, err);
	if (*err) {
		iput(inode);
		return NULL;
	}

	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDescImpUse *lvidiu;

		iinfo->i_unique = lvid_get_unique_id(sb);
		mutex_lock(&sbi->s_alloc_mutex);
		lvidiu = udf_sb_lvidiu(sbi);
		if (S_ISDIR(mode))
			le32_add_cpu(&lvidiu->numDirs, 1);
		else
			le32_add_cpu(&lvidiu->numFiles, 1);
		udf_updated_lvid(sb);
		mutex_unlock(&sbi->s_alloc_mutex);
	}

	inode_init_owner(inode, dir, mode);

	iinfo->i_location.logicalBlockNum = block;
	iinfo->i_location.partitionReferenceNum =
				dinfo->i_location.partitionReferenceNum;
	inode->i_ino = udf_get_lb_pblock(sb, &iinfo->i_location, 0);
	inode->i_blocks = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_use = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	inode->i_mtime = inode->i_atime = inode->i_ctime =
		iinfo->i_crtime = current_fs_time(inode->i_sb);
	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	*err = 0;
	return inode;
}
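/*
 * Hedged caller sketch (not from the UDF sources): unlike the ERR_PTR()
 * style helpers elsewhere in this collection, udf_new_inode() reports
 * failure by returning NULL and storing a negative errno through the 'err'
 * pointer.  The helper name 'example_udf_create' is made up.
 */
static int example_udf_create(struct inode *dir, struct dentry *dentry,
			      umode_t mode)
{
	int err;
	struct inode *inode = udf_new_inode(dir, mode, &err);

	if (!inode)
		return err;	/* *err was filled in by udf_new_inode() */

	/* ... set up operations and build the on-disk directory entry ... */

	d_instantiate(dentry, inode);
	return 0;
}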
static int simplefs_create_fs_object(struct inode *dir, struct dentry *dentry,
				     umode_t mode)
{
	struct inode *inode;
	struct simplefs_inode *sfs_inode;
	struct simplefs_inode *inode_iterator;
	struct super_block *sb;
	struct simplefs_dir_record *record;
	struct simplefs_inode *parent_dir_inode;
	struct buffer_head *bh;
	struct simplefs_dir_record *dir_contents_datablock;
	uint64_t count;
	int ret;

	if (mutex_lock_interruptible(&simplefs_directory_children_update_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		return -EINTR;
	}

	sb = dir->i_sb;

	ret = simplefs_sb_get_objects_count(sb, &count);
	if (ret < 0) {
		mutex_unlock(&simplefs_directory_children_update_lock);
		return ret;
	}

	if (unlikely(count >= SIMPLEFS_MAX_FILESYSTEM_OBJECTS_SUPPORTED)) {
		/* The above condition could be just == instead of >= */
		printk(KERN_ERR
		       "Maximum number of objects supported by simplefs is already reached");
		mutex_unlock(&simplefs_directory_children_update_lock);
		return -ENOSPC;
	}

	if (!S_ISDIR(mode) && !S_ISREG(mode)) {
		printk(KERN_ERR
		       "Creation request for neither a file nor a directory");
		mutex_unlock(&simplefs_directory_children_update_lock);
		return -EINVAL;
	}

	inode = new_inode(sb);
	if (!inode) {
		mutex_unlock(&simplefs_directory_children_update_lock);
		return -ENOMEM;
	}

	inode->i_sb = sb;
	inode->i_op = &simplefs_inode_ops;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_ino = 10;

	/* Loop until we get a unique inode number */
	while (simplefs_get_inode(sb, inode->i_ino)) {
		/* inode inode->i_ino already exists */
		inode->i_ino++;
	}

	/* FIXME: This is leaking. We need to free all in-memory inodes sometime */
	sfs_inode = kmalloc(sizeof(struct simplefs_inode), GFP_KERNEL);
	sfs_inode->inode_no = inode->i_ino;
	inode->i_private = sfs_inode;
	sfs_inode->mode = mode;

	if (S_ISDIR(mode)) {
		printk(KERN_INFO "New directory creation request\n");
		sfs_inode->dir_children_count = 0;
		inode->i_fop = &simplefs_dir_operations;
	} else if (S_ISREG(mode)) {
		printk(KERN_INFO "New file creation request\n");
		sfs_inode->file_size = 0;
		inode->i_fop = &simplefs_file_operations;
	}

	/* First get a free block and update the free map,
	 * then add the inode to the inode store and update the sb inodes_count,
	 * then update the parent directory's inode with the new child.
	 *
	 * The above ordering helps us to maintain fs consistency
	 * even in most crashes.
	 */
	ret = simplefs_sb_get_a_freeblock(sb, &sfs_inode->data_block_number);
	if (ret < 0) {
		printk(KERN_ERR "simplefs could not get a freeblock");
		mutex_unlock(&simplefs_directory_children_update_lock);
		return ret;
	}

	simplefs_inode_add(sb, sfs_inode);

	record = kmalloc(sizeof(struct simplefs_dir_record), GFP_KERNEL);
	record->inode_no = sfs_inode->inode_no;
	strcpy(record->filename, dentry->d_name.name);

	parent_dir_inode = SIMPLEFS_INODE(dir);
	bh = sb_bread(sb, parent_dir_inode->data_block_number);
	dir_contents_datablock = (struct simplefs_dir_record *)bh->b_data;

	/* Navigate to the last record in the directory contents */
	dir_contents_datablock += parent_dir_inode->dir_children_count;

	memcpy(dir_contents_datablock, record,
	       sizeof(struct simplefs_dir_record));

	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	if (mutex_lock_interruptible(&simplefs_inodes_mgmt_lock)) {
		mutex_unlock(&simplefs_directory_children_update_lock);
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		return -EINTR;
	}

	bh = (struct buffer_head *)sb_bread(sb,
					    SIMPLEFS_INODESTORE_BLOCK_NUMBER);

	inode_iterator = (struct simplefs_inode *)bh->b_data;

	if (mutex_lock_interruptible(&simplefs_sb_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		return -EINTR;
	}

	count = 0;
	while (inode_iterator->inode_no != parent_dir_inode->inode_no
	       && count < SIMPLEFS_SB(sb)->inodes_count) {
		count++;
		inode_iterator++;
	}

	if (likely(inode_iterator->inode_no == parent_dir_inode->inode_no)) {
		parent_dir_inode->dir_children_count++;
		inode_iterator->dir_children_count =
		    parent_dir_inode->dir_children_count;
		/* Update the parent inode's dir count to reflect the new
		 * child too */

		mark_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	} else {
		printk(KERN_ERR
		       "The updated childcount could not be stored to the dir inode.");
		/* TODO: Remove the newly created inode from the disk and
		 * in-memory inode store and also update the superblock,
		 * freemaps etc. to reflect the same.  Basically, undo all
		 * actions done during this create call */
	}

	brelse(bh);

	mutex_unlock(&simplefs_sb_lock);
	mutex_unlock(&simplefs_inodes_mgmt_lock);
	mutex_unlock(&simplefs_directory_children_update_lock);

	inode_init_owner(inode, dir, mode);
	d_add(dentry, inode);

	return 0;
}
static int dedupfs_create_fs_object(struct inode *dir, struct dentry *dentry,
				    umode_t mode)
{
	struct inode *inode;
	struct dedupfs_inode *dfs_inode;
	struct super_block *sb;
	struct dedupfs_file_record *record;
	struct dedupfs_inode *parent_dir_inode;
	struct buffer_head *bh;
	struct dedupfs_file_record *dir_contents_datablock;
	__le32 count;
	int ret;

	if (mutex_lock_interruptible(&dedupfs_directory_children_update_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		return -EINTR;
	}

	sb = dir->i_sb;

	ret = dedupfs_sb_get_objects_count(sb, &count);
	if (ret < 0) {
		mutex_unlock(&dedupfs_directory_children_update_lock);
		return ret;
	}

	if (count >= DEDUPFS_MAX_FILESYSTEM_OBJECTS_SUPPORTED) {
		printk(KERN_ERR "Maximum number of objects supported "
		       "by dedupfs is already reached\n");
		mutex_unlock(&dedupfs_directory_children_update_lock);
		return -ENOSPC;
	}

	if (!S_ISDIR(mode) && !S_ISREG(mode)) {
		printk(KERN_ERR
		       "Creation request for neither a file nor a directory\n");
		mutex_unlock(&dedupfs_directory_children_update_lock);
		return -EINVAL;
	}

	inode = new_inode(sb);
	if (!inode) {
		printk(KERN_ERR "Cannot allocate new inode\n");
		mutex_unlock(&dedupfs_directory_children_update_lock);
		return -ENOMEM;
	}

	inode->i_sb = sb;
	inode->i_op = &dedupfs_inode_ops;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_ino = 11;

	/* Loop until we get a unique inode number */
	while (dedupfs_get_inode(sb, inode->i_ino)) {
		printk(KERN_INFO "inode [%lu] already exists\n", inode->i_ino);
		inode->i_ino++;
	}
	printk(KERN_INFO "Got new unique inode number [%lu]\n", inode->i_ino);

	dfs_inode = kmalloc(sizeof(struct dedupfs_inode), GFP_KERNEL);
	dfs_inode->inode_no = inode->i_ino;
	dfs_inode->mode = mode;
	inode->i_private = dfs_inode;

	if (S_ISDIR(mode)) {
		printk(KERN_INFO "New directory creation request\n");
		dfs_inode->dir_children_count = 0;
		inode->i_fop = &dedupfs_dir_operations;
	} else if (S_ISREG(mode)) {
		printk(KERN_INFO "New file creation request\n");
		dfs_inode->file_size = 0;
		inode->i_fop = &dedupfs_file_operations;
	}

	ret = dedupfs_sb_get_a_freeblock(sb, &dfs_inode->data_block_number);
	if (ret < 0) {
		printk(KERN_ERR "dedupfs could not get a freeblock\n");
		mutex_unlock(&dedupfs_directory_children_update_lock);
		return ret;
	}

	dedupfs_inode_add(sb, dfs_inode);

	record = kmalloc(sizeof(struct dedupfs_file_record), GFP_KERNEL);
	record->inode_no = dfs_inode->inode_no;
	strcpy(record->filename, dentry->d_name.name);
	printk(KERN_INFO "New filename is %s\n", record->filename);

	parent_dir_inode = DEDUPFS_INODE(dir);
	bh = sb_bread(sb, parent_dir_inode->data_block_number);
	dir_contents_datablock = (struct dedupfs_file_record *)bh->b_data;

	/* Navigate to the last record in the directory contents */
	dir_contents_datablock += parent_dir_inode->dir_children_count;

	memcpy(dir_contents_datablock, record,
	       sizeof(struct dedupfs_file_record));

	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);

	parent_dir_inode->dir_children_count++;

	if (mutex_lock_interruptible(&dedupfs_inodes_mgmt_lock)) {
		printk(KERN_ERR "Failed to acquire mutex lock %s +%d\n",
		       __FILE__, __LINE__);
		mutex_unlock(&dedupfs_directory_children_update_lock);
		return -EINTR;
	}

	bh = (struct buffer_head *)sb_bread(sb,
					    DEDUPFS_INODESTORE_BLOCK_NUMBER);
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);

	mutex_unlock(&dedupfs_inodes_mgmt_lock);
	mutex_unlock(&dedupfs_directory_children_update_lock);

	printk(KERN_INFO "Returning success after creating the file\n");

	inode_init_owner(inode, dir, mode);
	d_add(dentry, inode);

	/* This sleep is necessary to update the dentry cache */
	msleep(5);

	return 0;
}