/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

#if NEED_WB_SYNC_NONE_CHECK_FOR_DO_SYNC_MAPPING_RANGE
        if (wbc->sync_mode == WB_SYNC_ALL ||
            (wbc->sync_mode == WB_SYNC_NONE && !current_is_pdflush()))
#else
        if (wbc->sync_mode == WB_SYNC_ALL)
#endif
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        page_debug(3, "called (page=%p, index=%lu, wbc nonblocking %d, "
                   "wbc for_reclaim %d)\n",
                   page, page->index, wbc->nonblocking, wbc->for_reclaim);
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
        int ret = __set_page_dirty_buffers(page);

        page_debug(3, "called (page=%p)\n", page);
        if (ret) {
                struct inode *inode = page->mapping->host;
                struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
                unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

                nilfs_set_file_dirty(sbi, inode, nr_dirty);
        }
        page_debug(3, "done (ret=%d, page=%p)\n", ret, page);
        return ret;
}

#if HAVE_WRITE_BEGIN_WRITE_END
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        *pagep = NULL;
        err = block_write_begin(file, mapping, pos, len, flags, pagep,
                                fsdata, nilfs_get_block);
        if (unlikely(err))
                nilfs_transaction_abort(inode->i_sb);
        return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}

#else /* HAVE_WRITE_BEGIN_WRITE_END */
static int nilfs_prepare_write(struct file *file, struct page *page,
                               unsigned from, unsigned to)
{
        struct address_space *mapping = page->mapping;
        pgoff_t offset = page->index;
        struct inode *inode = mapping->host;
        int err;

        unlock_page(page);
        err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
        lock_page(page);
        if (unlikely(page->mapping != mapping || page->index != offset)) {
                unlock_page(page);
                if (likely(!err))
                        nilfs_transaction_abort(inode->i_sb);
                return AOP_TRUNCATED_PAGE;
        }
        if (unlikely(err))
                return err;

        err = block_prepare_write(page, from, to, nilfs_get_block);
        if (unlikely(err))
                nilfs_transaction_abort(inode->i_sb);
        return err;
}
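/*
 * For context only (not part of the excerpts above): a sketch of how the
 * callbacks defined here are typically wired into an address_space_operations
 * table.  The member names follow the kernel era that still has readpages()
 * and set_page_dirty(); the real nilfs_aops table also carries entries (e.g.
 * .readpage, .invalidatepage, .direct_IO) whose handlers are not shown in
 * these excerpts, so treat this as an assumption rather than a verbatim copy
 * of fs/nilfs2/inode.c.
 */
static const struct address_space_operations nilfs_aops_example = {
        .writepage      = nilfs_writepage,
        .writepages     = nilfs_writepages,
        .set_page_dirty = nilfs_set_page_dirty,
        .readpages      = nilfs_readpages,
        .write_begin    = nilfs_write_begin,
        .write_end      = nilfs_write_end,
};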
static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
                                  struct buffer_head **out_bh,
                                  void (*init_block)(struct inode *,
                                                     struct buffer_head *,
                                                     void *))
{
        struct the_nilfs *nilfs = NILFS_MDT(inode)->mi_nilfs;
        struct super_block *sb = inode->i_sb;
        struct nilfs_transaction_info ti;
        struct buffer_head *bh;
        int err;

        if (!sb) {
                /*
                 * Make sure this function is not called from any
                 * read-only context.
                 */
                if (!nilfs->ns_writer) {
                        WARN_ON(1);
                        err = -EROFS;
                        goto out;
                }
                sb = nilfs->ns_writer->s_super;
        }

        nilfs_transaction_begin(sb, &ti, 0);

        err = -ENOMEM;
        bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
        if (unlikely(!bh))
                goto failed_unlock;

        err = -EEXIST;
        if (buffer_uptodate(bh))
                goto failed_bh;

        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                goto failed_bh;

        bh->b_bdev = nilfs->ns_bdev;
        err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
        if (likely(!err)) {
                get_bh(bh);
                *out_bh = bh;
        }

 failed_bh:
        unlock_page(bh->b_page);
        page_cache_release(bh->b_page);
        brelse(bh);

 failed_unlock:
        if (likely(!err))
                err = nilfs_transaction_commit(sb);
        else
                nilfs_transaction_abort(sb);
 out:
        return err;
}
static int nilfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
                       dev_t rdev)
{
        struct inode *inode;
        struct nilfs_transaction_info ti;
        int err;

        if (!new_valid_dev(rdev))
                return -EINVAL;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
        if (err)
                return err;
        inode = nilfs_new_inode(dir, mode);
        err = PTR_ERR(inode);
        if (!IS_ERR(inode)) {
                init_special_inode(inode, inode->i_mode, rdev);
                mark_inode_dirty(inode);
                err = nilfs_add_nondir(dentry, inode);
        }
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
        else
                nilfs_transaction_abort(dir->i_sb);

        return err;
}
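/*
 * A minimal sketch of the pattern shared by the directory operations in this
 * section: every namespace update is bracketed by nilfs_transaction_begin()
 * and nilfs_transaction_commit(), with nilfs_transaction_abort() on the
 * failure path.  Only the transaction API shown in these excerpts is assumed;
 * example_transactional_update() and its example_update callback are
 * hypothetical names used for illustration.
 */
static int example_transactional_update(struct inode *dir,
                                        int (*example_update)(struct inode *))
{
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
        if (err)
                return err;             /* nothing to undo yet */

        err = example_update(dir);      /* the actual namespace change */
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
        else
                nilfs_transaction_abort(dir->i_sb);
        return err;
}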
static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
                      struct dentry *dentry)
{
        struct inode *inode = old_dentry->d_inode;
        struct nilfs_transaction_info ti;
        int err;

        if (inode->i_nlink >= NILFS_LINK_MAX)
                return -EMLINK;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
        if (err)
                return err;

        inode->i_ctime = CURRENT_TIME;
        inode_inc_link_count(inode);
        atomic_inc(&inode->i_count);

        err = nilfs_add_nondir(dentry, inode);
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
        else
                nilfs_transaction_abort(dir->i_sb);

        return err;
}
static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
        if (err)
                return err;

        err = -ENOTEMPTY;
        if (nilfs_empty_dir(inode)) {
                err = nilfs_unlink(dir, dentry);
                if (!err) {
                        inode->i_size = 0;
                        inode_dec_link_count(inode);
                        inode_dec_link_count(dir);
                }
        }
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
        else
                nilfs_transaction_abort(dir->i_sb);

        return err;
}
/*
 * By the time this is called, we already have created
 * the directory cache entry for the new file, but it
 * is so far negative - it has no inode.
 *
 * If the create succeeds, we fill in the inode information
 * with d_instantiate().
 */
static int nilfs_create(struct inode *dir, struct dentry *dentry, int mode,
                        struct nameidata *nd)
{
        struct inode *inode;
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
        if (err)
                return err;
        inode = nilfs_new_inode(dir, mode);
        err = PTR_ERR(inode);
        if (!IS_ERR(inode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
                mark_inode_dirty(inode);
                err = nilfs_add_nondir(dentry, inode);
        }
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
        else
                nilfs_transaction_abort(dir->i_sb);

        return err;
}
static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
                      struct dentry *dentry)
{
        struct inode *inode = d_inode(old_dentry);
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
        if (err)
                return err;

        inode->i_ctime = current_time(inode);
        inode_inc_link_count(inode);
        ihold(inode);

        err = nilfs_add_link(dentry, inode);
        if (!err) {
                d_instantiate(dentry, inode);
                err = nilfs_transaction_commit(dir->i_sb);
        } else {
                inode_dec_link_count(inode);
                iput(inode);
                nilfs_transaction_abort(dir->i_sb);
        }

        return err;
}
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 blknum = 0;
        int err = 0, ret;
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        if (ret >= 0) {
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty(inode);
                nilfs_transaction_commit(inode->i_sb);
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0);
        } else if (ret == -ENOENT) {
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}
static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct nilfs_transaction_info ti;
        int ret;

        if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
                return VM_FAULT_SIGBUS;

        lock_page(page);
        if (page->mapping != inode->i_mapping ||
            page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
                unlock_page(page);
                return VM_FAULT_NOPAGE;
        }

        if (PageMappedToDisk(page))
                goto mapped;

        if (page_has_buffers(page)) {
                struct buffer_head *bh, *head;
                int fully_mapped = 1;

                bh = head = page_buffers(page);
                do {
                        if (!buffer_mapped(bh)) {
                                fully_mapped = 0;
                                break;
                        }
                } while (bh = bh->b_this_page, bh != head);

                if (fully_mapped) {
                        SetPageMappedToDisk(page);
                        goto mapped;
                }
        }
        unlock_page(page);

        ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
        if (unlikely(ret))
                return VM_FAULT_SIGBUS;

        ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
        if (ret != VM_FAULT_LOCKED) {
                nilfs_transaction_abort(inode->i_sb);
                return ret;
        }
        nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
        nilfs_transaction_commit(inode->i_sb);

 mapped:
        wait_on_page_writeback(page);
        return VM_FAULT_LOCKED;
}
static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct inode *inode;
        struct nilfs_transaction_info ti;
        int err;

        if (dir->i_nlink >= NILFS_LINK_MAX)
                return -EMLINK;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
        if (err)
                return err;

        inc_nlink(dir);

        inode = nilfs_new_inode(dir, S_IFDIR | mode);
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
                goto out_dir;

        inode->i_op = &nilfs_dir_inode_operations;
        inode->i_fop = &nilfs_dir_operations;
        inode->i_mapping->a_ops = &nilfs_aops;

        inc_nlink(inode);

        err = nilfs_make_empty(inode, dir);
        if (err)
                goto out_fail;

        err = nilfs_add_link(dentry, inode);
        if (err)
                goto out_fail;

        nilfs_mark_inode_dirty(inode);
        d_instantiate(dentry, inode);
 out:
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
        else
                nilfs_transaction_abort(dir->i_sb);

        return err;

 out_fail:
        drop_nlink(inode);
        drop_nlink(inode);
        nilfs_mark_inode_dirty(inode);
        iput(inode);
 out_dir:
        drop_nlink(dir);
        nilfs_mark_inode_dirty(dir);
        goto out;
}
static int nilfs_symlink(struct inode *dir, struct dentry *dentry,
                         const char *symname)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = dir->i_sb;
        unsigned int l = strlen(symname) + 1;
        struct inode *inode;
        int err;

        if (l > sb->s_blocksize)
                return -ENAMETOOLONG;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
        if (err)
                return err;

        inode = nilfs_new_inode(dir, S_IFLNK | S_IRWXUGO);
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
                goto out;

        /* slow symlink */
        inode->i_op = &nilfs_symlink_inode_operations;
        inode_nohighmem(inode);
        inode->i_mapping->a_ops = &nilfs_aops;
        err = page_symlink(inode, symname, l);
        if (err)
                goto out_fail;

        /* mark_inode_dirty(inode); */
        /* page_symlink() do this */

        err = nilfs_add_nondir(dentry, inode);
 out:
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
        else
                nilfs_transaction_abort(dir->i_sb);

        return err;

 out_fail:
        drop_nlink(inode);
        nilfs_mark_inode_dirty(inode);
        unlock_new_inode(inode);
        iput(inode);
        goto out;
}
static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
                                  struct buffer_head **out_bh,
                                  void (*init_block)(struct inode *,
                                                     struct buffer_head *,
                                                     void *))
{
        struct super_block *sb = inode->i_sb;
        struct nilfs_transaction_info ti;
        struct buffer_head *bh;
        int err;

        nilfs_transaction_begin(sb, &ti, 0);

        err = -ENOMEM;
        bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
        if (unlikely(!bh))
                goto failed_unlock;

        err = -EEXIST;
        if (buffer_uptodate(bh))
                goto failed_bh;

        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                goto failed_bh;

        bh->b_bdev = sb->s_bdev;
        err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
        if (likely(!err)) {
                get_bh(bh);
                *out_bh = bh;
        }

 failed_bh:
        unlock_page(bh->b_page);
        page_cache_release(bh->b_page);
        brelse(bh);

 failed_unlock:
        if (likely(!err))
                err = nilfs_transaction_commit(sb);
        else
                nilfs_transaction_abort(sb);

        return err;
}
static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode;
        struct nilfs_dir_entry *de;
        struct page *page;
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
        if (err)
                return err;

        err = -ENOENT;
        de = nilfs_find_entry(dir, dentry, &page);
        if (!de)
                goto out;

        inode = dentry->d_inode;
        err = -EIO;
        if (le64_to_cpu(de->inode) != inode->i_ino)
                goto out;

        if (!inode->i_nlink) {
                nilfs_warning(inode->i_sb, __func__,
                              "deleting nonexistent file (%lu), %d\n",
                              inode->i_ino, inode->i_nlink);
                inode->i_nlink = 1;
        }
        err = nilfs_delete_entry(de, page);
        if (err)
                goto out;

        inode->i_ctime = dir->i_ctime;
        inode_dec_link_count(inode);
        err = 0;
 out:
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
        else
                nilfs_transaction_abort(dir->i_sb);

        return err;
}
static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
{
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
        if (err)
                return err;

        err = nilfs_do_unlink(dir, dentry);

        if (!err) {
                nilfs_mark_inode_dirty(dir);
                nilfs_mark_inode_dirty(dentry->d_inode);
                err = nilfs_transaction_commit(dir->i_sb);
        } else
                nilfs_transaction_abort(dir->i_sb);

        return err;
}
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                inode_dio_wait(inode);

                err = vmtruncate(inode, iattr->ia_size);
                if (unlikely(err))
                        goto out_err;
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);

        if (iattr->ia_valid & ATTR_MODE) {
                err = nilfs_acl_chmod(inode);
                if (unlikely(err))
                        goto out_err;
        }

        return nilfs_transaction_commit(sb);

out_err:
        nilfs_transaction_abort(sb);
        return err;
}
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        err = block_write_begin(mapping, pos, len, flags, pagep,
                                nilfs_get_block);
        if (unlikely(err)) {
                loff_t isize = mapping->host->i_size;
                if (pos + len > isize)
                        vmtruncate(mapping->host, isize);

                nilfs_transaction_abort(inode->i_sb);
        }
        return err;
}
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;
        err = inode_setattr(inode, iattr);
        if (!err && (iattr->ia_valid & ATTR_MODE))
                err = nilfs_acl_chmod(inode);
        if (likely(!err))
                err = nilfs_transaction_commit(sb);
        else
                nilfs_transaction_abort(sb);

        return err;
}
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 blknum = 0;
        int err = 0, ret;
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0);
                /* dbn must be changed to proper value */
        } else if (ret == -ENOENT) {
                /*
                 * not found is not error (e.g. hole); must return without
                 * the mapped state flag.
                 */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}
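/*
 * Illustration only (not taken from the NILFS sources): how a caller drives a
 * get_block callback such as nilfs_get_block().  bh.b_size both requests a
 * maximum extent in bytes (maxblocks above is derived from it) and reports
 * back how many bytes were actually mapped.  example_probe_block() is a
 * hypothetical helper name.
 */
static int example_probe_block(struct inode *inode, sector_t blkoff)
{
        struct buffer_head bh = { .b_size = 1 << inode->i_blkbits };
        int err;

        /* create == 0: only look up an existing block, never allocate */
        err = nilfs_get_block(inode, blkoff, &bh, 0);
        if (!err && buffer_mapped(&bh))
                pr_debug("blkoff %llu -> blocknr %llu (%zu bytes mapped)\n",
                         (unsigned long long)blkoff,
                         (unsigned long long)bh.b_blocknr, bh.b_size);
        return err;
}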
static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        struct inode *new_dir, struct dentry *new_dentry)
{
        struct inode *old_inode = old_dentry->d_inode;
        struct inode *new_inode = new_dentry->d_inode;
        struct page *dir_page = NULL;
        struct nilfs_dir_entry *dir_de = NULL;
        struct page *old_page;
        struct nilfs_dir_entry *old_de;
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1);
        if (unlikely(err))
                return err;

        err = -ENOENT;
        old_de = nilfs_find_entry(old_dir, old_dentry, &old_page);
        if (!old_de)
                goto out;

        if (S_ISDIR(old_inode->i_mode)) {
                err = -EIO;
                dir_de = nilfs_dotdot(old_inode, &dir_page);
                if (!dir_de)
                        goto out_old;
        }

        if (new_inode) {
                struct page *new_page;
                struct nilfs_dir_entry *new_de;

                err = -ENOTEMPTY;
                if (dir_de && !nilfs_empty_dir(new_inode))
                        goto out_dir;

                err = -ENOENT;
                new_de = nilfs_find_entry(new_dir, new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
                inode_inc_link_count(old_inode);
                nilfs_set_link(new_dir, new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME;
                if (dir_de)
                        drop_nlink(new_inode);
                inode_dec_link_count(new_inode);
        } else {
                if (dir_de) {
                        err = -EMLINK;
                        if (new_dir->i_nlink >= NILFS_LINK_MAX)
                                goto out_dir;
                }
                inode_inc_link_count(old_inode);
                err = nilfs_add_link(new_dentry, old_inode);
                if (err) {
                        inode_dec_link_count(old_inode);
                        goto out_dir;
                }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }

        /*
         * Like most other Unix systems, set the ctime for inodes on a
         * rename.
         * inode_dec_link_count() will mark the inode dirty.
         */
        old_inode->i_ctime = CURRENT_TIME;

        nilfs_delete_entry(old_de, old_page);
        inode_dec_link_count(old_inode);

        if (dir_de) {
                nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
                inode_dec_link_count(old_dir);
        }

        err = nilfs_transaction_commit(old_dir->i_sb);
        return err;

 out_dir:
        if (dir_de) {
                kunmap(dir_page);
                page_cache_release(dir_page);
        }
 out_old:
        kunmap(old_page);
        page_cache_release(old_page);
 out:
        nilfs_transaction_abort(old_dir->i_sb);
        return err;
}
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 * Bulk read for direct-io is not supported yet. (should be supported)
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        unsigned long blknum = 0;
        int err = 0, ret;
        struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));

        /* This exclusion control is a workaround; should be revised */
        down_read(&NILFS_MDT(dat)->mi_sem);     /* XXX */
        ret = nilfs_bmap_lookup(ii->i_bmap, (unsigned long)blkoff, &blknum);
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        if (ret == 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        } else if (err == -EINVAL) {
                                nilfs_error(inode->i_sb, __func__,
                                            "broken bmap (inode=%lu)\n",
                                            inode->i_ino);
                                err = -EIO;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                map_bh(bh_result, inode->i_sb, 0);
                /* dbn must be changed to proper value */
        } else if (ret == -ENOENT) {
                /*
                 * not found is not error (e.g. hole); must return without
                 * the mapped state flag.
                 */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}
static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        struct inode *new_dir, struct dentry *new_dentry)
{
        struct inode *old_inode = old_dentry->d_inode;
        struct inode *new_inode = new_dentry->d_inode;
        struct page *dir_page = NULL;
        struct nilfs_dir_entry *dir_de = NULL;
        struct page *old_page;
        struct nilfs_dir_entry *old_de;
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1);
        if (unlikely(err))
                return err;

        err = -ENOENT;
        old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
        if (!old_de)
                goto out;

        if (S_ISDIR(old_inode->i_mode)) {
                err = -EIO;
                dir_de = nilfs_dotdot(old_inode, &dir_page);
                if (!dir_de)
                        goto out_old;
        }

        if (new_inode) {
                struct page *new_page;
                struct nilfs_dir_entry *new_de;

                err = -ENOTEMPTY;
                if (dir_de && !nilfs_empty_dir(new_inode))
                        goto out_dir;

                err = -ENOENT;
                new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
                                          &new_page);
                if (!new_de)
                        goto out_dir;
                nilfs_set_link(new_dir, new_de, new_page, old_inode);
                nilfs_mark_inode_dirty(new_dir);
                new_inode->i_ctime = CURRENT_TIME;
                if (dir_de)
                        drop_nlink(new_inode);
                drop_nlink(new_inode);
                nilfs_mark_inode_dirty(new_inode);
        } else {
                err = nilfs_add_link(new_dentry, old_inode);
                if (err)
                        goto out_dir;
                if (dir_de) {
                        inc_nlink(new_dir);
                        nilfs_mark_inode_dirty(new_dir);
                }
        }

        old_inode->i_ctime = CURRENT_TIME;

        nilfs_delete_entry(old_de, old_page);

        if (dir_de) {
                nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
                drop_nlink(old_dir);
        }
        nilfs_mark_inode_dirty(old_dir);
        nilfs_mark_inode_dirty(old_inode);

        err = nilfs_transaction_commit(old_dir->i_sb);
        return err;

 out_dir:
        if (dir_de) {
                kunmap(dir_page);
                page_cache_release(dir_page);
        }
 out_old:
        kunmap(old_page);
        page_cache_release(old_page);
 out:
        nilfs_transaction_abort(old_dir->i_sb);
        return err;
}
static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        struct inode *new_dir, struct dentry *new_dentry,
                        unsigned int flags)
{
        struct inode *old_inode = d_inode(old_dentry);
        struct inode *new_inode = d_inode(new_dentry);
        struct page *dir_page = NULL;
        struct nilfs_dir_entry *dir_de = NULL;
        struct page *old_page;
        struct nilfs_dir_entry *old_de;
        struct nilfs_transaction_info ti;
        int err;

        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;

        err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1);
        if (unlikely(err))
                return err;

        err = -ENOENT;
        old_de = nilfs_find_entry(old_dir, &old_dentry->d_name, &old_page);
        if (!old_de)
                goto out;

        if (S_ISDIR(old_inode->i_mode)) {
                err = -EIO;
                dir_de = nilfs_dotdot(old_inode, &dir_page);
                if (!dir_de)
                        goto out_old;
        }

        if (new_inode) {
                struct page *new_page;
                struct nilfs_dir_entry *new_de;

                err = -ENOTEMPTY;
                if (dir_de && !nilfs_empty_dir(new_inode))
                        goto out_dir;

                err = -ENOENT;
                new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
                                          &new_page);
                if (!new_de)
                        goto out_dir;
                nilfs_set_link(new_dir, new_de, new_page, old_inode);
                nilfs_mark_inode_dirty(new_dir);
                new_inode->i_ctime = current_time(new_inode);
                if (dir_de)
                        drop_nlink(new_inode);
                drop_nlink(new_inode);
                nilfs_mark_inode_dirty(new_inode);
        } else {
                err = nilfs_add_link(new_dentry, old_inode);
                if (err)
                        goto out_dir;
                if (dir_de) {
                        inc_nlink(new_dir);
                        nilfs_mark_inode_dirty(new_dir);
                }
        }

        /*
         * Like most other Unix systems, set the ctime for inodes on a
         * rename.
         */
        old_inode->i_ctime = current_time(old_inode);

        nilfs_delete_entry(old_de, old_page);

        if (dir_de) {
                nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
                drop_nlink(old_dir);
        }
        nilfs_mark_inode_dirty(old_dir);
        nilfs_mark_inode_dirty(old_inode);

        err = nilfs_transaction_commit(old_dir->i_sb);
        return err;

 out_dir:
        if (dir_de) {
                kunmap(dir_page);
                put_page(dir_page);
        }
 out_old:
        kunmap(old_page);
        put_page(old_page);
 out:
        nilfs_transaction_abort(old_dir->i_sb);
        return err;
}
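/*
 * For orientation (an assumption about the wiring, not a quoted table): the
 * namei operations above are exposed through a directory inode_operations
 * table along the following lines.  The exact member set and the callback
 * prototypes vary across the kernel versions represented in these excerpts,
 * and the real table also needs entries (e.g. .lookup) not shown here.
 */
static const struct inode_operations example_dir_iops = {
        .create         = nilfs_create,
        .link           = nilfs_link,
        .unlink         = nilfs_unlink,
        .symlink        = nilfs_symlink,
        .mkdir          = nilfs_mkdir,
        .rmdir          = nilfs_rmdir,
        .mknod          = nilfs_mknod,
        .rename         = nilfs_rename,
        .setattr        = nilfs_setattr,
};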
static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vma->vm_file);
        struct nilfs_transaction_info ti;
        int ret = 0;

        if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
                return VM_FAULT_SIGBUS; /* -ENOSPC */

        sb_start_pagefault(inode->i_sb);
        lock_page(page);
        if (page->mapping != inode->i_mapping ||
            page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
                unlock_page(page);
                ret = -EFAULT;  /* make the VM retry the fault */
                goto out;
        }

        /*
         * check to see if the page is mapped already (no holes)
         */
        if (PageMappedToDisk(page))
                goto mapped;

        if (page_has_buffers(page)) {
                struct buffer_head *bh, *head;
                int fully_mapped = 1;

                bh = head = page_buffers(page);
                do {
                        if (!buffer_mapped(bh)) {
                                fully_mapped = 0;
                                break;
                        }
                } while (bh = bh->b_this_page, bh != head);

                if (fully_mapped) {
                        SetPageMappedToDisk(page);
                        goto mapped;
                }
        }
        unlock_page(page);

        /*
         * fill hole blocks
         */
        ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
        /* never returns -ENOMEM, but may return -ENOSPC */
        if (unlikely(ret))
                goto out;

        file_update_time(vma->vm_file);
        ret = __block_page_mkwrite(vma, vmf, nilfs_get_block);
        if (ret) {
                nilfs_transaction_abort(inode->i_sb);
                goto out;
        }
        nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
        nilfs_transaction_commit(inode->i_sb);

 mapped:
        wait_for_stable_page(page);
 out:
        sb_end_pagefault(inode->i_sb);
        return block_page_mkwrite_return(ret);
}
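/*
 * A minimal sketch (an assumption, not quoted from fs/nilfs2/file.c) of how a
 * page_mkwrite handler like the one above is hooked up for this kernel era:
 * the read fault path is served by the generic filemap_fault(), and the
 * file's mmap() method installs the table.  example_file_vm_ops and
 * example_file_mmap are hypothetical names used for illustration.
 */
static const struct vm_operations_struct example_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = nilfs_page_mkwrite,
};

static int example_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &example_file_vm_ops;
        return 0;
}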