Example #1
static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
				 struct dentry **root_dentry)
{
	struct the_nilfs *nilfs = NILFS_SB(s)->s_nilfs;
	struct nilfs_root *root;
	int ret;

	down_read(&nilfs->ns_segctor_sem);
	ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
	up_read(&nilfs->ns_segctor_sem);
	if (ret < 0) {
		ret = (ret == -ENOENT) ? -EINVAL : ret;
		goto out;
	} else if (!ret) {
		printk(KERN_ERR "NILFS: The specified checkpoint is "
		       "not a snapshot (checkpoint number=%llu).\n",
		       (unsigned long long)cno);
		ret = -EINVAL;
		goto out;
	}

	ret = nilfs_attach_checkpoint(NILFS_SB(s), cno, false, &root);
	if (ret) {
		printk(KERN_ERR "NILFS: error loading snapshot "
		       "(checkpoint number=%llu).\n",
		       (unsigned long long)cno);
		goto out;
	}
	ret = nilfs_get_root_dentry(s, root, root_dentry);
	nilfs_put_root(root);
 out:
	return ret;
}
Example #2
int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
{
	struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
	struct nilfs_root *root;
	struct inode *inode;
	struct dentry *dentry;
	int ret;

	if (cno < 0 || cno > nilfs->ns_cno)
		return false;

	if (cno >= nilfs_last_cno(nilfs))
		return true;	/* protect recent checkpoints */

	ret = false;
	root = nilfs_lookup_root(NILFS_SB(sb)->s_nilfs, cno);
	if (root) {
		inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO);
		if (inode) {
			dentry = d_find_alias(inode);
			if (dentry) {
				if (nilfs_tree_was_touched(dentry))
					ret = nilfs_try_to_shrink_tree(dentry);
				dput(dentry);
			}
			iput(inode);
		}
		nilfs_put_root(root);
	}
	return ret;
}
Example #3
static void nilfs_put_super(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	nilfs_debug(1, "started\n");
	nilfs_debug(2, "Deactivating segment constructor\n");
	nilfs_detach_segment_constructor(sbi);

	if (!(sb->s_flags & MS_RDONLY)) {
		down_write(&nilfs->ns_sem);
		nilfs_debug(1, "Closing on-disk superblock\n");
		nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
		nilfs_commit_super(sbi, 1);
		up_write(&nilfs->ns_sem);
	}

	nilfs_debug(2, "Detaching checkpoint\n");
	nilfs_detach_checkpoint(sbi);

	nilfs_debug(2, "Releasing the_nilfs\n");
	put_nilfs(sbi->s_nilfs);
	sbi->s_super = NULL;
	sb->s_fs_info = NULL;

	kfree(sbi);
	nilfs_debug(1, "done\n");
}
Example #4
/**
 * nilfs_error() - report failure condition on a filesystem
 *
 * nilfs_error() sets an ERROR_FS flag on the superblock as well as
 * reporting an error message.  It should be called when NILFS detects
 * incoherences or defects of meta data on disk.  As for sustainable
 * errors such as a single-shot I/O error, nilfs_warning() or the printk()
 * function should be used instead.
 *
 * The segment constructor must not call this function because it can
 * kill itself.
 */
void nilfs_error(struct super_block *sb, const char *function,
		 const char *fmt, ...)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk(KERN_CRIT "NILFS error (device %s): %s: %pV\n",
	       sb->s_id, function, &vaf);

	va_end(args);

	if (!(sb->s_flags & MS_RDONLY)) {
		nilfs_set_error(sbi);

		if (nilfs_test_opt(sbi, ERRORS_RO)) {
			printk(KERN_CRIT "Remounting filesystem read-only\n");
			sb->s_flags |= MS_RDONLY;
		}
	}

	if (nilfs_test_opt(sbi, ERRORS_PANIC))
		panic("NILFS (device %s): panic forced after error\n",
		      sb->s_id);
}
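For context, a minimal sketch of a call site for nilfs_error(); the helper function, its arguments and the message text below are illustrative only and do not come from the kernel sources:

static void nilfs_example_check(struct super_block *sb, struct inode *inode,
				int corrupted)
{
	/* 'corrupted' stands for the result of some on-disk metadata check;
	 * nilfs_error() logs the message, records the error state and, with
	 * errors=remount-ro, switches the filesystem to read-only. */
	if (corrupted)
		nilfs_error(sb, __func__,
			    "broken metadata (inode number=%lu)",
			    inode->i_ino);
}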
Example #5
static int parse_options(char *options, struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			if (match_bool(&args[0], &option))
				return 0;
			if (option)
				nilfs_set_opt(sbi, BARRIER);
			else
				nilfs_clear_opt(sbi, BARRIER);
			break;
		case Opt_order:
			if (strcmp(args[0].from, "relaxed") == 0)
				/* Ordered data semantics */
				nilfs_clear_opt(sbi, STRICT_ORDER);
			else if (strcmp(args[0].from, "strict") == 0)
				/* Strict in-order semantics */
				nilfs_set_opt(sbi, STRICT_ORDER);
			else
				return 0;
			break;
		case Opt_err_panic:
			nilfs_write_opt(sbi, ERROR_MODE, ERRORS_PANIC);
			break;
		case Opt_err_ro:
			nilfs_write_opt(sbi, ERROR_MODE, ERRORS_RO);
			break;
		case Opt_err_cont:
			nilfs_write_opt(sbi, ERROR_MODE, ERRORS_CONT);
			break;
		case Opt_snapshot:
			if (match_int(&args[0], &option) || option <= 0)
				return 0;
			if (!(sb->s_flags & MS_RDONLY))
				return 0;
			sbi->s_snapshot_cno = option;
			nilfs_set_opt(sbi, SNAPSHOT);
			break;
		default:
			printk(KERN_ERR
			       "NILFS: Unrecognized mount option \"%s\"\n", p);
			return 0;
		}
	}
	return 1;
}
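parse_options() depends on a match_table_t that maps option strings to the Opt_* tokens handled in the switch above (see <linux/parser.h>). A sketch of what such a table could look like for this version of the parser; the exact strings and token list vary between kernel versions, so treat this as an assumption:

enum {
	Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_barrier, Opt_snapshot, Opt_order, Opt_err,
};

static match_table_t tokens = {
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_barrier, "barrier=%s"},	/* value parsed with match_bool() above */
	{Opt_snapshot, "cp=%u"},	/* value parsed with match_int() above */
	{Opt_order, "order=%s"},
	{Opt_err, NULL}			/* terminator: unrecognized option */
};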
Example #6
static void nilfs_put_super(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	lock_kernel();

	nilfs_detach_segment_constructor(sbi);

	if (!(sb->s_flags & MS_RDONLY)) {
		down_write(&nilfs->ns_sem);
		nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
		nilfs_commit_super(sbi, 1);
		up_write(&nilfs->ns_sem);
	}
	down_write(&nilfs->ns_super_sem);
	if (nilfs->ns_current == sbi)
		nilfs->ns_current = NULL;
	up_write(&nilfs->ns_super_sem);

	nilfs_detach_checkpoint(sbi);
	put_nilfs(sbi->s_nilfs);
	sbi->s_super = NULL;
	sb->s_fs_info = NULL;
	nilfs_put_sbinfo(sbi);

	unlock_kernel();
}
Example #7
static int nilfs_test_bdev_super2(struct super_block *s, void *data)
{
	struct nilfs_super_data *sd = data;
	int ret;

	if (s->s_bdev != sd->bdev)
		return 0;

	if (!((s->s_flags | sd->flags) & MS_RDONLY))
		return 1; /* Reuse an old R/W-mode super_block */

	if (s->s_flags & sd->flags & MS_RDONLY) {
		if (down_read_trylock(&s->s_umount)) {
			ret = s->s_root &&
				(sd->cno == NILFS_SB(s)->s_snapshot_cno);
			up_read(&s->s_umount);
			/*
			 * This path is locked with sb_lock by sget().
			 * So, drop_super() causes deadlock.
			 */
			return ret;
		}
	}
	return 0;
}
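nilfs_test_bdev_super2() is written to serve as the test callback of sget(); that is why the comment warns against drop_super(): sget() invokes the callback while holding sb_lock. A rough sketch of the call, assuming a struct nilfs_super_data already filled with the target block device, mount flags and checkpoint number (the surrounding mount code is abbreviated and the set-callback name is an assumption):

	struct super_block *s;

	sd.bdev = bdev;		/* block device being mounted */
	sd.flags = flags;	/* MS_RDONLY and friends */
	sd.cno = cno;		/* snapshot checkpoint number to look for */

	/* Reuse an existing super_block for this device if the test callback
	 * accepts it, otherwise allocate a new one. */
	s = sget(fs_type, nilfs_test_bdev_super2, nilfs_set_bdev_super, &sd);
	if (IS_ERR(s))
		return PTR_ERR(s);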
Example #8
/**
 * nilfs_error() - report failure condition on a filesystem
 *
 * nilfs_error() sets an ERROR_FS flag on the superblock as well as
 * reporting an error message.  It should be called when NILFS detects
 * incoherences or defects of meta data on disk.  As for sustainable
 * errors such as a single-shot I/O error, nilfs_warning() or the printk()
 * function should be used instead.
 *
 * The segment constructor must not call this function because it can
 * kill itself.
 */
void nilfs_error(struct super_block *sb, const char *function,
		 const char *fmt, ...)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	va_list args;

	va_start(args, fmt);
	printk(KERN_CRIT "NILFS error (device %s): %s: ", sb->s_id, function);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);

	if (!(sb->s_flags & MS_RDONLY)) {
		struct the_nilfs *nilfs = sbi->s_nilfs;

		down_write(&nilfs->ns_sem);
		if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
			nilfs->ns_mount_state |= NILFS_ERROR_FS;
			nilfs->ns_sbp[0]->s_state |=
				cpu_to_le16(NILFS_ERROR_FS);
			nilfs_commit_super(sbi, 1);
		}
		up_write(&nilfs->ns_sem);

		if (nilfs_test_opt(sbi, ERRORS_RO)) {
			printk(KERN_CRIT "Remounting filesystem read-only\n");
			sb->s_flags |= MS_RDONLY;
		}
	}

	if (nilfs_test_opt(sbi, ERRORS_PANIC))
		panic("NILFS (device %s): panic forced after error\n",
		      sb->s_id);
}
Example #9
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}
Example #10
void nilfs_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	clear_inode(inode);
	/* XXX: check error code? Is there any thing I can do? */
	(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
	atomic_dec(&sbi->s_inodes_count);
}
Example #11
static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);

#ifdef CONFIG_NILFS_FS_POSIX_ACL
	NILFS_I(inode)->i_acl = NILFS_ACL_NOT_CACHED;
	NILFS_I(inode)->i_default_acl = NILFS_ACL_NOT_CACHED;
#endif
	if (nilfs_read_inode_common(inode, raw_inode))
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			new_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	return err;
}
Example #12
static int nilfs_commit_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	unsigned nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
	int err;

	generic_commit_write(file, page, from, to);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err;
}
Example #13
static int nilfs_unfreeze(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	down_write(&nilfs->ns_sem);
	nilfs_setup_super(sbi, false);
	up_write(&nilfs->ns_sem);
	return 0;
}
Example #14
static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(sbi, inode, nr_dirty);
	}
	return ret;
}
Example #15
int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}
Example #16
static int nilfs_freeze(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int err;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	/* Mark super block clean */
	down_write(&nilfs->ns_sem);
	err = nilfs_cleanup_super(sbi);
	up_write(&nilfs->ns_sem);
	return err;
}
Example #17
static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	unsigned long long blocks;
	unsigned long overhead;
	unsigned long nrsvblocks;
	sector_t nfreeblocks;
	int err;

	/*
	 * Compute all of the segment blocks
	 *
	 * The blocks before first segment and after last segment
	 * are excluded.
	 */
	blocks = nilfs->ns_blocks_per_segment * nilfs->ns_nsegments
		- nilfs->ns_first_data_block;
	nrsvblocks = nilfs->ns_nrsvsegs * nilfs->ns_blocks_per_segment;

	/*
	 * Compute the overhead
	 *
	 * When meta data blocks are distributed outside the segment
	 * structure, we must count them as overhead.
	 */
	overhead = 0;

	err = nilfs_count_free_blocks(nilfs, &nfreeblocks);
	if (unlikely(err))
		return err;

	buf->f_type = NILFS_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = blocks - overhead;
	buf->f_bfree = nfreeblocks;
	buf->f_bavail = (buf->f_bfree >= nrsvblocks) ?
		(buf->f_bfree - nrsvblocks) : 0;
	buf->f_files = atomic_read(&sbi->s_inodes_count);
	buf->f_ffree = 0; /* nilfs_count_free_inodes(sb); */
	buf->f_namelen = NILFS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
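To make the accounting above concrete, a small worked example with made-up geometry (the numbers are illustrative, not NILFS defaults):

	unsigned long long blocks;
	unsigned long nrsvblocks;

	/* Assumed geometry: 2048 blocks per segment, 512 segments,
	 * first data block at 1, 8 reserved segments. */
	blocks = 2048ULL * 512 - 1;	/* 1048575 blocks counted in f_blocks */
	nrsvblocks = 8 * 2048;		/* 16384 blocks withheld from f_bavail */

	/* With overhead == 0: f_blocks = 1048575 and
	 * f_bavail = f_bfree - 16384 when f_bfree >= 16384, otherwise 0. */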
Example #18
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	int err = 0;

	/* This function is called when super block should be written back */
	if (wait)
		err = nilfs_construct_segment(sb);

	down_write(&nilfs->ns_sem);
	if (nilfs_sb_dirty(nilfs))
		nilfs_commit_super(sbi, 1);
	up_write(&nilfs->ns_sem);

	return err;
}
Example #19
static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
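Both nilfs_write_end() above and nilfs_set_page_dirty() from Example #14 are address_space_operations callbacks. A sketch of how they might be wired into nilfs_aops; the real table in the kernel has more entries (readpage, writepages, write_begin and so on), so this only shows the shape of the registration:

static const struct address_space_operations nilfs_aops = {
	/* abbreviated: only the callbacks shown in these examples */
	.write_end	= nilfs_write_end,
	.set_page_dirty	= nilfs_set_page_dirty,
};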
Example #20
static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct super_block *sb = vfs->mnt_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	if (!nilfs_test_opt(sbi, BARRIER))
		seq_printf(seq, ",barrier=off");
	if (nilfs_test_opt(sbi, SNAPSHOT))
		seq_printf(seq, ",cp=%llu",
			   (unsigned long long int)sbi->s_snapshot_cno);
	if (nilfs_test_opt(sbi, ERRORS_RO))
		seq_printf(seq, ",errors=remount-ro");
	if (nilfs_test_opt(sbi, ERRORS_PANIC))
		seq_printf(seq, ",errors=panic");
	if (nilfs_test_opt(sbi, STRICT_ORDER))
		seq_printf(seq, ",order=strict");

	return 0;
}
Example #21
/**
 * nilfs_read_super_root_block - read super root block
 * @sb: super_block
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 */
int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
				struct buffer_head **pbh, int check)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *sr;
	u32 crc;
	int ret;

	*pbh = NULL;
	bh_sr = sb_bread(sb, sr_block);
	if (unlikely(!bh_sr)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}

	sr = (struct nilfs_super_root *)bh_sr->b_data;
	if (check) {
		unsigned bytes = le16_to_cpu(sr->sr_bytes);

		if (bytes == 0 || bytes > sb->s_blocksize) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
		if (calc_crc_cont(NILFS_SB(sb), bh_sr, &crc,
				  sizeof(sr->sr_sum), bytes, sr_block, 1)) {
			ret = NILFS_SEG_FAIL_IO;
			goto failed_bh;
		}
		if (crc != le32_to_cpu(sr->sr_sum)) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
	}
	*pbh = bh_sr;
	return 0;

 failed_bh:
	brelse(bh_sr);

 failed:
	return nilfs_warn_segment_error(ret);
}
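A rough sketch of a caller, e.g. recovery code that has located the super root block of the latest segment; the variable names and error handling are illustrative:

	struct buffer_head *bh_sr;
	int err;

	/* Read the super root block at sr_block and verify its CRC
	 * (last argument non-zero). On failure the NILFS_SEG_FAIL_* code
	 * has already been converted by nilfs_warn_segment_error(). */
	err = nilfs_read_super_root_block(sb, sr_block, &bh_sr, 1);
	if (err)
		return err;

	/* ... interpret bh_sr->b_data as struct nilfs_super_root ... */
	brelse(bh_sr);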
Example #22
int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(sbi, inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	lock_buffer(ibh);
	nilfs_update_inode(inode, ibh);
	unlock_buffer(ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(sbi->s_ifile);
	brelse(ibh);
	return 0;
}
Example #23
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);

	/* The buffer is guarded with lock_buffer() by the caller */
	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
}
Example #24
int nilfs_store_magic_and_option(struct super_block *sb,
				 struct nilfs_super_block *sbp,
				 char *data)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	sb->s_magic = le16_to_cpu(sbp->s_magic);

	/* FS independent flags */
#ifdef NILFS_ATIME_DISABLE
	sb->s_flags |= MS_NOATIME;
#endif

	nilfs_set_default_options(sbi, sbp);

	sbi->s_resuid = le16_to_cpu(sbp->s_def_resuid);
	sbi->s_resgid = le16_to_cpu(sbp->s_def_resgid);
	sbi->s_interval = le32_to_cpu(sbp->s_c_interval);
	sbi->s_watermark = le32_to_cpu(sbp->s_c_block_max);

	return !parse_options(data, sb, 0) ? -EINVAL : 0;
}
Example #25
/**
 * nilfs_write_super - write super block(s) of NILFS
 * @sb: super_block
 *
 * nilfs_write_super() gets a fs-dependent lock, writes super block(s), and
 * clears s_dirt.  This function is called in the section protected by
 * lock_super().
 *
 * The s_dirt flag is managed by each filesystem and we protect it by ns_sem
 * of the struct the_nilfs.  Lock order must be as follows:
 *
 *   1. lock_super()
 *   2.    down_write(&nilfs->ns_sem)
 *
 * Inside NILFS, locking ns_sem is enough to protect s_dirt and the buffer
 * of the super block (nilfs->ns_sbp[]).
 *
 * In most cases, VFS functions call lock_super() before calling these
 * methods.  So we must be careful not to bring on deadlocks when using
 * lock_super();  see generic_shutdown_super(), write_super(), and so on.
 *
 * Note that order of lock_kernel() and lock_super() depends on contexts
 * of VFS.  We should also note that lock_kernel() can be used in its
 * protective section and only the outermost one has an effect.
 */
static void nilfs_write_super(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	down_write(&nilfs->ns_sem);
	if (!(sb->s_flags & MS_RDONLY)) {
		struct nilfs_super_block **sbp = nilfs->ns_sbp;
		u64 t = get_seconds();
		int dupsb;

		if (!nilfs_discontinued(nilfs) && t >= nilfs->ns_sbwtime[0] &&
		    t < nilfs->ns_sbwtime[0] + NILFS_SB_FREQ) {
			up_write(&nilfs->ns_sem);
			return;
		}
		dupsb = sbp[1] && t > nilfs->ns_sbwtime[1] + NILFS_ALTSB_FREQ;
		nilfs_commit_super(sbi, dupsb);
	}
	sb->s_dirt = 0;
	up_write(&nilfs->ns_sem);
}
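The lock ordering spelled out in the comment can be reduced to a short sketch; the helper below is hypothetical and only demonstrates the order in which the two locks are taken when both are needed:

static void nilfs_example_touch_super(struct super_block *sb)
{
	struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;

	lock_super(sb);			/* 1. VFS superblock lock */
	down_write(&nilfs->ns_sem);	/* 2. protects s_dirt and ns_sbp[] */

	sb->s_dirt = 1;			/* e.g. mark the super block dirty */

	up_write(&nilfs->ns_sem);
	unlock_super(sb);
}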
Example #26
static void nilfs_put_super(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	nilfs_detach_segment_constructor(sbi);

	if (!(sb->s_flags & MS_RDONLY)) {
		down_write(&nilfs->ns_sem);
		nilfs_cleanup_super(sbi);
		up_write(&nilfs->ns_sem);
	}

	iput(nilfs->ns_sufile);
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_dat);

	destroy_nilfs(nilfs);
	sbi->s_super = NULL;
	sb->s_fs_info = NULL;
	kfree(sbi);
}
Example #27
static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct super_block *sb = vfs->mnt_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_root *root = NILFS_I(vfs->mnt_root->d_inode)->i_root;

	if (!nilfs_test_opt(sbi, BARRIER))
		seq_puts(seq, ",nobarrier");
	if (root->cno != NILFS_CPTREE_CURRENT_CNO)
		seq_printf(seq, ",cp=%llu", (unsigned long long)root->cno);
	if (nilfs_test_opt(sbi, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
	if (nilfs_test_opt(sbi, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (nilfs_test_opt(sbi, STRICT_ORDER))
		seq_puts(seq, ",order=strict");
	if (nilfs_test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (nilfs_test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");

	return 0;
}
Example #28
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_super_block **sbp;
	int err = 0;

	/* This function is called when super block should be written back */
	if (wait)
		err = nilfs_construct_segment(sb);

	down_write(&nilfs->ns_sem);
	if (nilfs_sb_dirty(nilfs)) {
		sbp = nilfs_prepare_super(sbi, nilfs_sb_will_flip(nilfs));
		if (likely(sbp)) {
			nilfs_set_log_cursor(sbp[0], nilfs);
			nilfs_commit_super(sbi, NILFS_SB_COMMIT);
		}
	}
	up_write(&nilfs->ns_sem);

	return err;
}
Example #29
static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct super_block *sb = vfs->mnt_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);

	if (!nilfs_test_opt(sbi, BARRIER))
		seq_printf(seq, ",nobarrier");
	if (nilfs_test_opt(sbi, SNAPSHOT))
		seq_printf(seq, ",cp=%llu",
			   (unsigned long long int)sbi->s_snapshot_cno);
	if (nilfs_test_opt(sbi, ERRORS_PANIC))
		seq_printf(seq, ",errors=panic");
	if (nilfs_test_opt(sbi, ERRORS_CONT))
		seq_printf(seq, ",errors=continue");
	if (nilfs_test_opt(sbi, STRICT_ORDER))
		seq_printf(seq, ",order=strict");
	if (nilfs_test_opt(sbi, NORECOVERY))
		seq_printf(seq, ",norecovery");
	if (nilfs_test_opt(sbi, DISCARD))
		seq_printf(seq, ",discard");

	return 0;
}
Example #30
static int parse_options(char *options, struct super_block *sb, int is_remount)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	char *p;
	substring_t args[MAX_OPT_ARGS];

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			nilfs_set_opt(sbi, BARRIER);
			break;
		case Opt_nobarrier:
			nilfs_clear_opt(sbi, BARRIER);
			break;
		case Opt_order:
			if (strcmp(args[0].from, "relaxed") == 0)
				/* Ordered data semantics */
				nilfs_clear_opt(sbi, STRICT_ORDER);
			else if (strcmp(args[0].from, "strict") == 0)
				/* Strict in-order semantics */
				nilfs_set_opt(sbi, STRICT_ORDER);
			else
				return 0;
			break;
		case Opt_err_panic:
			nilfs_write_opt(sbi, ERROR_MODE, ERRORS_PANIC);
			break;
		case Opt_err_ro:
			nilfs_write_opt(sbi, ERROR_MODE, ERRORS_RO);
			break;
		case Opt_err_cont:
			nilfs_write_opt(sbi, ERROR_MODE, ERRORS_CONT);
			break;
		case Opt_snapshot:
			if (is_remount) {
				printk(KERN_ERR
				       "NILFS: \"%s\" option is invalid "
				       "for remount.\n", p);
				return 0;
			}
			break;
		case Opt_norecovery:
			nilfs_set_opt(sbi, NORECOVERY);
			break;
		case Opt_discard:
			nilfs_set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			nilfs_clear_opt(sbi, DISCARD);
			break;
		default:
			printk(KERN_ERR
			       "NILFS: Unrecognized mount option \"%s\"\n", p);
			return 0;
		}
	}
	return 1;
}