Example #1
struct inode *gfs2_inode_lookup(struct super_block *sb,
				unsigned int type,
				u64 no_addr,
				u64 no_formal_ino)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl = NULL;
	int error;

	inode = gfs2_iget(sb, no_addr);
	ip = GFS2_I(inode);

	if (!inode)
		return ERR_PTR(-ENOBUFS);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		ip->i_no_formal_ino = no_formal_ino;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;
		ip->i_iopen_gh.gh_gl->gl_object = ip;

		gfs2_glock_put(io_gl);
		io_gl = NULL;

		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
			goto gfs2_nfsbypass;

		inode->i_mode = DT2IF(type);

		/*
		 * We must read the inode in order to work out its type in
		 * this case. Note that this doesn't happen often as we normally
		 * know the type beforehand. This code path only occurs during
		 * unlinked inode recovery (where it is safe to do this glock,
		 * which is not true in the general case).
		 */
		if (type == DT_UNKNOWN) {
			struct gfs2_holder gh;
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
			if (unlikely(error))
				goto fail_glock;
			/* Inode is now uptodate */
			gfs2_glock_dq_uninit(&gh);
		}

		gfs2_set_iop(inode);
	}

gfs2_nfsbypass:
	return inode;
fail_glock:
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	if (io_gl)
		gfs2_glock_put(io_gl);
fail_put:
	if (inode->i_state & I_NEW)
		ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	if (inode->i_state & I_NEW)
		iget_failed(inode);
	else
		iput(inode);
	return ERR_PTR(error);
}
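
Example #1 unwinds its error paths through a chain of labels, releasing resources in the reverse order they were acquired. Below is a minimal, self-contained sketch of that cleanup-ladder idiom; struct res, res_get() and setup() are hypothetical stand-ins for illustration, not GFS2 code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resource standing in for the glocks above. */
struct res { int id; };

static struct res *res_get(int id)
{
	struct res *r = malloc(sizeof(*r));
	if (r)
		r->id = id;
	return r;
}

static void res_put(struct res *r)
{
	free(r);
}

/* Acquire two resources; on failure release only what was taken,
 * in reverse order -- the same ladder gfs2_inode_lookup() uses. */
static int setup(struct res **a_out, struct res **b_out)
{
	struct res *a, *b;

	a = res_get(1);
	if (!a)
		goto fail;
	b = res_get(2);
	if (!b)
		goto fail_put_a;

	*a_out = a;
	*b_out = b;
	return 0;

fail_put_a:
	res_put(a);
fail:
	return -1;
}

int main(void)
{
	struct res *a, *b;

	if (setup(&a, &b) == 0) {
		printf("acquired %d and %d\n", a->id, b->id);
		res_put(b);
		res_put(a);
	}
	return 0;
}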
Example #2
void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
{
	struct gfs2_sbd *sdp;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl = NULL;
	int error;
	struct gfs2_holder gh;
	struct inode *inode;

	inode = gfs2_iget_skip(sb, no_addr);

	if (!inode)
		return;

	/* If it's not a new inode, someone's using it, so leave it alone. */
	if (!(inode->i_state & I_NEW)) {
		iput(inode);
		return;
	}

	ip = GFS2_I(inode);
	sdp = GFS2_SB(inode);
	ip->i_no_formal_ino = -1;

	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
	if (unlikely(error))
		goto fail;
	ip->i_gl->gl_object = ip;

	error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
	if (unlikely(error))
		goto fail_put;

	set_bit(GIF_INVALID, &ip->i_flags);
	error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, LM_FLAG_TRY | GL_EXACT,
				   &ip->i_iopen_gh);
	if (unlikely(error))
		goto fail_iopen;

	ip->i_iopen_gh.gh_gl->gl_object = ip;
	gfs2_glock_put(io_gl);
	io_gl = NULL;

	inode->i_mode = DT2IF(DT_UNKNOWN);

	/*
	 * We must read the inode in order to work out its type in
	 * this case. Note that this doesn't happen often as we normally
	 * know the type beforehand. This code path only occurs during
	 * unlinked inode recovery (where it is safe to do this glock,
	 * which is not true in the general case).
	 */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY,
				   &gh);
	if (unlikely(error))
		goto fail_glock;

	/* Inode is now uptodate */
	gfs2_glock_dq_uninit(&gh);
	gfs2_set_iop(inode);

	/* The iput will cause it to be deleted. */
	iput(inode);
	return;

fail_glock:
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	if (io_gl)
		gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iget_failed(inode);
	return;
}
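
Unlike Example #1, both lock requests in gfs2_process_unlinked_inode() pass LM_FLAG_TRY, so recovery backs off instead of blocking when someone else holds the lock. A rough userspace analogue of that back-off-on-contention idea, assuming only standard POSIX try-locks; the recovery framing is illustrative.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

/* Best-effort recovery: if the lock is contended, someone else is
 * already using the inode, so skip it rather than block -- the same
 * reasoning behind the LM_FLAG_TRY flags above. */
static void try_recover(void)
{
	if (pthread_mutex_trylock(&inode_lock) == EBUSY) {
		printf("lock busy, skipping recovery\n");
		return;
	}
	printf("recovering unlinked inode\n");
	pthread_mutex_unlock(&inode_lock);
}

int main(void)
{
	try_recover();
	return 0;
}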
Example #3
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
				u64 no_addr, u64 no_formal_ino, int non_block)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl = NULL;
	int error;

	inode = gfs2_iget(sb, no_addr, non_block);
	ip = GFS2_I(inode);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		ip->i_no_formal_ino = no_formal_ino;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		ip->i_gl->gl_object = ip;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_iopen;

		ip->i_iopen_gh.gh_gl->gl_object = ip;
		gfs2_glock_put(io_gl);
		io_gl = NULL;

		if (type == DT_UNKNOWN) {
			/* Inode glock must be locked already */
			error = gfs2_inode_refresh(GFS2_I(inode));
			if (error)
				goto fail_refresh;
		} else {
			inode->i_mode = DT2IF(type);
		}

		gfs2_set_iop(inode);
		unlock_new_inode(inode);
	}

	return inode;

fail_refresh:
	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
	ip->i_iopen_gh.gh_gl->gl_object = NULL;
	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_iopen:
	if (io_gl)
		gfs2_glock_put(io_gl);
fail_put:
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iget_failed(inode);
	return ERR_PTR(error);
}
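
The new unlock_new_inode() call at the end of the I_NEW branch is what publishes the inode: until then it sits in the cache with I_NEW set, reserved but not yet visible to concurrent lookups. A compressed sketch of that create-then-publish handshake; struct obj and its helpers are hypothetical, modelled loosely on the iget contract.

#include <pthread.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t lock;
	pthread_cond_t ready;
	bool is_new;	/* analogue of the I_NEW state bit */
	int data;
};

/* Creator side: finish initialization, then clear the NEW flag and
 * wake any lookup that found the half-built object in the cache --
 * the role unlock_new_inode() plays above. */
static void publish(struct obj *o, int data)
{
	pthread_mutex_lock(&o->lock);
	o->data = data;
	o->is_new = false;
	pthread_cond_broadcast(&o->ready);
	pthread_mutex_unlock(&o->lock);
}

/* Lookup side: block until the creator has published. */
static int wait_ready(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	while (o->is_new)
		pthread_cond_wait(&o->ready, &o->lock);
	pthread_mutex_unlock(&o->lock);
	return o->data;
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
			 true, 0 };

	publish(&o, 42);
	return wait_ready(&o) == 42 ? 0 : 1;
}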
Example #4
void make_sure_lf_exists(struct gfs2_inode *ip)
{
	struct dir_info *di;
	struct gfs2_sbd *sdp = ip->i_sbd;
	uint32_t mode;
	int root_entries;

	if (lf_dip)
		return;

	root_entries = sdp->md.rooti->i_di.di_entries;
	log_info( _("Locating/Creating lost+found directory\n"));

	/* if this is gfs1, we have to trick createi into using
	   no_formal_ino = no_addr, so we set next_inum to the
	   free block we're about to allocate. */
	if (sdp->gfs1)
		sdp->md.next_inum = find_free_blk(sdp);
	mode = (sdp->gfs1 ? DT2IF(GFS_FILE_DIR) : S_IFDIR) | 0700;
	if (sdp->gfs1)
		lf_dip = gfs_createi(sdp->md.rooti, "lost+found", mode, 0);
	else
		lf_dip = createi(sdp->md.rooti, "lost+found",
				 S_IFDIR | 0700, 0);
	if (lf_dip == NULL) {
		log_crit(_("Error creating lost+found: %s\n"),
			 strerror(errno));
		exit(FSCK_ERROR);
	}

	/* createi will have incremented the di_nlink link count for the root
	   directory.  We must set the nlink value in the hash table to keep
	   them in sync so that pass4 can detect and fix any discrepancies. */
	set_di_nlink(sdp->md.rooti);

	if (sdp->md.rooti->i_di.di_entries > root_entries) {
		lf_was_created = 1;
		/* This is a new lost+found directory, so set its block type
		   and increment link counts for the directories */
		/* FIXME: I'd feel better about this if fs_mkdir returned
		   whether it created a new directory or just found an old one,
		   and we used that instead of the bitmap_type to run this */
		dirtree_insert(lf_dip->i_di.di_num);
		/* Set the bitmap AFTER the dirtree insert so that function
		   check_n_fix_bitmap will realize it's a dinode and adjust
		   the rgrp counts properly. */
		fsck_bitmap_set(ip, lf_dip->i_di.di_num.no_addr,
				_("lost+found dinode"), GFS2_BLKST_DINODE);
		/* root inode links to lost+found */
		incr_link_count(sdp->md.rooti->i_di.di_num, lf_dip, _("root"));
		/* lost+found link for '.' from itself */
		incr_link_count(lf_dip->i_di.di_num, lf_dip, "\".\"");
		/* lost+found link for '..' back to root */
		incr_link_count(lf_dip->i_di.di_num, sdp->md.rooti, "\"..\"");
		if (sdp->gfs1)
			lf_dip->i_di.__pad1 = GFS_FILE_DIR;
	}
	log_info( _("lost+found directory is dinode %lld (0x%llx)\n"),
		  (unsigned long long)lf_dip->i_di.di_num.no_addr,
		  (unsigned long long)lf_dip->i_di.di_num.no_addr);
	di = dirtree_find(lf_dip->i_di.di_num.no_addr);
	if (di) {
		log_info( _("Marking lost+found inode connected\n"));
		di->checked = 1;
		di = NULL;
	}
}
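
make_sure_lf_exists() is idempotent (it returns early when lf_dip is already cached) and detects whether createi() really made a new directory by comparing the root entry count before and after — exactly the information the FIXME comment wishes the API returned directly. A userspace sketch of the same ensure-and-report idea, using only standard mkdir()/EEXIST; none of the GFS2 internals are involved.

#include <sys/stat.h>
#include <sys/types.h>
#include <errno.h>
#include <stdio.h>

/* Ensure a directory exists and report whether this call created it --
 * the property the FIXME comment above wishes createi() exposed. */
static int ensure_dir(const char *path, int *created)
{
	*created = 0;
	if (mkdir(path, 0700) == 0) {
		*created = 1;	/* brand new: run the one-time setup */
		return 0;
	}
	if (errno == EEXIST)
		return 0;	/* already there: nothing to fix up */
	return -1;
}

int main(void)
{
	int created;

	if (ensure_dir("lost+found", &created) == 0)
		printf("lost+found %s\n", created ? "created" : "found");
	return 0;
}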
Example #5
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
				u64 no_addr, u64 no_formal_ino,
				unsigned int blktype)
{
	struct inode *inode;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl = NULL;
	struct gfs2_holder i_gh;
	int error;

	gfs2_holder_mark_uninitialized(&i_gh);
	inode = gfs2_iget(sb, no_addr);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	ip = GFS2_I(inode);

	if (inode->i_state & I_NEW) {
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		ip->i_no_formal_ino = no_formal_ino;

		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
		if (unlikely(error))
			goto fail;
		flush_delayed_work(&ip->i_gl->gl_work);

		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
		if (unlikely(error))
			goto fail_put;

		if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
			/*
			 * The GL_SKIP flag indicates to skip reading the inode
			 * block.  We read the inode with gfs2_inode_refresh
			 * after possibly checking the block type.
			 */
			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
						   GL_SKIP, &i_gh);
			if (error)
				goto fail_put;

			if (blktype != GFS2_BLKST_FREE) {
				error = gfs2_check_blk_type(sdp, no_addr,
							    blktype);
				if (error)
					goto fail_put;
			}
		}

		glock_set_object(ip->i_gl, ip);
		set_bit(GIF_INVALID, &ip->i_flags);
		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
		if (unlikely(error))
			goto fail_put;
		glock_set_object(ip->i_iopen_gh.gh_gl, ip);
		gfs2_glock_put(io_gl);
		io_gl = NULL;

		if (type == DT_UNKNOWN) {
			/* Inode glock must be locked already */
			error = gfs2_inode_refresh(GFS2_I(inode));
			if (error)
				goto fail_refresh;
		} else {
			inode->i_mode = DT2IF(type);
		}

		gfs2_set_iop(inode);

		/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
		inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
		inode->i_atime.tv_nsec = 0;

		unlock_new_inode(inode);
	}

	if (gfs2_holder_initialized(&i_gh))
		gfs2_glock_dq_uninit(&i_gh);
	return inode;

fail_refresh:
	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
	glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_put:
	if (io_gl)
		gfs2_glock_put(io_gl);
	glock_clear_object(ip->i_gl, ip);
	if (gfs2_holder_initialized(&i_gh))
		gfs2_glock_dq_uninit(&i_gh);
fail:
	iget_failed(inode);
	return ERR_PTR(error);
}
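
The "lowest possible timestamp" assignment shifts a 1 into the sign bit of the seconds field, producing the most negative representable value. A standalone check of that arithmetic; note that left-shifting into the sign bit is formally undefined in ISO C, though gcc and clang (which build the kernel) define it with two's-complement semantics.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	long long tv_sec;

	/* Same expression as in gfs2_inode_lookup(): shift a 1 into the
	 * sign bit of the seconds field to get its minimum value.
	 * (Formally undefined in ISO C for signed types; gcc and clang
	 * define it as two's-complement, which the kernel relies on.) */
	tv_sec = 1LL << (8 * sizeof(tv_sec) - 1);

	printf("%lld (LLONG_MIN is %lld)\n", tv_sec, LLONG_MIN);
	return 0;
}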