Example #1
0
/*
 * gfs2_process_unlinked_inode - fetch an unlinked inode so it can be deleted
 * @sb: the superblock of the filesystem
 * @no_addr: the on-disk block address of the inode
 *
 * Looks up the inode for @no_addr via gfs2_iget_skip().  If the in-core
 * inode is brand new (I_NEW), this attaches its inode glock and iopen
 * glock, reads the inode in under an exclusive glock to discover its
 * type, and then drops the reference; per the comment below, the final
 * iput() causes the inode to be deleted.  If somebody else already has
 * the inode in core (not I_NEW), it is left untouched.
 *
 * Lock requests use LM_FLAG_TRY — NOTE(review): presumably so this path
 * fails fast instead of blocking on a contended glock; confirm against
 * the glock layer's flag semantics.
 */
void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
{
	struct gfs2_sbd *sdp;
	struct gfs2_inode *ip;
	struct gfs2_glock *io_gl = NULL;
	int error;
	struct gfs2_holder gh;
	struct inode *inode;

	inode = gfs2_iget_skip(sb, no_addr);

	if (!inode)
		return;

	/* If it's not a new inode, someone's using it, so leave it alone. */
	if (!(inode->i_state & I_NEW)) {
		iput(inode);
		return;
	}

	ip = GFS2_I(inode);
	sdp = GFS2_SB(inode);
	ip->i_no_formal_ino = -1;

	/* Attach the inode glock and point it back at this inode. */
	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
	if (unlikely(error))
		goto fail;
	ip->i_gl->gl_object = ip;

	error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
	if (unlikely(error))
		goto fail_put;

	/* NOTE(review): GIF_INVALID presumably marks the in-core inode
	 * contents as not yet read from disk — confirm. */
	set_bit(GIF_INVALID, &ip->i_flags);
	error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, LM_FLAG_TRY | GL_EXACT,
				   &ip->i_iopen_gh);
	if (unlikely(error))
		goto fail_iopen;

	ip->i_iopen_gh.gh_gl->gl_object = ip;
	/*
	 * Drop our reference on the iopen glock and clear io_gl so the
	 * error paths below cannot put it a second time; the holder in
	 * ip->i_iopen_gh presumably keeps it pinned from here on.
	 */
	gfs2_glock_put(io_gl);
	io_gl = NULL;

	inode->i_mode = DT2IF(DT_UNKNOWN);

	/*
	 * We must read the inode in order to work out its type in
	 * this case. Note that this doesn't happen often as we normally
	 * know the type beforehand. This code path only occurs during
	 * unlinked inode recovery (where it is safe to do this glock,
	 * which is not true in the general case).
	 */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY,
				   &gh);
	if (unlikely(error))
		goto fail_glock;

	/* Inode is now uptodate */
	gfs2_glock_dq_uninit(&gh);
	gfs2_set_iop(inode);

	/* The iput will cause it to be deleted. */
	iput(inode);
	return;

fail_glock:
	/* Release the iopen holder acquired above. */
	gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
	/* io_gl is non-NULL only if we still own a reference on it. */
	if (io_gl)
		gfs2_glock_put(io_gl);
fail_put:
	/* Detach and release the inode glock before failing the iget. */
	ip->i_gl->gl_object = NULL;
	gfs2_glock_put(ip->i_gl);
fail:
	iget_failed(inode);
	return;
}
Example #2
0
/*
 * gfs2_inode_lookup - get the in-core inode for a given inode number
 * @sb: the superblock of the filesystem
 * @type: the directory-entry type (DT_*) of the inode, or DT_UNKNOWN
 * @no_addr: the on-disk block address of the inode
 * @no_formal_ino: the formal inode number
 * @skip_freeing: nonzero to use gfs2_iget_skip() instead of gfs2_iget()
 *
 * Returns the inode on success, or an ERR_PTR on failure.  For a brand
 * new (I_NEW) in-core inode this attaches the inode and iopen glocks;
 * when @type is DT_UNKNOWN the inode is additionally read in under an
 * exclusive glock to discover its real type (rare; see comment below).
 *
 * Fixes relative to the previous version, mirroring the sibling
 * gfs2_process_unlinked_inode():
 *  - io_gl is cleared after its reference is dropped and the fail_iopen
 *    path only puts it when still owned, so the fail_glock -> fail_iopen
 *    error path can no longer drop the iopen glock reference twice.
 *  - GFS2_I(inode) is evaluated only after the NULL check on inode.
 */
struct inode *gfs2_inode_lookup(struct super_block *sb,
                                unsigned int type,
                                u64 no_addr,
                                u64 no_formal_ino, int skip_freeing)
{
    struct inode *inode;
    struct gfs2_inode *ip;
    struct gfs2_glock *io_gl = NULL;
    int error;

    if (skip_freeing)
        inode = gfs2_iget_skip(sb, no_addr);
    else
        inode = gfs2_iget(sb, no_addr);

    if (!inode)
        return ERR_PTR(-ENOBUFS);

    /* Only derive the gfs2 inode once we know the lookup succeeded. */
    ip = GFS2_I(inode);

    if (inode->i_state & I_NEW) {
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        ip->i_no_formal_ino = no_formal_ino;

        /* Attach the inode glock and point it back at this inode. */
        error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
        if (unlikely(error))
            goto fail;
        ip->i_gl->gl_object = ip;

        error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
        if (unlikely(error))
            goto fail_put;

        set_bit(GIF_INVALID, &ip->i_flags);
        error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
        if (unlikely(error))
            goto fail_iopen;
        ip->i_iopen_gh.gh_gl->gl_object = ip;

        /*
         * Drop our reference on the iopen glock and clear io_gl so the
         * error paths below cannot put it a second time (the fail_glock
         * path falls through to fail_iopen, which previously double-put
         * io_gl and corrupted the glock refcount).
         */
        gfs2_glock_put(io_gl);
        io_gl = NULL;

        if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
            goto gfs2_nfsbypass;

        inode->i_mode = DT2IF(type);

        /*
         * We must read the inode in order to work out its type in
         * this case. Note that this doesn't happen often as we normally
         * know the type beforehand. This code path only occurs during
         * unlinked inode recovery (where it is safe to do this glock,
         * which is not true in the general case).
         */
        if (type == DT_UNKNOWN) {
            struct gfs2_holder gh;
            error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
            if (unlikely(error))
                goto fail_glock;
            /* Inode is now uptodate */
            gfs2_glock_dq_uninit(&gh);
        }

        gfs2_set_iop(inode);
    }

gfs2_nfsbypass:
    return inode;
fail_glock:
    /* Release the iopen holder acquired above. */
    gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
    /* io_gl is non-NULL only if we still own a reference on it. */
    if (io_gl)
        gfs2_glock_put(io_gl);
fail_put:
    /* Detach and release the inode glock before failing the iget. */
    ip->i_gl->gl_object = NULL;
    gfs2_glock_put(ip->i_gl);
fail:
    iget_failed(inode);
    return ERR_PTR(error);
}