Example #1
void
vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	vm_ooffset_t old_wm;

	VM_OBJECT_WLOCK(object);
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	old_wm = object->un_pager.vnp.writemappings;
	object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
	vp = object->handle;
	if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
		ASSERT_VOP_ELOCKED(vp, "v_writecount inc");
		VOP_ADD_WRITECOUNT(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	} else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
		ASSERT_VOP_ELOCKED(vp, "v_writecount dec");
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	VM_OBJECT_WUNLOCK(object);
}
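
All of the examples on this page share one invariant: ASSERT_VOP_ELOCKED() panics if the current thread does not hold the vnode lock exclusively, and it compiles away unless the kernel is built with DEBUG_VFS_LOCKS. A minimal, hypothetical sketch of that calling discipline (example_touch_vnode is not a real function; the VOP_UNLOCK() flags argument matches the FreeBSD vintage of these examples):

static void
example_touch_vnode(struct vnode *vp)
{
	/* Take the vnode lock exclusively before mutating vnode state. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	ASSERT_VOP_ELOCKED(vp, __func__);	/* no-op without DEBUG_VFS_LOCKS */
	/* ... modify fields protected by the vnode lock ... */
	VOP_UNLOCK(vp, 0);
}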
Example #2
static int
fuse_vnode_alloc(struct mount *mp,
    struct thread *td,
    uint64_t nodeid,
    enum vtype vtyp,
    struct vnode **vpp)
{
	struct fuse_vnode_data *fvdat;
	struct vnode *vp2;
	int err = 0;

	FS_DEBUG("been asked for vno #%ju\n", (uintmax_t)nodeid);

	if (vtyp == VNON) {
		return EINVAL;
	}
	*vpp = NULL;
	err = vfs_hash_get(mp, fuse_vnode_hash(nodeid), LK_EXCLUSIVE, td, vpp,
	    fuse_vnode_cmp, &nodeid);
	if (err)
		return (err);

	if (*vpp) {
		MPASS((*vpp)->v_type == vtyp && (*vpp)->v_data != NULL);
		FS_DEBUG("vnode taken from hash\n");
		return (0);
	}
	fvdat = malloc(sizeof(*fvdat), M_FUSEVN, M_WAITOK | M_ZERO);
	err = getnewvnode("fuse", mp, &fuse_vnops, vpp);
	if (err) {
		free(fvdat, M_FUSEVN);
		return (err);
	}
	lockmgr((*vpp)->v_vnlock, LK_EXCLUSIVE, NULL);
	fuse_vnode_init(*vpp, fvdat, nodeid, vtyp);
	err = insmntque(*vpp, mp);
	ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc");
	if (err) {
		free(fvdat, M_FUSEVN);
		*vpp = NULL;
		return (err);
	}
	err = vfs_hash_insert(*vpp, fuse_vnode_hash(nodeid), LK_EXCLUSIVE,
	    td, &vp2, fuse_vnode_cmp, &nodeid);
	if (err)
		return (err);
	if (vp2 != NULL) {
		*vpp = vp2;
		return (0);
	}

	ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc");

	return (0);
}
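
For context, the fuse_vnode_cmp callback passed to vfs_hash_get() and vfs_hash_insert() above matches on the FUSE node id. A sketch of what it roughly looks like (the exact body may differ between fuse revisions):

static int
fuse_vnode_cmp(struct vnode *vp, void *nidp)
{
	/* Non-zero means "no match" under the vfs_hash contract. */
	return (VTOI(vp) != *(uint64_t *)nidp);
}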
Example #3
void
zfs_rmnode(znode_t *zp)
{
    zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
    objset_t	*os = zfsvfs->z_os;
    znode_t		*xzp = NULL;
    dmu_tx_t	*tx;
    uint64_t	acl_obj;
    uint64_t	xattr_obj;
    int		error;

    ASSERT(zp->z_links == 0);
    ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);

    /*
     * If this is an attribute directory, purge its contents.
     */
    if (ZTOV(zp) != NULL && ZTOV(zp)->v_type == VDIR &&
            (zp->z_pflags & ZFS_XATTR)) {
        if (zfs_purgedir(zp) != 0) {
            /*
             * Not enough space to delete some xattrs.
             * Leave it in the unlinked set.
             */
            zfs_znode_dmu_fini(zp);
            zfs_znode_free(zp);
            return;
        }
    } else {
        /*
         * Free up all the data in the file.  We don't do this for
         * XATTR directories because we need truncate and remove to be
         * in the same tx, like in zfs_znode_delete(). Otherwise, if
         * we crash here we'll end up with an inconsistent truncated
         * zap object in the delete queue.  Note a truncated file is
         * harmless since it only contains user data.
         */
        error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END);
        if (error) {
            /*
             * Not enough space.  Leave the file in the unlinked
             * set.
             */
            zfs_znode_dmu_fini(zp);
            zfs_znode_free(zp);
            return;
        }
    }

    /*
     * If the file has extended attributes, we're going to unlink
     * the xattr dir.
     */
    error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
                      &xattr_obj, sizeof (xattr_obj));
    if (error == 0 && xattr_obj) {
        error = zfs_zget(zfsvfs, xattr_obj, &xzp);
        ASSERT3S(error, ==, 0);
        vn_lock(ZTOV(xzp), LK_EXCLUSIVE | LK_RETRY);
    }
Example #4
void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_WUNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	vp->v_object = NULL;
}
Example #5
void
fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td)
{
	/*
	 * This function is called for every vnode open.
	 * Merge fuse_open_flags; it may be 0.
	 */
	/*
	 * Ideally, direct I/O would be enabled on a per-fd basis,
	 * but there is no obvious way to provide that in this
	 * implementation.
	 *
	 * It is also hard to see why two different fds on the same
	 * vnode would want DIRECT_IO toggled independently, but the
	 * Linux-based implementation works on an fd rather than an
	 * inode and does provide such a feature.
	 *
	 * XXXIP: Handle fd based DIRECT_IO
	 */
	if (fuse_open_flags & FOPEN_DIRECT_IO) {
		ASSERT_VOP_ELOCKED(vp, __func__);
		VTOFUD(vp)->flag |= FN_DIRECTIO;
		fuse_io_invalbuf(vp, td);
	} else {
		if ((fuse_open_flags & FOPEN_KEEP_CACHE) == 0)
			fuse_io_invalbuf(vp, td);
		VTOFUD(vp)->flag &= ~FN_DIRECTIO;
	}

	if (vnode_vtype(vp) == VREG) {
		/* XXXIP prevent getattr, by using cached node size */
		vnode_create_vobject(vp, 0, td);
	}
}
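
The fuse_open_flags argument originates in the daemon's reply to FUSE_OPEN/FUSE_CREATE, i.e. struct fuse_open_out (compare Example #20). A hypothetical helper showing the hand-off (example_finish_open is illustrative only):

static void
example_finish_open(struct vnode *vp, struct fuse_open_out *foo,
    struct thread *td)
{
	/* open_flags may carry FOPEN_DIRECT_IO and/or FOPEN_KEEP_CACHE. */
	fuse_vnode_open(vp, foo->open_flags, td);
}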
Example #6
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ext2_update(struct vnode *vp, int waitfor)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ASSERT_VOP_ELOCKED(vp, "ext2_update");
	ext2_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_e2fs;
	if(fs->e2fs_ronly)
		return (0);
	if ((error = bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		(int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	ext2_i2ei(ip, (struct ext2fs_dinode *)((char *)bp->b_data +
	    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)));
	if (waitfor && !DOINGASYNC(vp))
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
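
A hypothetical caller sketch, since ext2_update() must be entered with the vnode exclusively locked (example_flush_inode is not part of ext2fs):

static int
example_flush_inode(struct vnode *vp)
{
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = ext2_update(vp, 1);	/* waitfor != 0: wait for the write */
	VOP_UNLOCK(vp, 0);
	return (error);
}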
Example #7
/*
 *	The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	if (object->flags & OBJ_DISCONNECTWNT) {
		vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
		wakeup(object);
	}
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VOP_UNSET_TEXT(vp);
	VM_OBJECT_WUNLOCK(object);
	while (refs-- > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}
Example #8
static void
ncl_releasesillyrename(struct vnode *vp, struct thread *td)
{
	struct nfsnode *np;
	struct sillyrename *sp;

	ASSERT_VOP_ELOCKED(vp, "releasesillyrename");
	np = VTONFS(vp);
	mtx_assert(&np->n_mtx, MA_OWNED);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp != NULL) {
		mtx_unlock(&np->n_mtx);
		(void) ncl_vinvalbuf(vp, 0, td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		ncl_removeit(sp, vp);
		crfree(sp->s_cred);
		sysmon_task_queue_sched(0, nfs_freesillyrename, sp);
		mtx_lock(&np->n_mtx);
	}
}
Example #9
void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice. The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	vp->v_object = NULL;
}
Example #10
int
smbfs_smb_lookup(struct smbnode *dnp, const char *name, int nmlen,
	struct smbfattr *fap, struct smb_cred *scred)
{
	struct smbfs_fctx *ctx;
	int error;

	if (dnp == NULL || (dnp->n_ino == 2 && name == NULL)) {
		bzero(fap, sizeof(*fap));
		fap->fa_attr = SMB_FA_DIR;
		fap->fa_ino = 2;
		return 0;
	}
	MPASS(!(nmlen == 2 && name[0] == '.' && name[1] == '.'));
	MPASS(!(nmlen == 1 && name[0] == '.'));
	ASSERT_VOP_ELOCKED(dnp->n_vnode, "smbfs_smb_lookup");
	error = smbfs_findopen(dnp, name, nmlen,
	    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR, scred, &ctx);
	if (error)
		return error;
	ctx->f_flags |= SMBFS_RDD_FINDSINGLE;
	error = smbfs_findnext(ctx, 1, scred);
	if (error == 0) {
		*fap = ctx->f_attr;
		if (name == NULL)
			fap->fa_ino = dnp->n_ino;
	}
	smbfs_findclose(ctx, scred);
	return error;
}
Example #11
void
fuse_internal_vnode_disappear(struct vnode *vp)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear");
	fvdat->flag |= FN_REVOKED;
	cache_purge(vp);
}
Example #12
/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
fuse_io_invalbuf(struct vnode *vp, struct thread *td)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	int error = 0;

	if (vp->v_iflag & VI_DOOMED)
		return 0;

	ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf");

	while (fvdat->flag & FN_FLUSHINPROG) {
		struct proc *p = td->td_proc;

		if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)
			return EIO;
		fvdat->flag |= FN_FLUSHWANT;
		tsleep(&fvdat->flag, PRIBIO + 2, "fusevinv", 2 * hz);
		error = 0;
		if (p != NULL) {
			PROC_LOCK(p);
			if (SIGNOTEMPTY(p->p_siglist) ||
			    SIGNOTEMPTY(td->td_siglist))
				error = EINTR;
			PROC_UNLOCK(p);
		}
		if (error == EINTR)
			return EINTR;
	}
	fvdat->flag |= FN_FLUSHINPROG;

	if (vp->v_bufobj.bo_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
	}
	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	while (error) {
		if (error == ERESTART || error == EINTR) {
			fvdat->flag &= ~FN_FLUSHINPROG;
			if (fvdat->flag & FN_FLUSHWANT) {
				fvdat->flag &= ~FN_FLUSHWANT;
				wakeup(&fvdat->flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
	}
	fvdat->flag &= ~FN_FLUSHINPROG;
	if (fvdat->flag & FN_FLUSHWANT) {
		fvdat->flag &= ~FN_FLUSHWANT;
		wakeup(&fvdat->flag);
	}
	return (error);
}
Example #13
int
fuse_vnode_savesize(struct vnode *vp, struct ucred *cred)
{
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct thread *td = curthread;
    struct fuse_filehandle *fufh = NULL;
    struct fuse_dispatcher  fdi;
    struct fuse_setattr_in *fsai;
    int err = 0;

    DEBUG("inode=%jd size=%jd\n", VTOI(vp), fvdat->filesize);
    ASSERT_VOP_ELOCKED(vp, "fuse_io_extend");

    if (fuse_isdeadfs(vp)) {
        return EBADF;
    }

    if (vnode_vtype(vp) == VDIR) {
        return EISDIR;
    }

    if (vfs_isrdonly(vnode_mount(vp))) {
        return EROFS;
    }

    if (cred == NULL) {
        cred = td->td_ucred;
    }

    fdisp_init(&fdi, sizeof(*fsai));
    fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
    fsai = fdi.indata;
    fsai->valid = 0;

    /* Truncate to a new value. */
    fsai->size = fvdat->filesize;
    fsai->valid |= FATTR_SIZE;

    fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
    if (fufh) {
        fsai->fh = fufh->fh_id;
        fsai->valid |= FATTR_FH;
    }

    err = fdisp_wait_answ(&fdi);
    fdisp_destroy(&fdi);
    if (err == 0)
        fvdat->flag &= ~FN_SIZECHANGE;

    fuse_invalidate_attr(vp);

    return err;
}
Example #14
int
fuse_vnode_setsize(struct vnode *vp, struct ucred *cred, off_t newsize)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	off_t oldsize;
	int err = 0;

	ASSERT_VOP_ELOCKED(vp, "fuse_vnode_setsize");

	oldsize = fvdat->filesize;
	fvdat->filesize = newsize;
	fvdat->flag |= FN_SIZECHANGE;

	if (newsize < oldsize) {
		err = vtruncbuf(vp, cred, newsize, fuse_iosize(vp));
	}
	vnode_pager_setsize(vp, newsize);
	return err;
}
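
A hypothetical flow tying this together with Example #13: a truncation first adjusts the cached size and buffers via fuse_vnode_setsize(), then, once FN_SIZECHANGE is set, pushes the new size to the daemon with fuse_vnode_savesize() (example_truncate is illustrative only):

static int
example_truncate(struct vnode *vp, struct ucred *cred, off_t size)
{
	int err;

	ASSERT_VOP_ELOCKED(vp, __func__);
	err = fuse_vnode_setsize(vp, cred, size);
	if (err == 0)
		err = fuse_vnode_savesize(vp, cred);	/* sends FUSE_SETATTR */
	return (err);
}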
Example #15
void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	umtx_shm_object_terminated(obj);
	if (obj->ref_count == 0) {
		/*
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0) {
			vm_object_terminate(obj);
		} else {
			/*
			 * Waiters were already handled during object
			 * termination.  The exclusive vnode lock hopefully
			 * prevented new waiters from referencing the dying
			 * object.
			 */
			KASSERT((obj->flags & OBJ_DISCONNECTWNT) == 0,
			    ("OBJ_DISCONNECTWNT set obj %p flags %x",
			    obj, obj->flags));
			vp->v_object = NULL;
			VM_OBJECT_WUNLOCK(obj);
		}
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
}
Example #16
int
fuse_vnode_setsize(struct vnode *vp, struct ucred *cred, off_t newsize)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	off_t oldsize;
	int err = 0;

	FS_DEBUG("inode=%ju oldsize=%ju newsize=%ju\n",
	    (uintmax_t)VTOI(vp), (uintmax_t)fvdat->filesize,
	    (uintmax_t)newsize);
	ASSERT_VOP_ELOCKED(vp, "fuse_vnode_setsize");

	oldsize = fvdat->filesize;
	fvdat->filesize = newsize;
	fvdat->flag |= FN_SIZECHANGE;

	if (newsize < oldsize) {
		err = vtruncbuf(vp, cred, newsize, fuse_iosize(vp));
	}
	vnode_pager_setsize(vp, newsize);
	fuse_invalidate_attr(vp);

	return err;
}
Example #17
static int
fuse_vnode_alloc(struct mount *mp,
                 struct thread *td,
                 uint64_t nodeid,
                 enum vtype vtyp,
                 struct vnode **vpp)
{
    const int lkflags = LK_EXCLUSIVE | LK_RETRY;
    struct fuse_vnode_data *fvdat;
    struct vnode *vp2;
    int err = 0;

    DEBUG("been asked for vno #%ju\n", (uintmax_t)nodeid);

    if (vtyp == VNON) {
        return EINVAL;
    }

    *vpp = NULL;
    err = vfs_hash_get(mp, fuse_vnode_hash(nodeid), lkflags, td, vpp,
                       fuse_vnode_cmp, &nodeid);
    if (err)
        return (err);

    if (*vpp) {
        MPASS((*vpp)->v_type == vtyp && (*vpp)->v_data != NULL);
        DEBUG("vnode taken from hash\n");
        return (0);
    }

    fvdat = malloc(sizeof(*fvdat), M_FUSEVN, M_WAITOK | M_ZERO);
    err = getnewvnode("fuse", mp, &fuse_vnops, vpp);
    if (err) {
        free(fvdat, M_FUSEVN);
        return (err);
    }

    vn_lock(*vpp, lkflags);
    err = insmntque(*vpp, mp);
    ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc");
    if (err) {
        VOP_UNLOCK(*vpp, 0);
        free(fvdat, M_FUSEVN);
        *vpp = NULL;
        return (err);
    }

    fuse_vnode_init(*vpp, fvdat, nodeid, vtyp);
    err = vfs_hash_insert(*vpp, fuse_vnode_hash(nodeid), lkflags,
                          td, &vp2, fuse_vnode_cmp, &nodeid);

    if (err) {
        VOP_UNLOCK(*vpp, 0);
        fuse_vnode_destroy(*vpp);
        *vpp = NULL;
        return (err);
    }

    /*
     * XXXIP: Prevent silent vnode reuse. It may happen because several fuse
     * filesystems ignore inode numbers
     */
    KASSERT(vp2 == NULL,
            ("vfs hash collision for node #%ju\n", (uintmax_t)nodeid));
    ASSERT_VOP_ELOCKED(*vpp, "fuse_vnode_alloc");

    return (0);
}
Example #18
/*
 * Update the disk quota in the quota file.
 */
static int
dqsync(struct vnode *vp, struct dquot *dq)
{
    uint8_t buf[sizeof(struct dqblk64)];
    off_t base, recsize;
    struct vnode *dqvp;
    struct iovec aiov;
    struct uio auio;
    int error;
    struct mount *mp;
    struct ufsmount *ump;

#ifdef DEBUG_VFS_LOCKS
    if (vp != NULL)
        ASSERT_VOP_ELOCKED(vp, "dqsync");
#endif

    mp = NULL;
    error = 0;
    if (dq == NODQUOT)
        panic("dqsync: dquot");
    if ((ump = dq->dq_ump) == NULL)
        return (0);
    UFS_LOCK(ump);
    if ((dqvp = ump->um_quotas[dq->dq_type]) == NULLVP) {
        if (vp == NULL) {
            UFS_UNLOCK(ump);
            return (0);
        } else
            panic("dqsync: file");
    }
    vref(dqvp);
    UFS_UNLOCK(ump);

    DQI_LOCK(dq);
    if ((dq->dq_flags & DQ_MOD) == 0) {
        DQI_UNLOCK(dq);
        vrele(dqvp);
        return (0);
    }
    DQI_UNLOCK(dq);

    (void) vn_start_secondary_write(dqvp, &mp, V_WAIT);
    if (vp != dqvp)
        vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);

    DQI_LOCK(dq);
    DQI_WAIT(dq, PINOD+2, "dqsync");
    if ((dq->dq_flags & DQ_MOD) == 0)
        goto out;
    dq->dq_flags |= DQ_LOCK;
    DQI_UNLOCK(dq);

    /*
     * Write the quota record to the quota file, performing any
     * necessary conversions.  See dqget() for additional details.
     */
    if (ump->um_qflags[dq->dq_type] & QTF_64BIT) {
        dq_dqb64(dq, (struct dqblk64 *)buf);
        recsize = sizeof(struct dqblk64);
        base = sizeof(struct dqhdr64);
    } else {
        dq_dqb32(dq, (struct dqblk32 *)buf);
        recsize = sizeof(struct dqblk32);
        base = 0;
    }

    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    aiov.iov_base = buf;
    aiov.iov_len = recsize;
    auio.uio_resid = recsize;
    auio.uio_offset = base + dq->dq_id * recsize;
    auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_rw = UIO_WRITE;
    auio.uio_td = (struct thread *)0;
    error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
    if (auio.uio_resid && error == 0)
        error = EIO;

    DQI_LOCK(dq);
    DQI_WAKEUP(dq);
    dq->dq_flags &= ~DQ_MOD;
out:
    DQI_UNLOCK(dq);
    if (vp != dqvp)
        vput(dqvp);
    else
        vrele(dqvp);
    vn_finished_secondary_write(mp);
    return (error);
}
Example #19
static int
tmpfs_rename(struct vop_rename_args *v)
{
	struct vnode *fdvp = v->a_fdvp;
	struct vnode *fvp = v->a_fvp;
	struct componentname *fcnp = v->a_fcnp;
	struct vnode *tdvp = v->a_tdvp;
	struct vnode *tvp = v->a_tvp;
	struct componentname *tcnp = v->a_tcnp;
	struct mount *mp = NULL;

	char *newname;
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *fdnode;
	struct tmpfs_node *fnode;
	struct tmpfs_node *tnode;
	struct tmpfs_node *tdnode;

	MPASS(VOP_ISLOCKED(tdvp));
	MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp)));
	MPASS(fcnp->cn_flags & HASBUF);
	MPASS(tcnp->cn_flags & HASBUF);

	/* Disallow cross-device renames.
	 * XXX Why isn't this done by the caller? */
	if (fvp->v_mount != tdvp->v_mount ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto out;
	}

	/* If source and target are the same file, there is nothing to do. */
	if (fvp == tvp) {
		error = 0;
		goto out;
	}

	/* If we need to move the directory between entries, lock the
	 * source so that we can safely operate on it. */
	if (fdvp != tdvp && fdvp != tvp) {
		if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
			mp = tdvp->v_mount;
			error = vfs_busy(mp, 0);
			if (error != 0) {
				mp = NULL;
				goto out;
			}
			error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
			    fcnp, tcnp);
			if (error != 0) {
				vfs_unbusy(mp);
				return (error);
			}
			ASSERT_VOP_ELOCKED(fdvp,
			    "tmpfs_rename: fdvp not locked");
			ASSERT_VOP_ELOCKED(tdvp,
			    "tmpfs_rename: tdvp not locked");
			if (tvp != NULL)
				ASSERT_VOP_ELOCKED(tvp,
				    "tmpfs_rename: tvp not locked");
			if (fvp == tvp) {
				error = 0;
				goto out_locked;
			}
		}
	}

	tmp = VFS_TO_TMPFS(tdvp->v_mount);
	tdnode = VP_TO_TMPFS_DIR(tdvp);
	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
	fdnode = VP_TO_TMPFS_DIR(fdvp);
	fnode = VP_TO_TMPFS_NODE(fvp);
	de = tmpfs_dir_lookup(fdnode, fnode, fcnp);

	/* Entry can disappear before we lock fdvp,
	 * also avoid manipulating '.' and '..' entries. */
	if (de == NULL) {
		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
			error = EINVAL;
		else
			error = ENOENT;
		goto out_locked;
	}
	MPASS(de->td_node == fnode);

	/* If renaming a directory to another preexisting directory,
	 * ensure that the target directory is empty so that its
	 * removal causes no side effects.
	 * kern_rename() guarantees the destination to be a directory
	 * if the source is one. */
	if (tvp != NULL) {
		MPASS(tnode != NULL);

		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
			error = EPERM;
			goto out_locked;
		}

		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
			if (tnode->tn_size > 0) {
				error = ENOTEMPTY;
				goto out_locked;
			}
		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
			error = ENOTDIR;
			goto out_locked;
		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
			error = EISDIR;
			goto out_locked;
		} else {
			MPASS(fnode->tn_type != VDIR &&
				tnode->tn_type != VDIR);
		}
	}

	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
	    || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
		error = EPERM;
		goto out_locked;
	}

	/* Ensure that we have enough memory to hold the new name, if it
	 * has to be changed. */
	if (fcnp->cn_namelen != tcnp->cn_namelen ||
	    bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
		newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
	} else
		newname = NULL;

	/* If the node is being moved to another directory, we have to do
	 * the move. */
	if (fdnode != tdnode) {
		/* In case we are moving a directory, we have to adjust its
		 * parent to point to the new parent. */
		if (de->td_node->tn_type == VDIR) {
			struct tmpfs_node *n;

			/* Ensure the target directory is not a child of the
			 * directory being moved.  Otherwise, we'd end up
			 * with stale nodes. */
			n = tdnode;
			/* TMPFS_LOCK guarantees that no nodes are freed while
			 * traversing the list. Nodes can only be marked as
			 * removed: tn_parent == NULL. */
			TMPFS_LOCK(tmp);
			TMPFS_NODE_LOCK(n);
			while (n != n->tn_dir.tn_parent) {
				struct tmpfs_node *parent;

				if (n == fnode) {
					TMPFS_NODE_UNLOCK(n);
					TMPFS_UNLOCK(tmp);
					error = EINVAL;
					if (newname != NULL)
						free(newname, M_TMPFSNAME);
					goto out_locked;
				}
				parent = n->tn_dir.tn_parent;
				TMPFS_NODE_UNLOCK(n);
				if (parent == NULL) {
					n = NULL;
					break;
				}
				TMPFS_NODE_LOCK(parent);
				if (parent->tn_dir.tn_parent == NULL) {
					TMPFS_NODE_UNLOCK(parent);
					n = NULL;
					break;
				}
				n = parent;
			}
			TMPFS_UNLOCK(tmp);
			if (n == NULL) {
				error = EINVAL;
				if (newname != NULL)
					free(newname, M_TMPFSNAME);
				goto out_locked;
			}
			TMPFS_NODE_UNLOCK(n);

			/* Adjust the parent pointer. */
			TMPFS_VALIDATE_DIR(fnode);
			TMPFS_NODE_LOCK(de->td_node);
			de->td_node->tn_dir.tn_parent = tdnode;
			TMPFS_NODE_UNLOCK(de->td_node);

			/* As a result of changing the target of the '..'
			 * entry, the link count of the source and target
			 * directories has to be adjusted. */
			TMPFS_NODE_LOCK(tdnode);
			TMPFS_ASSERT_LOCKED(tdnode);
			tdnode->tn_links++;
			TMPFS_NODE_UNLOCK(tdnode);

			TMPFS_NODE_LOCK(fdnode);
			TMPFS_ASSERT_LOCKED(fdnode);
			fdnode->tn_links--;
			TMPFS_NODE_UNLOCK(fdnode);
		}
	}

	/* Do the move: just remove the entry from the source directory
	 * and insert it into the target one. */
	tmpfs_dir_detach(fdvp, de);

	if (fcnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_whiteout_add(fdvp, fcnp);
	if (tcnp->cn_flags & ISWHITEOUT)
		tmpfs_dir_whiteout_remove(tdvp, tcnp);

	/* If the name has changed, we need to make it effective by changing
	 * it in the directory entry. */
	if (newname != NULL) {
		MPASS(tcnp->cn_namelen <= MAXNAMLEN);

		free(de->ud.td_name, M_TMPFSNAME);
		de->ud.td_name = newname;
		tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);

		fnode->tn_status |= TMPFS_NODE_CHANGED;
		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
	}

	/* If we are overwriting an entry, we have to remove the old one
	 * from the target directory. */
	if (tvp != NULL) {
		struct tmpfs_dirent *tde;

		/* Remove the old entry from the target directory. */
		tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
		tmpfs_dir_detach(tdvp, tde);

		/* Free the directory entry we just deleted.  Note that the
		 * node referred by it will not be removed until the vnode is
		 * really reclaimed. */
		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
	}

	tmpfs_dir_attach(tdvp, de);

	cache_purge(fvp);
	if (tvp != NULL)
		cache_purge(tvp);
	cache_purge_negative(tdvp);

	error = 0;

out_locked:
	if (fdvp != tdvp && fdvp != tvp)
		VOP_UNLOCK(fdvp, 0);

out:
	/* Release target nodes. */
	/* XXX: I don't understand when tdvp can be the same as tvp, but
	 * other code takes care of this... */
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp != NULL)
		vput(tvp);

	/* Release source nodes. */
	vrele(fdvp);
	vrele(fvp);

	if (mp != NULL)
		vfs_unbusy(mp);

	return (error);
}
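
The LK_NOWAIT dance above avoids a classic rename deadlock: sleeping on fdvp while already holding tdvp could deadlock against a rename running in the opposite direction. Reduced to a grossly simplified, hypothetical skeleton (the real code must additionally busy the mount and revalidate everything after relocking, as tmpfs_rename_relock() does):

static void
example_lock_pair(struct vnode *held, struct vnode *wanted)
{
	if (vn_lock(wanted, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		/* Contended: drop what we hold, then sleep for both. */
		VOP_UNLOCK(held, 0);
		vn_lock(wanted, LK_EXCLUSIVE | LK_RETRY);
		vn_lock(held, LK_EXCLUSIVE | LK_RETRY);
		/* State may have changed while unlocked; caller rechecks. */
	}
}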
Example #20
/*
    struct vnop_create_args {
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
	struct vattr *a_vap;
    };
*/
static int
fuse_vnop_create(struct vop_create_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vattr *vap = ap->a_vap;
	struct thread *td = cnp->cn_thread;
	struct ucred *cred = cnp->cn_cred;

	struct fuse_open_in *foi;
	struct fuse_entry_out *feo;
	struct fuse_dispatcher fdi;
	struct fuse_dispatcher *fdip = &fdi;

	int err;

	struct mount *mp = vnode_mount(dvp);
	uint64_t parentnid = VTOFUD(dvp)->nid;
	mode_t mode = MAKEIMODE(vap->va_type, vap->va_mode);
	uint64_t x_fh_id;
	uint32_t x_open_flags;

	fuse_trace_printf_vnop();

	if (fuse_isdeadfs(dvp)) {
		return ENXIO;
	}
	bzero(&fdi, sizeof(fdi));

	/* XXX: Will we ever want devices? */
	if (vap->va_type != VREG) {
		MPASS(vap->va_type != VFIFO);
		goto bringup;
	}
	debug_printf("parent nid = %ju, mode = %x\n", (uintmax_t)parentnid,
	    mode);

	fdisp_init(fdip, sizeof(*foi) + cnp->cn_namelen + 1);
	if (!fsess_isimpl(mp, FUSE_CREATE)) {
		debug_printf("eh, daemon doesn't implement create?\n");
		return (EINVAL);
	}
	fdisp_make(fdip, FUSE_CREATE, vnode_mount(dvp), parentnid, td, cred);

	foi = fdip->indata;
	foi->mode = mode;
	foi->flags = O_CREAT | O_RDWR;

	memcpy((char *)fdip->indata + sizeof(*foi), cnp->cn_nameptr,
	    cnp->cn_namelen);
	((char *)fdip->indata)[sizeof(*foi) + cnp->cn_namelen] = '\0';

	err = fdisp_wait_answ(fdip);

	if (err) {
		if (err == ENOSYS)
			fsess_set_notimpl(mp, FUSE_CREATE);
		debug_printf("create: got err=%d from daemon\n", err);
		goto out;
	}
bringup:
	feo = fdip->answ;

	if ((err = fuse_internal_checkentry(feo, VREG))) {
		goto out;
	}
	err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, VREG);
	if (err) {
		struct fuse_release_in *fri;
		uint64_t nodeid = feo->nodeid;
		uint64_t fh_id = ((struct fuse_open_out *)(feo + 1))->fh;

		fdisp_init(fdip, sizeof(*fri));
		fdisp_make(fdip, FUSE_RELEASE, mp, nodeid, td, cred);
		fri = fdip->indata;
		fri->fh = fh_id;
		fri->flags = OFLAGS(mode);
		fuse_insert_callback(fdip->tick, fuse_internal_forget_callback);
		fuse_insert_message(fdip->tick);
		return err;
	}
	ASSERT_VOP_ELOCKED(*vpp, "fuse_vnop_create");

	fdip->answ = feo + 1;

	x_fh_id = ((struct fuse_open_out *)(feo + 1))->fh;
	x_open_flags = ((struct fuse_open_out *)(feo + 1))->open_flags;
	fuse_filehandle_init(*vpp, FUFH_RDWR, NULL, x_fh_id);
	fuse_vnode_open(*vpp, x_open_flags, td);
	cache_purge_negative(dvp);

out:
	fdisp_destroy(fdip);
	return err;
}
Example #21
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  The IN_LAZYACCESS is set instead of IN_MODIFIED if the fs
 * is currently being suspended (or is suspended) and vnode has been accessed.
 * If we write now, then clear IN_MODIFIED, IN_LAZYACCESS and IN_LAZYMOD to
 * reflect the presumably successful write, and if waitfor is set, then wait
 * for the write to complete.
 */
int
ffs_update(vnode *vp, int waitfor)
{
	int error = 0;
	print("HARVEY TODO: %s\n", __func__);
#if 0
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int flags, error;

	ASSERT_VOP_ELOCKED(vp, "ffs_update");
	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ITOFS(ip);
	if (fs->fs_ronly && ITOUMP(ip)->um_fsckpid == 0)
		return (0);
	/*
	 * If we are updating a snapshot and another process is currently
	 * writing the buffer containing the inode for this snapshot then
	 * a deadlock can occur when it tries to check the snapshot to see
	 * if that block needs to be copied. Thus when updating a snapshot
	 * we check to see if the buffer is already locked, and if it is
	 * we drop the snapshot lock until the buffer has been written
	 * and is available to us. We have to grab a reference to the
	 * snapshot vnode to prevent it from being removed while we are
	 * waiting for the buffer.
	 */
	flags = 0;
	if (IS_SNAPSHOT(ip))
		flags = GB_LOCK_NOWAIT;
loop:
	error = breadn_flags(ITODEVVP(ip),
	     fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	     (int) fs->fs_bsize, 0, 0, 0, NOCRED, flags, &bp);
	if (error != 0) {
		if (error != EBUSY)
			return (error);
		KASSERT((IS_SNAPSHOT(ip)), ("EBUSY from non-snapshot"));
		/*
		 * Wait for our inode block to become available.
		 *
		 * Hold a reference to the vnode to protect against
		 * ffs_snapgone(). Since we hold a reference, it can only
		 * get reclaimed (VI_DOOMED flag) in a forcible downgrade
		 * or unmount. For an unmount, the entire filesystem will be
		 * gone, so we cannot attempt to touch anything associated
		 * with it while the vnode is unlocked; all we can do is 
		 * pause briefly and try again. If when we relock the vnode
		 * we discover that it has been reclaimed, updating it is no
		 * longer necessary and we can just return an error.
		 */
		vref(vp);
		VOP_UNLOCK(vp, 0);
		pause("ffsupd", 1);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vrele(vp);
		if ((vp->v_iflag & VI_DOOMED) != 0)
			return (ENOENT);
		goto loop;
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	if (I_IS_UFS1(ip)) {
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
		/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
		random_harvest_queue(&(ip->i_din1), sizeof(ip->i_din1), 1, RANDOM_FS_ATIME);
	} else {
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
		/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
		random_harvest_queue(&(ip->i_din2), sizeof(ip->i_din2), 1, RANDOM_FS_ATIME);
	}
	if (waitfor)
		error = bwrite(bp);
	else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		bawrite(bp);
		error = 0;
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		error = 0;
	}
#endif // 0
	return (error);
}
Example #22
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
      struct dquot **dqp)
{
    uint8_t buf[sizeof(struct dqblk64)];
    off_t base, recsize;
    struct dquot *dq, *dq1;
    struct dqhash *dqh;
    struct vnode *dqvp;
    struct iovec aiov;
    struct uio auio;
    int dqvplocked, error;

#ifdef DEBUG_VFS_LOCKS
    if (vp != NULLVP)
        ASSERT_VOP_ELOCKED(vp, "dqget");
#endif

    if (vp != NULLVP && *dqp != NODQUOT) {
        return (0);
    }

    /*
     * XXX: Disallow negative id values to prevent the
     * creation of 100GB+ quota data files.
     */
    if ((int)id < 0)
        return (EINVAL);

    UFS_LOCK(ump);
    dqvp = ump->um_quotas[type];
    if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
        *dqp = NODQUOT;
        UFS_UNLOCK(ump);
        return (EINVAL);
    }
    vref(dqvp);
    UFS_UNLOCK(ump);
    error = 0;
    dqvplocked = 0;

    /*
     * Check the cache first.
     */
    dqh = DQHASH(dqvp, id);
    DQH_LOCK();
    dq = dqhashfind(dqh, id, dqvp);
    if (dq != NULL) {
        DQH_UNLOCK();
hfound:
        DQI_LOCK(dq);
        DQI_WAIT(dq, PINOD+1, "dqget");
        DQI_UNLOCK(dq);
        if (dq->dq_ump == NULL) {
            dqrele(vp, dq);
            dq = NODQUOT;
            error = EIO;
        }
        *dqp = dq;
        if (dqvplocked)
            vput(dqvp);
        else
            vrele(dqvp);
        return (error);
    }

    /*
     * Quota vnode lock is before DQ_LOCK. Acquire dqvp lock there
     * since new dq will appear on the hash chain DQ_LOCKed.
     */
    if (vp != dqvp) {
        DQH_UNLOCK();
        vn_lock(dqvp, LK_SHARED | LK_RETRY);
        dqvplocked = 1;
        DQH_LOCK();
        /*
         * Recheck the cache after sleep for quota vnode lock.
         */
        dq = dqhashfind(dqh, id, dqvp);
        if (dq != NULL) {
            DQH_UNLOCK();
            goto hfound;
        }
    }

    /*
     * Not in cache, allocate a new one or take it from the
     * free list.
     */
    if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
            numdquot < MAXQUOTAS * desiredvnodes)
        desireddquot += DQUOTINC;
    if (numdquot < desireddquot) {
        numdquot++;
        DQH_UNLOCK();
        dq1 = malloc(sizeof *dq1, M_DQUOT, M_WAITOK | M_ZERO);
        mtx_init(&dq1->dq_lock, "dqlock", NULL, MTX_DEF);
        DQH_LOCK();
        /*
         * Recheck the cache after sleep for memory.
         */
        dq = dqhashfind(dqh, id, dqvp);
        if (dq != NULL) {
            numdquot--;
            DQH_UNLOCK();
            mtx_destroy(&dq1->dq_lock);
            free(dq1, M_DQUOT);
            goto hfound;
        }
        dq = dq1;
    } else {
        if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
            DQH_UNLOCK();
            tablefull("dquot");
            *dqp = NODQUOT;
            if (dqvplocked)
                vput(dqvp);
            else
                vrele(dqvp);
            return (EUSERS);
        }
        if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
            panic("dqget: free dquot isn't %p", dq);
        TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
        if (dq->dq_ump != NULL)
            LIST_REMOVE(dq, dq_hash);
    }

    /*
     * Dq is put into hash already locked to prevent parallel
     * usage while it is being read from file.
     */
    dq->dq_flags = DQ_LOCK;
    dq->dq_id = id;
    dq->dq_type = type;
    dq->dq_ump = ump;
    LIST_INSERT_HEAD(dqh, dq, dq_hash);
    DQREF(dq);
    DQH_UNLOCK();

    /*
     * Read the requested quota record from the quota file, performing
     * any necessary conversions.
     */
    if (ump->um_qflags[type] & QTF_64BIT) {
        recsize = sizeof(struct dqblk64);
        base = sizeof(struct dqhdr64);
    } else {
        recsize = sizeof(struct dqblk32);
        base = 0;
    }
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    aiov.iov_base = buf;
    aiov.iov_len = recsize;
    auio.uio_resid = recsize;
    auio.uio_offset = base + id * recsize;
    auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_rw = UIO_READ;
    auio.uio_td = (struct thread *)0;

    error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
    if (auio.uio_resid == recsize && error == 0) {
        bzero(&dq->dq_dqb, sizeof(dq->dq_dqb));
    } else {
        if (ump->um_qflags[type] & QTF_64BIT)
            dqb64_dq((struct dqblk64 *)buf, dq);
        else
            dqb32_dq((struct dqblk32 *)buf, dq);
    }
    if (dqvplocked)
        vput(dqvp);
    else
        vrele(dqvp);
    /*
     * I/O error in reading quota file, release
     * quota structure and reflect problem to caller.
     */
    if (error) {
        DQH_LOCK();
        dq->dq_ump = NULL;
        LIST_REMOVE(dq, dq_hash);
        DQH_UNLOCK();
        DQI_LOCK(dq);
        if (dq->dq_flags & DQ_WANT)
            wakeup(dq);
        dq->dq_flags = 0;
        DQI_UNLOCK(dq);
        dqrele(vp, dq);
        *dqp = NODQUOT;
        return (error);
    }
    DQI_LOCK(dq);
    /*
     * Check for no limit to enforce.
     * Initialize time values if necessary.
     */
    if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
            dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
        dq->dq_flags |= DQ_FAKE;
    if (dq->dq_id != 0) {
        if (dq->dq_btime == 0) {
            dq->dq_btime = time_second + ump->um_btime[type];
            if (dq->dq_bsoftlimit &&
                    dq->dq_curblocks >= dq->dq_bsoftlimit)
                dq->dq_flags |= DQ_MOD;
        }
        if (dq->dq_itime == 0) {
            dq->dq_itime = time_second + ump->um_itime[type];
            if (dq->dq_isoftlimit &&
                    dq->dq_curinodes >= dq->dq_isoftlimit)
                dq->dq_flags |= DQ_MOD;
        }
    }
    DQI_WAKEUP(dq);
    DQI_UNLOCK(dq);
    *dqp = dq;
    return (0);
}
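
A hypothetical usage sketch: look up a user's dquot with the inode's vnode locked, and drop the reference with dqrele(), the counterpart seen in the error paths above (example_get_user_dquot is illustrative only):

static int
example_get_user_dquot(struct vnode *vp, struct ufsmount *ump, u_long uid)
{
    struct dquot *dq = NODQUOT;
    int error;

    ASSERT_VOP_ELOCKED(vp, "example_get_user_dquot");
    error = dqget(vp, uid, ump, USRQUOTA, &dq);
    if (error == 0)
        dqrele(vp, dq);    /* release the DQREF taken by dqget() */
    return (error);
}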