Example 1
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
ncl_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p != NULL)
		nfs_reclaim_p(ap);

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);

	if (NFS_ISV4(vp) && vp->v_type == VREG)
		/*
		 * We can now safely close any remaining NFSv4 Opens for
		 * this file. Most opens will have already been closed by
		 * ncl_inactive(), but there are cases where it is not
		 * called, so we need to do it again here.
		 */
		(void) nfsrpc_close(vp, 1, ap->a_td);

	vfs_hash_remove(vp);

	/*
	 * Call nfscl_reclaimnode() to save attributes in the delegation,
	 * as required.
	 */
	if (vp->v_type == VREG)
		nfscl_reclaimnode(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			FREE((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	FREE((caddr_t)np->n_fhp, M_NFSFH);
	if (np->n_v4 != NULL)
		FREE((caddr_t)np->n_v4, M_NFSV4NODE);
	mtx_destroy(&np->n_mtx);
	uma_zfree(newnfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}
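
The cookie-freeing loop above is the classic safe-deletion traversal: the
successor is saved in dp2 before dp is freed. On FreeBSD the same idiom is
spelled by the LIST_FOREACH_SAFE macro from <sys/queue.h>. A minimal userland
sketch (the struct body and free() are stand-ins for the kernel's nfsdmap and
FREE; only the traversal idiom is the point):

#include <sys/queue.h>
#include <stdlib.h>

struct dircookie {
	LIST_ENTRY(dircookie) ndm_list;		/* linkage, as in struct nfsdmap */
};
LIST_HEAD(cookiehead, dircookie);

static void
free_cookies(struct cookiehead *head)
{
	struct dircookie *dp, *dp2;

	/* dp2 always holds the next element, so freeing dp is safe. */
	LIST_FOREACH_SAFE(dp, head, ndm_list, dp2)
		free(dp);
	LIST_INIT(head);
}
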
Example 2
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
ncl_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	if (NFS_ISV4(vp) && vp->v_type == VREG)
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until ncl_inactive().
		 * However, since VOP_INACTIVE() is not guaranteed to be
		 * called, we need to do it again here.
		 */
		(void) nfsrpc_close(vp, 1, ap->a_td);

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p != NULL)
		nfs_reclaim_p(ap);

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);

	vfs_hash_remove(vp);

	/*
	 * Call nfscl_reclaimnode() to save attributes in the delegation,
	 * as required.
	 */
	if (vp->v_type == VREG)
		nfscl_reclaimnode(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			FREE((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	FREE((caddr_t)np->n_fhp, M_NFSFH);
	if (np->n_v4 != NULL)
		FREE((caddr_t)np->n_v4, M_NFSV4NODE);
	mtx_destroy(&np->n_mtx);
	uma_zfree(newnfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}
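
Examples 1 and 2 differ only in ordering: here the NFSv4 Close is sent while
the vm object and its pages still exist, before vnode_destroy_vobject(). Both
versions dereference just two fields of the reclaim argument structure. A
hedged sketch of its shape (the a_gen header is the usual auto-generated VOP
boilerplate and is an assumption; a_vp and a_td are the fields actually used
above):

struct vop_reclaim_args {
	struct vop_generic_args a_gen;	/* assumed: generic VOP header */
	struct vnode *a_vp;		/* vnode being reclaimed */
	struct thread *a_td;		/* thread doing the reclaim */
};
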
Example 3
int
ncl_inactive(struct vop_inactive_args *ap)
{
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;
	boolean_t retv;

	np = VTONFS(vp);

	if (NFS_ISV4(vp) && vp->v_type == VREG) {
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until now. Any dirty
		 * buffers/pages must be flushed before the close, so that the
		 * stateid is available for the writes.
		 */
		if (vp->v_object != NULL) {
			VM_OBJECT_WLOCK(vp->v_object);
			retv = vm_object_page_clean(vp->v_object, 0, 0,
			    OBJPC_SYNC);
			VM_OBJECT_WUNLOCK(vp->v_object);
		} else
			retv = TRUE;
		if (retv == TRUE) {
			(void)ncl_flush(vp, MNT_WAIT, NULL, ap->a_td, 1, 0);
			(void)nfsrpc_close(vp, 1, ap->a_td);
		}
	}

	mtx_lock(&np->n_mtx);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp) {
		mtx_unlock(&np->n_mtx);
		(void) ncl_vinvalbuf(vp, 0, ap->a_td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		ncl_removeit(sp, vp);
		crfree(sp->s_cred);
		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
		mtx_lock(&np->n_mtx);
	}
	/*
	 * NMODIFIED means that there might be dirty/stale buffers
	 * associated with the NFS vnode.  None of the other flags are
	 * meaningful after the vnode is unused, so keep only that bit.
	 */
	np->n_flag &= NMODIFIED;
	mtx_unlock(&np->n_mtx);
	return (0);
}
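
The sillyrename handling above is the NFS client's answer to "remove an open
file": the remove was earlier turned into a rename to a hidden name, and the
real removal is deferred until the vnode goes inactive. Only s_cred and
s_task are touched by the code above; a sketch of the full structure, with
the remaining fields marked as assumptions:

struct sillyrename {
	struct task	s_task;		/* deferred free, queued above */
	struct ucred	*s_cred;	/* credentials for the removal */
	struct vnode	*s_dvp;		/* assumed: parent directory vnode */
	long		s_namlen;	/* assumed: length of the silly name */
	char		s_name[32];	/* assumed: the hidden ".nfsXXXX" name */
};
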
Example 4
int
ncl_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np;
	boolean_t retv;

	if (NFS_ISV4(vp) && vp->v_type == VREG) {
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until now. Any dirty
		 * buffers/pages must be flushed before the close, so that the
		 * stateid is available for the writes.
		 */
		if (vp->v_object != NULL) {
			VM_OBJECT_WLOCK(vp->v_object);
			retv = vm_object_page_clean(vp->v_object, 0, 0,
			    OBJPC_SYNC);
			VM_OBJECT_WUNLOCK(vp->v_object);
		} else
			retv = TRUE;
		if (retv == TRUE) {
			(void)ncl_flush(vp, MNT_WAIT, NULL, ap->a_td, 1, 0);
			(void)nfsrpc_close(vp, 1, ap->a_td);
		}
	}

	np = VTONFS(vp);
	mtx_lock(&np->n_mtx);
	ncl_releasesillyrename(vp, ap->a_td);

	/*
	 * NMODIFIED means that there might be dirty/stale buffers
	 * associated with the NFS vnode.  None of the other flags are
	 * meaningful after the vnode is unused.
	 */
	np->n_flag &= NMODIFIED;
	mtx_unlock(&np->n_mtx);
	return (0);
}
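
Example 4 factors the inline sillyrename teardown of Example 3 into
ncl_releasesillyrename(). A plausible reconstruction, assuming the helper is
entered and exited with n_mtx held, is simply the code lifted from Example 3:

static void
ncl_releasesillyrename(struct vnode *vp, struct thread *td)
{
	struct nfsnode *np = VTONFS(vp);
	struct sillyrename *sp;

	mtx_assert(&np->n_mtx, MA_OWNED);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp != NULL) {
		mtx_unlock(&np->n_mtx);
		(void) ncl_vinvalbuf(vp, 0, td, 1);
		/* Remove the silly file that was rename'd earlier. */
		ncl_removeit(sp, vp);
		crfree(sp->s_cred);
		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
		mtx_lock(&np->n_mtx);
	}
}
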
Example 5
/*
 * Asynchronous I/O daemons for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Each daemon exits if it hits the idle timeout defined by the iodmaxidle
 * sysctl.
 */
static void
nfssvc_iod(void *instance)
{
	struct buf *bp;
	struct nfsmount *nmp;
	int myiod, timo;
	int error = 0;

	mtx_lock(&nfs_iod_mtx);
	myiod = (int *)instance - nfs_asyncdaemon;
	/*
	 * Main loop
	 */
	for (;;) {
	    while (((nmp = nfs_iodmount[myiod]) == NULL)
		   || !TAILQ_FIRST(&nmp->nm_bufq)) {
		if (myiod >= nfs_iodmax)
			goto finish;
		if (nmp)
			nmp->nm_bufqiods--;
		if (nfs_iodwant[myiod] == NFSIOD_NOT_AVAILABLE)
			nfs_iodwant[myiod] = NFSIOD_AVAILABLE;
		nfs_iodmount[myiod] = NULL;
		/*
		 * Always keep at least nfs_iodmin kthreads.
		 */
		timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz;
		error = msleep(&nfs_iodwant[myiod], &nfs_iod_mtx, PWAIT | PCATCH,
		    "-", timo);
		if (error) {
			nmp = nfs_iodmount[myiod];
			/*
			 * Rechecking the nm_bufq closes a rare race where the
			 * nfsiod is woken up at the exact time the idle
			 * timeout fires.
			 */
			if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
				error = 0;
			break;
		}
	    }
	    if (error)
		    break;
	    while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
		int giant_locked = 0;

		/* Take one off the front of the list */
		TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen--;
		if (nmp->nm_bufqwant && nmp->nm_bufqlen <= nfs_numasync) {
		    nmp->nm_bufqwant = 0;
		    wakeup(&nmp->nm_bufq);
		}
		mtx_unlock(&nfs_iod_mtx);
		if (NFS_ISV4(bp->b_vp)) {
			giant_locked = 1;
			mtx_lock(&Giant);
		}
		if (bp->b_flags & B_DIRECT) {
			KASSERT((bp->b_iocmd == BIO_WRITE), ("nfssvc_iod: BIO_WRITE not set"));
			(void)nfs_doio_directwrite(bp);
		} else {
			if (bp->b_iocmd == BIO_READ)
				(void) nfs_doio(bp->b_vp, bp, bp->b_rcred, NULL);
			else
				(void) nfs_doio(bp->b_vp, bp, bp->b_wcred, NULL);
		}
		if (giant_locked)
			mtx_unlock(&Giant);
		mtx_lock(&nfs_iod_mtx);
		/*
		 * If there is more than one iod on this mount, then defect
		 * so that the iods can be shared out fairly between the
		 * mounts.
		 */
		if (nfs_defect && nmp->nm_bufqiods > 1) {
		    NFS_DPF(ASYNCIO,
			    ("nfssvc_iod: iod %d defecting from mount %p\n",
			     myiod, nmp));
		    nfs_iodmount[myiod] = NULL;
		    nmp->nm_bufqiods--;
		    break;
		}
	    }
	}
finish:
	nfs_asyncdaemon[myiod] = 0;
	if (nmp)
	    nmp->nm_bufqiods--;
	nfs_iodwant[myiod] = NFSIOD_NOT_AVAILABLE;
	nfs_iodmount[myiod] = NULL;
	/* Someone may be waiting for the last nfsiod to terminate. */
	if (--nfs_numasync == 0)
		wakeup(&nfs_numasync);
	mtx_unlock(&nfs_iod_mtx);
	if ((error == 0) || (error == EWOULDBLOCK))
		kproc_exit(0);
	/* Abnormal termination */
	kproc_exit(1);
}
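
nfssvc_iod() is the consumer half of the async I/O machinery: it sleeps on
nfs_iodwant[myiod] and drains nm_bufq. For orientation, a simplified and
hypothetical sketch of the producer side (the function name nfs_queue_async
is made up; the globals and fields are the ones the loop above reads):

static void
nfs_queue_async(struct nfsmount *nmp, struct buf *bp)
{
	int i;

	mtx_lock(&nfs_iod_mtx);
	/*
	 * Bind an available iod to this mount so that the while loop at
	 * the top of nfssvc_iod() sees nfs_iodmount[myiod] != NULL.
	 */
	for (i = 0; i < nfs_iodmax; i++) {
		if (nfs_iodwant[i] == NFSIOD_AVAILABLE &&
		    nfs_iodmount[i] == NULL) {
			nfs_iodwant[i] = NFSIOD_NOT_AVAILABLE;
			nfs_iodmount[i] = nmp;
			nmp->nm_bufqiods++;
			wakeup(&nfs_iodwant[i]);
			break;
		}
	}
	/* Queue the buffer; a bound iod dequeues it in its inner loop. */
	TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
	nmp->nm_bufqlen++;
	mtx_unlock(&nfs_iod_mtx);
}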