Example #1
0
/*ARGSUSED*/
static int
socket_vop_close(struct vnode *vp, int flag, int count, offset_t offset,
    struct cred *cr, caller_context_t *ct)
{
	struct sonode *so = VTOSO(vp);
	int last;
	int error = 0;

	ASSERT(vp->v_type == VSOCK);

	/* Drop any file locks and share reservations held by this process. */
	cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);

	/* Purge stale stream state, if a stream is attached. */
	if (vp->v_stream != NULL)
		strclean(vp);

	/* Only the last close of this file table entry tears anything down. */
	if (count > 1) {
		dprint(2, ("socket_vop_close: count %d\n", count));
		return (0);
	}

	mutex_enter(&so->so_lock);
	last = (--so->so_count == 0);
	mutex_exit(&so->so_lock);

	/*
	 * Initiate connection shutdown once the final reference to the
	 * sonode has been dropped.
	 */
	if (last)
		error = socket_close_internal(so, flag, cr);

	return (error);
}
Example #2
0
/*
 * Close a mounted file descriptor: release this process's locks and
 * shares, pass the close on to the underlying vnode, and on the last
 * close flush and release the file the descriptor is mounted over.
 */
static int
nm_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *crp,
	caller_context_t *ct)
{
	struct namenode *np = VTONM(vp);
	int rc;

	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);

	/* Propagate the close to the underlying file. */
	rc = VOP_CLOSE(np->nm_filevp, flag, count, offset, crp, ct);
	if (count != 1)
		return (rc);

	/* Last close: push cached data to the underlying file. */
	(void) VOP_FSYNC(np->nm_filevp, FSYNC, crp, ct);

	/*
	 * In the NMNMNT case the hash table entry must be removed before
	 * the final VN_RELE().  In the other cases np->nm_filep keeps a
	 * reference to nm_filevp, so the stale hash entry is harmless.
	 */
	if ((np->nm_flag & NMNMNT) != 0) {
		mutex_enter(&ntable_lock);
		nameremove(np);
		mutex_exit(&ntable_lock);
	}
	VN_RELE(np->nm_filevp);

	return (rc);
}
Example #3
0
/* ARGSUSED1 */
static int
xmem_close(struct vnode *vp, int flag, int count, offset_t offset,
	struct cred *cred)
{
	pid_t pid = ttoproc(curthread)->p_pid;

	/* Nothing per-node to tear down; just drop locks and shares. */
	cleanlocks(vp, pid, 0);
	cleanshares(vp, pid);
	return (0);
}
Example #4
0
/* ARGSUSED */
static int
xattr_file_close(vnode_t *vp, int flags, int count, offset_t off,
    cred_t *cr, caller_context_t *ct)
{
	pid_t pid = ddi_get_pid();

	/* Release any record locks and share reservations we hold. */
	cleanlocks(vp, pid, 0);
	cleanshares(vp, pid);
	return (0);
}
Example #5
0
/*ARGSUSED1*/
static int
devfs_close(struct vnode *vp, int flag, int count,
    offset_t offset, struct cred *cred)
{
	struct dv_node *dv = VTODV(vp);
	pid_t pid = ttoproc(curthread)->p_pid;

	dcmn_err2(("devfs_close %s\n", dv->dv_name));
	ASSERT(vp->v_type == VDIR);

	/* devfs nodes are directories; only locks/shares need cleanup. */
	cleanlocks(vp, pid, 0);
	cleanshares(vp, pid);
	return (0);
}
Example #6
0
/*
 * Undo the side effects of a fifo_open() that was interrupted while the
 * process was blocked: release locks and shares, back out the
 * reader/writer counts, and wake anyone waiting on the fifo.
 *
 * Caller must hold fn_lock->flk_lock.
 */
void
fifo_cleanup(vnode_t *vp, int flag)
{
	fifonode_t *fnp = VTOF(vp);
	pid_t pid = curproc->p_pid;

	ASSERT(MUTEX_HELD(&fnp->fn_lock->flk_lock));

	cleanlocks(vp, pid, 0);
	cleanshares(vp, pid);

	if (flag & FREAD)
		fnp->fn_rcnt--;
	if (flag & FWRITE)
		fnp->fn_wcnt--;

	cv_broadcast(&fnp->fn_wait_cv);
}
Example #7
0
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held. It must not be dropped.
 *
 * Returns 0 on success, EBUSY if the vcache is still in use and cannot
 * be flushed.
 */
int
osi_VM_FlushVCache(struct vcache *avc)
{
    int s, code;
    vnode_t *vp = &avc->v;

    /* Refuse to flush while anyone still holds a reference or an open. */
    if (avc->vrefCount != 0)
	return EBUSY;

    if (avc->opens != 0)
	return EBUSY;

    /*
     * Just in case someone is still referring to the vnode we give up
     * trying to get rid of this guy.
     */
    if (CheckLock(&avc->lock) || LockWaiters(&avc->lock))
	return EBUSY;

    s = VN_LOCK(vp);

    /*
     * we just need to avoid the race
     * in vn_rele between the ref count going to 0 and VOP_INACTIVE
     * finishing up.
     * Note that although we checked vcount above, we didn't have the lock
     */
    if (vp->v_count > 0 || (vp->v_flag & VINACT)) {
	VN_UNLOCK(vp, s);
	return EBUSY;
    }
    VN_UNLOCK(vp, s);

    /*
     * Since we store on last close and on VOP_INACTIVE
     * there should be NO dirty pages
     * Note that we hold the xvcache lock the entire time.
     */
    /* Toss all cached pages for this vnode; global lock is dropped
     * around the page operation. */
    AFS_GUNLOCK();
    PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
    AFS_GLOCK();

    /* afs_chkpgoob will drop and re-acquire the global lock. */
    afs_chkpgoob(vp, 0);
    /* After the toss, no pages, mappings, or dirty data may remain. */
    osi_Assert(!VN_GET_PGCNT(vp));
    osi_Assert(!AFS_VN_MAPPED(vp));
    osi_Assert(!AFS_VN_DIRTY(&avc->v));

    /* Release any remaining file locks and tear down the lock mutex.
     * The two branches reflect IRIX 6.5 vs older layouts of v_filocks. */
#if defined(AFS_SGI65_ENV)
    if (vp->v_filocks)
	cleanlocks(vp, IGN_PID, 0);
    mutex_destroy(&vp->v_filocksem);
#else /* AFS_SGI65_ENV */
    if (vp->v_filocksem) {
	if (vp->v_filocks)
#ifdef AFS_SGI64_ENV
	    /* SGI64 cleanlocks takes a file-lock id rather than a pid. */
	    cleanlocks(vp, &curprocp->p_flid);
#else
	    cleanlocks(vp, IGN_PID, 0);
#endif
	osi_Assert(vp->v_filocks == NULL);
	mutex_destroy(vp->v_filocksem);
	kmem_free(vp->v_filocksem, sizeof *vp->v_filocksem);
	vp->v_filocksem = NULL;
    }
#endif /* AFS_SGI65_ENV */

    /* Someone re-referenced the vcache behind our back: fatal race. */
    if (avc->vrefCount)
	osi_Panic("flushVcache: vm race");
#ifdef AFS_SGI64_ENV
    /* Reclaim the vnode's page cache and behavior-chain state; the
     * global lock is dropped because vnode_pcache_reclaim can sleep. */
    AFS_GUNLOCK();
    vnode_pcache_reclaim(vp);	/* this can sleep */
    vnode_pcache_free(vp);
    if (vp->v_op != &Afs_vnodeops) {
	/* NOTE(review): vnode appears to have been claimed by another
	 * fs layer; let it reclaim its state -- confirm intent. */
	VOP_RECLAIM(vp, FSYNC_WAIT, code);
    }
    AFS_GLOCK();
#ifdef AFS_SGI65_ENV
#ifdef VNODE_TRACING
    ktrace_free(vp->v_trace);
#endif /* VNODE_TRACING */
    /* Detach AFS from the vnode's behavior chain and destroy
     * per-vnode locks (6.5 layout). */
    vn_bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    vn_bhv_head_destroy(&(vp->v_bh));
    destroy_bitlock(&vp->v_pcacheflag);
    mutex_destroy(&vp->v_buf_lock);
#else
    bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    bhv_head_destroy(&(vp->v_bh));
#endif
    vp->v_flag = 0;		/* debug */
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    destroy_bitlock(&vp->v_flag);
#endif
#ifdef INTR_KTHREADS
    AFS_VN_DESTROY_BUF_LOCK(vp);
#endif
#endif /* AFS_SGI64_ENV */

    return 0;
}