/* Example 1 */
void
osi_PostPopulateVCache(struct vcache *avc) {
    /* Initialize the IRIX-specific vnode state of a freshly populated
     * vcache: attach the AFS behavior descriptor, set up per-vnode locks
     * and page-cache state, and assert the entry is otherwise clean. */
    memset(&(avc->vc_bhv_desc), 0, sizeof(avc->vc_bhv_desc));
    bhv_desc_init(&(avc->vc_bhv_desc), avc, avc, &Afs_vnodeops);

#if defined(AFS_SGI65_ENV)
    /* 6.5 kernels use the vn_bhv_* behavior-chain API and keep the
     * file-lock semaphore / buffer lock embedded in the vnode. */
    vn_bhv_head_init(&(avc->v.v_bh), "afsvp");
    vn_bhv_insert_initial(&(avc->v.v_bh), &(avc->vc_bhv_desc));
    avc->v.v_mreg = avc->v.v_mregb = (struct pregion *)avc;
# if defined(VNODE_TRACING)
    avc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
# endif
    init_bitlock(&avc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
		 avc->v.v_number);
    init_mutex(&avc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)avc);
    init_mutex(&avc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)avc);
#else
    /* Pre-6.5 behavior-chain API. */
    bhv_head_init(&(avc->v.v_bh));
    bhv_insert_initial(&(avc->v.v_bh), &(avc->vc_bhv_desc));
#endif

    vnode_pcache_init(&avc->v);

#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    /* Above define is never true except in SGI test kernels. */
    init_bitlock(&avc->v.v_flag, VLOCK, "vnode", avc->v.v_number);
#endif

#ifdef INTR_KTHREADS
    AFS_VN_INIT_BUF_LOCK(&(avc->v));
#endif

    vSetVfsp(avc, afs_globalVFS);
    vSetType(avc, VREG);

    /* The entry is being (re)used: it must have no cached pages, no
     * mappings, no held locks and no stale credentials left over. */
    VN_SET_DPAGES(&(avc->v), NULL);
    osi_Assert((avc->v.v_flag & VINACT) == 0);
    avc->v.v_flag = 0;
    osi_Assert(VN_GET_PGCNT(&(avc->v)) == 0);
    osi_Assert(avc->mapcnt == 0 && avc->vc_locktrips == 0);
    osi_Assert(avc->vc_rwlockid == OSI_NO_LOCKID);
    osi_Assert(avc->v.v_filocks == NULL);
# if !defined(AFS_SGI65_ENV)
    /* On pre-6.5 the lock semaphore is allocated on demand, so it must
     * not be present on a clean entry. */
    osi_Assert(avc->v.v_filocksem == NULL);
# endif
    osi_Assert(avc->cred == NULL);
# if defined(AFS_SGI64_ENV)
    vnode_pcache_reinit(&avc->v);
    avc->v.v_rdev = NODEV;
# endif
    vn_initlist((struct vnlist *)&avc->v);
    avc->lastr = 0;		/* reset read-ahead hint */
}
/* Example 2 */
/* Kernel-debugger helper: walk the VLRU from oldest to newest and print
 * one line per cached AFS vnode — its type, reference count, cached page
 * count, map count, and the pseudo inode number that 'ls' would show.
 * Always returns 0. */
int
idbg_afsvfslist()
{
    struct vcache *vcp;
    struct afs_q *cur;
    struct afs_q *prev;
    afs_int32 inum;		/* what ls prints as 'inode' */

    AFS_GLOCK();
    cur = VLRU.prev;
    while (cur != &VLRU) {
	/* Grab the predecessor first, in case printing perturbs the list. */
	prev = QPrev(cur);
	vcp = QTOV(cur);
	inum = vcp->f.fid.Fid.Vnode + (vcp->f.fid.Fid.Volume << 16);
	inum &= 0x7fffffff;
	qprintf("avp 0x%x type %s cnt %d pg %d map %d nodeid %d(0x%x)\n", vcp,
		tab_vtypes[((vnode_t *) vcp)->v_type],
		((vnode_t *) vcp)->v_count,
		(int)VN_GET_PGCNT((vnode_t *) vcp), (int)vcp->mapcnt, inum,
		inum);
	cur = prev;
    }
    AFS_GUNLOCK();
    return 0;
}
/* Example 3 */
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held. It must not be dropped.
 */
int
osi_VM_FlushVCache(struct vcache *avc)
{
    int s, code;
    vnode_t *vp = &avc->v;

    /* Refuse to flush anything still referenced or open. */
    if (avc->vrefCount != 0)
	return EBUSY;

    if (avc->opens != 0)
	return EBUSY;

    /*
     * Just in case someone is still referring to the vnode we give up
     * trying to get rid of this guy.
     */
    if (CheckLock(&avc->lock) || LockWaiters(&avc->lock))
	return EBUSY;

    s = VN_LOCK(vp);

    /*
     * we just need to avoid the race
     * in vn_rele between the ref count going to 0 and VOP_INACTIVE
     * finishing up.
     * Note that although we checked vcount above, we didn't have the lock
     */
    if (vp->v_count > 0 || (vp->v_flag & VINACT)) {
	VN_UNLOCK(vp, s);
	return EBUSY;
    }
    VN_UNLOCK(vp, s);

    /*
     * Since we store on last close and on VOP_INACTIVE
     * there should be NO dirty pages
     * Note that we hold the xvcache lock the entire time.
     */
    AFS_GUNLOCK();
    PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);	/* toss all cached pages */
    AFS_GLOCK();

    /* afs_chkpgoob will drop and re-acquire the global lock. */
    afs_chkpgoob(vp, 0);
    /* After the toss the vnode must be pageless, unmapped and clean. */
    osi_Assert(!VN_GET_PGCNT(vp));
    osi_Assert(!AFS_VN_MAPPED(vp));
    osi_Assert(!AFS_VN_DIRTY(&avc->v));

#if defined(AFS_SGI65_ENV)
    /* 6.5: lock semaphore is embedded in the vnode; just clean and destroy. */
    if (vp->v_filocks)
	cleanlocks(vp, IGN_PID, 0);
    mutex_destroy(&vp->v_filocksem);
#else /* AFS_SGI65_ENV */
    /* Pre-6.5: the semaphore was heap-allocated on demand, so release it. */
    if (vp->v_filocksem) {
	if (vp->v_filocks)
#ifdef AFS_SGI64_ENV
	    cleanlocks(vp, &curprocp->p_flid);
#else
	    cleanlocks(vp, IGN_PID, 0);
#endif
	osi_Assert(vp->v_filocks == NULL);
	mutex_destroy(vp->v_filocksem);
	kmem_free(vp->v_filocksem, sizeof *vp->v_filocksem);
	vp->v_filocksem = NULL;
    }
#endif /* AFS_SGI65_ENV */

    /* A reference appearing after all the checks above means we raced
     * with the VM system; that should be impossible. */
    if (avc->vrefCount)
	osi_Panic("flushVcache: vm race");
#ifdef AFS_SGI64_ENV
    AFS_GUNLOCK();
    vnode_pcache_reclaim(vp);	/* this can sleep */
    vnode_pcache_free(vp);
    if (vp->v_op != &Afs_vnodeops) {
	/* vnode was handed to another filesystem layer; let it reclaim. */
	VOP_RECLAIM(vp, FSYNC_WAIT, code);
    }
    AFS_GLOCK();
#ifdef AFS_SGI65_ENV
#ifdef VNODE_TRACING
    ktrace_free(vp->v_trace);
#endif /* VNODE_TRACING */
    /* Tear down the behavior chain and per-vnode locks set up in
     * osi_PostPopulateVCache (6.5 variant). */
    vn_bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    vn_bhv_head_destroy(&(vp->v_bh));
    destroy_bitlock(&vp->v_pcacheflag);
    mutex_destroy(&vp->v_buf_lock);
#else
    bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    bhv_head_destroy(&(vp->v_bh));
#endif
    vp->v_flag = 0;		/* debug */
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    destroy_bitlock(&vp->v_flag);
#endif
#ifdef INTR_KTHREADS
    AFS_VN_DESTROY_BUF_LOCK(vp);
#endif
#endif /* AFS_SGI64_ENV */

    return 0;
}