Example #1
/*
 * Purge a vnode from the cache.  At this point the vnode is
 * guaranteed to have no references (vn_count == 0).  The caller must
 * ensure that no one can get a new handle on the vnode (via vn_get),
 * usually by holding a mount/vfs lock.
 */
void
vn_purge(
	struct vnode	*vp,
	vmap_t		*vmap)
{
	vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address);

again:
	/*
	 * Check whether vp has already been reclaimed since our caller
	 * sampled its version while holding a filesystem cache lock that
	 * its VOP_RECLAIM function acquires.
	 */
	VN_LOCK(vp);
	if (vp->v_number != vmap->v_number) {
		VN_UNLOCK(vp, 0);
		return;
	}

	/*
	 * If vp is being reclaimed or inactivated, wait until it is inert,
	 * then proceed.  Can't assume that vnode is actually reclaimed
	 * just because the reclaimed flag is asserted -- a vn_alloc
	 * reclaim can fail.
	 */
	if (vp->v_flag & (VINACT | VRECLM)) {
		ASSERT(vn_count(vp) == 0);
		vp->v_flag |= VWAIT;
		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
		goto again;
	}

	/*
	 * Another process could have raced in and gotten this vnode...
	 */
	if (vn_count(vp) > 0) {
		VN_UNLOCK(vp, 0);
		return;
	}

	XFS_STATS_DEC(vn_active);
	vp->v_flag |= VRECLM;
	VN_UNLOCK(vp, 0);

	/*
	 * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells
	 * vp's filesystem to flush and invalidate all cached resources.
	 * When vn_reclaim returns, vp should have no private data,
	 * either in a system cache or attached to v_data.
	 */
	if (vn_reclaim(vp) != 0)
		panic("vn_purge: cannot reclaim");

	/*
	 * Wakeup anyone waiting for vp to be reclaimed.
	 */
	vn_wakeup(vp);
}
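
A minimal caller sketch for the version handshake described above: the caller snapshots v_number into a vmap_t while holding its filesystem cache lock, drops the lock, and lets vn_purge() re-check the version. Only the v_number field is shown being filled in; the helper below is hypothetical, not taken from the source.

/*
 * Hypothetical caller (sketch): snapshot the vnode generation under
 * the filesystem's cache lock, then purge outside that lock.  A real
 * vmap_t may carry more fields than v_number.
 */
static void
example_purge_one(struct vnode *vp)
{
	vmap_t	vmap;

	/* fs cache lock held here (not shown) */
	vmap.v_number = vp->v_number;	/* sample the version */
	/* ... fs cache lock dropped here ... */

	vn_purge(vp, &vmap);	/* quietly a no-op if vp was reclaimed meanwhile */
}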
Example #2
/*
 * Clean a vnode of filesystem-specific data and prepare it for reuse.
 */
STATIC int
vn_reclaim(
	struct vnode	*vp)
{
	int		error;

	XFS_STATS_INC(vn_reclaim);
	vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address);

	/*
	 * Only make the VOP_RECLAIM call if there are behaviors
	 * to call.
	 */
	if (vp->v_fbhv) {
		VOP_RECLAIM(vp, error);
		if (error)
			return -error;
	}
	ASSERT(vp->v_fbhv == NULL);

	VN_LOCK(vp);
	/* Clear every flag EXCEPT VRECLM and VWAIT; vn_wakeup() clears those. */
	vp->v_flag &= (VRECLM|VWAIT);
	VN_UNLOCK(vp, 0);

	vp->v_type = VNON;
	vp->v_fbhv = NULL;

#ifdef XFS_VNODE_TRACE
	ktrace_free(vp->v_trace);
	vp->v_trace = NULL;
#endif

	return 0;
}
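
A hedged sketch of the return convention: vn_reclaim() hands back 0 on success or the VOP_RECLAIM errno negated, so a caller that wants the positive errno flips the sign back. The helper name is hypothetical; vn_purge() in Example #1 shows the real in-tree caller.

static int
example_reclaim(struct vnode *vp)
{
	int	ret = vn_reclaim(vp);

	if (ret != 0)
		return -ret;	/* recover the positive errno from VOP_RECLAIM */

	vn_wakeup(vp);		/* clear VRECLM/VWAIT and wake vn_wait() sleepers */
	return 0;
}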
Example #3
/*
 *  Call VOP_INACTIVE on last reference.
 */
void
vn_rele(
	struct vnode	*vp)
{
	int		vcnt;
	int		cache;

	XFS_STATS_INC(vn_rele);

	VN_LOCK(vp);

	vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
	vcnt = vn_count(vp);

	/*
	 * Since we always get called from put_inode, we know that
	 * i_count won't be decremented after we return.
	 */
	if (!vcnt) {
		/*
		 * As soon as we turn this on, no one can find us in vn_get
		 * until we turn off VINACT or VRECLM.
		 */
		vp->v_flag |= VINACT;
		VN_UNLOCK(vp, 0);

		/*
		 * Do not make the VOP_INACTIVE call if there
		 * are no behaviors attached to the vnode to call.
		 */
		if (vp->v_fbhv)
			VOP_INACTIVE(vp, NULL, cache);

		VN_LOCK(vp);
		if (vp->v_flag & VWAIT)
			sv_broadcast(vptosync(vp));

		vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
	}

	VN_UNLOCK(vp, 0);

	vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
}
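
The comment in vn_rele() mentions being called from put_inode. Below is a hypothetical sketch of such a hook for this shim layer; LINVFS_GET_VP is assumed to be the inode-to-vnode counterpart of LINVFS_GET_IP used in Example #6.

/* Hypothetical put_inode hook (sketch); the real hook may do more. */
STATIC void
example_put_inode(struct inode *ip)
{
	struct vnode	*vp = LINVFS_GET_VP(ip);	/* assumed accessor */

	if (vp)
		vn_rele(vp);	/* VOP_INACTIVE fires on the last reference */
}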
Example #4
/*
 * Wake up any thread sleeping in vn_wait() or vn_purge() and clear
 * the transient reclaim/wait state.
 */
STATIC void
vn_wakeup(
	struct vnode	*vp)
{
	VN_LOCK(vp);
	if (vp->v_flag & VWAIT)
		sv_broadcast(vptosync(vp));
	vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED);
	VN_UNLOCK(vp, 0);
}
Example #5
/*
 * Wait for a vnode that is being inactivated or reclaimed to go
 * inert.  Returns 1 if we slept (sv_wait drops v_lock), 0 otherwise.
 */
int
vn_wait(
	struct vnode	*vp)
{
	VN_LOCK(vp);
	if (vp->v_flag & (VINACT | VRECLM)) {
		vp->v_flag |= VWAIT;
		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
		return 1;
	}
	VN_UNLOCK(vp, 0);
	return 0;
}
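
vn_wait() returns 1 only after sleeping; note that sv_wait() drops v_lock, so the early return does not leak the lock. A hypothetical caller pattern, modeled on the goto-again loop in vn_purge() from Example #1:

/* Hypothetical caller (sketch): block until vp is inert. */
static void
example_wait_inert(struct vnode *vp)
{
	while (vn_wait(vp))
		continue;	/* woke up; flags may be set again, so re-check */
}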
Example #6
/*
 * Add a reference to a referenced vnode.
 */
struct vnode *
vn_hold(
	struct vnode	*vp)
{
	struct inode	*inode;

	XFS_STATS_INC(vn_hold);

	VN_LOCK(vp);
	inode = igrab(LINVFS_GET_IP(vp));
	ASSERT(inode);
	VN_UNLOCK(vp, 0);

	return vp;
}
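
Here the vnode reference count piggybacks on the Linux inode count, which is why vn_hold() goes through igrab(). Callers in this layer typically take references through a wrapper macro; the definition below is an assumed sketch rather than a quote from the source.

/* Assumed convenience wrapper around vn_hold() (sketch). */
#define VN_HOLD(vp)	((void) vn_hold(vp))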
Example #7
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks:  ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking:  afs_xvcache lock is held. It must not be dropped.
 */
int
osi_VM_FlushVCache(struct vcache *avc)
{
    int s, code;
    vnode_t *vp = &avc->v;

    if (avc->vrefCount != 0)
	return EBUSY;

    if (avc->opens != 0)
	return EBUSY;

    /*
     * If someone holds the vcache lock, or is waiting for it, they
     * may still be referring to the vnode, so give up on flushing
     * this one.
     */
    if (CheckLock(&avc->lock) || LockWaiters(&avc->lock))
	return EBUSY;

    s = VN_LOCK(vp);

    /*
     * We just need to avoid the race in vn_rele between the ref count
     * going to 0 and VOP_INACTIVE finishing up.  Note that although
     * we checked vrefCount above, we did not hold the lock then.
     */
    if (vp->v_count > 0 || (vp->v_flag & VINACT)) {
	VN_UNLOCK(vp, s);
	return EBUSY;
    }
    VN_UNLOCK(vp, s);

    /*
     * Since we store on last close and on VOP_INACTIVE, there should
     * be NO dirty pages.  Note that we hold the xvcache lock the
     * entire time.
     */
    AFS_GUNLOCK();
    PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
    AFS_GLOCK();

    /* afs_chkpgoob will drop and re-acquire the global lock. */
    afs_chkpgoob(vp, 0);
    osi_Assert(!VN_GET_PGCNT(vp));
    osi_Assert(!AFS_VN_MAPPED(vp));
    osi_Assert(!AFS_VN_DIRTY(&avc->v));

#if defined(AFS_SGI65_ENV)
    if (vp->v_filocks)
	cleanlocks(vp, IGN_PID, 0);
    mutex_destroy(&vp->v_filocksem);
#else /* AFS_SGI65_ENV */
    if (vp->v_filocksem) {
	if (vp->v_filocks)
#ifdef AFS_SGI64_ENV
	    cleanlocks(vp, &curprocp->p_flid);
#else
	    cleanlocks(vp, IGN_PID, 0);
#endif
	osi_Assert(vp->v_filocks == NULL);
	mutex_destroy(vp->v_filocksem);
	kmem_free(vp->v_filocksem, sizeof *vp->v_filocksem);
	vp->v_filocksem = NULL;
    }
#endif /* AFS_SGI65_ENV */

    if (avc->vrefCount)
	osi_Panic("flushVcache: vm race");
#ifdef AFS_SGI64_ENV
    AFS_GUNLOCK();
    vnode_pcache_reclaim(vp);	/* this can sleep */
    vnode_pcache_free(vp);
    if (vp->v_op != &Afs_vnodeops) {
	VOP_RECLAIM(vp, FSYNC_WAIT, code);
    }
    AFS_GLOCK();
#ifdef AFS_SGI65_ENV
#ifdef VNODE_TRACING
    ktrace_free(vp->v_trace);
#endif /* VNODE_TRACING */
    vn_bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    vn_bhv_head_destroy(&(vp->v_bh));
    destroy_bitlock(&vp->v_pcacheflag);
    mutex_destroy(&vp->v_buf_lock);
#else
    bhv_remove(VN_BHV_HEAD(vp), &(avc->vc_bhv_desc));
    bhv_head_destroy(&(vp->v_bh));
#endif
    vp->v_flag = 0;		/* debug */
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    destroy_bitlock(&vp->v_flag);
#endif
#ifdef INTR_KTHREADS
    AFS_VN_DESTROY_BUF_LOCK(vp);
#endif
#endif /* AFS_SGI64_ENV */

    return 0;
}
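
The EBUSY returns above are advisory, so a cache-trimming caller simply skips busy entries and moves on. A hypothetical sketch follows; every name except osi_VM_FlushVCache() is a placeholder, and afs_xvcache is assumed held per the locking comment above.

/* Hypothetical trimming loop (sketch); list and recycle helpers are placeholders. */
static void
example_trim_cache(struct vcache *freeable_list_placeholder)
{
    struct vcache *avc;

    for (avc = freeable_list_placeholder; avc; avc = avc->next_placeholder) {
	if (osi_VM_FlushVCache(avc) == 0) {
	    /* inert: pages tossed, locks destroyed; safe to recycle */
	    recycle_vcache_placeholder(avc);
	    break;
	}
	/* EBUSY: still referenced, open, or locked; try the next one */
    }
}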
Example #8
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	vattr_t vattr;		/* local: a shared static would race concurrent opens */
	vdev_file_t *vf;
	struct vnode *vp;
	int error = 0;
	struct vnode *rootdir;

	dprintf("vdev_file_open %p\n", vd->vdev_tsd);

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	/*
	 * Reopen the device if it's not currently open.  Otherwise,
	 * just update the physical size of the device.
	 */
#ifdef _KERNEL
	if (vd->vdev_tsd != NULL) {
		ASSERT(vd->vdev_reopening);
		vf = vd->vdev_tsd;
		vnode_getwithvid(vf->vf_vnode, vf->vf_vid);
		dprintf("skip to open\n");
		goto skip_open;
	}
#endif

	vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_PUSHPAGE);

	/*
	 * We always open the files from the root of the global zone, even if
	 * we're in a local zone.  If the user has gotten to this point, the
	 * administrator has already decided that the pool should be available
	 * to local zone users, so the underlying devices should be as well.
	 */
	ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');

	/*
	 * vn_openat(char *pnamep, enum uio_seg seg, int filemode,
	 *     int createmode, struct vnode **vpp, enum create crwhy,
	 *     mode_t umask, struct vnode *startvp)
	 */

	rootdir = getrootdir();

	error = vn_openat(vd->vdev_path + 1, UIO_SYSSPACE,
	    spa_mode(vd->vdev_spa) | FOFFMAX, 0, &vp, 0, 0, rootdir);

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	vf->vf_vnode = vp;
#ifdef _KERNEL
	vf->vf_vid = vnode_vid(vp);
	dprintf("assigning vid %d\n", vf->vf_vid);

	/*
	 * Make sure it's a regular file.
	 */
	if (!vnode_isreg(vp)) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		VN_RELE(vf->vf_vnode);
		return (ENODEV);
	}

#endif

#ifdef _KERNEL
skip_open:
	/*
	 * Determine the physical size of the file.
	 */
	vattr.va_mask = AT_SIZE;
	vn_lock(vf->vf_vnode, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vf->vf_vnode, &vattr, 0, kcred, NULL);
	VN_UNLOCK(vf->vf_vnode);
#endif
	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		VN_RELE(vf->vf_vnode);
		return (error);
	}

	*max_psize = *psize = vattr.va_size;
	*ashift = SPA_MINBLOCKSHIFT;
	VN_RELE(vf->vf_vnode);

	return (0);
}
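
For context, vdev_file_open() is wired up through a vdev_ops_t table. A hedged sketch of that registration follows; the exact member list of vdev_ops_t differs between ZFS releases, so the field order here is illustrative only.

/* Illustrative registration (sketch): member set varies by ZFS release. */
vdev_ops_t vdev_file_ops = {
	vdev_file_open,		/* the function above */
	vdev_file_close,	/* assumed counterpart */
	vdev_default_asize,
	vdev_file_io_start,
	vdev_file_io_done,
	NULL,			/* vdev_op_state_change */
	VDEV_TYPE_FILE,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};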