Example 1
/*
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 *	 Shared locks are especially sensitive to having too many shared
 *	 lock counts (from the same thread) on certain paths which might
 *	 need to upgrade them.  Only one count of a shared lock can be
 *	 upgraded.
 */
hammer2_chain_t *
hammer2_inode_lock_sh(hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;

	hammer2_inode_ref(ip);
	for (;;) {
		ccms_thread_lock(&ip->topo_cst, CCMS_STATE_SHARED);

		chain = ip->chain;
		KKASSERT(chain != NULL);	/* for now */
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
					  HAMMER2_RESOLVE_SHARED);

		/*
		 * Resolve duplication races, resolve hardlinks by giving
		 * up and cycling an exclusive lock.
		 */
		if ((chain->flags & HAMMER2_CHAIN_DUPLICATED) == 0 &&
		    chain->data->ipdata.type != HAMMER2_OBJTYPE_HARDLINK) {
			break;
		}
		hammer2_chain_unlock(chain);
		ccms_thread_unlock(&ip->topo_cst);
		chain = hammer2_inode_lock_ex(ip);
		hammer2_inode_unlock_ex(ip, chain);
	}
	return (chain);
}
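
The pattern above is worth calling out: because only one shared count can be upgraded, the function never tries to upgrade in place. It detects the bad state under the shared lock, drops everything, cycles an exclusive lock (which resolves the duplication or hardlink), and retries the shared acquisition from scratch. Below is a minimal user-space sketch of the same shape using POSIX rwlocks; it is an analogy, not HAMMER2 code, and the names `needs_repair' and `repair' are hypothetical.

#include <pthread.h>
#include <stdbool.h>

struct object {
	pthread_rwlock_t lock;
	bool needs_repair;	/* stands in for the DUPLICATED/hardlink test */
};

static void
repair(struct object *obj)
{
	obj->needs_repair = false;
}

/* Returns with obj->lock held shared and the object in a stable state. */
void
lock_shared_stable(struct object *obj)
{
	for (;;) {
		pthread_rwlock_rdlock(&obj->lock);
		if (!obj->needs_repair)
			return;
		/*
		 * A shared lock cannot be upgraded in place, so drop it,
		 * cycle an exclusive lock to do the repair, and retry
		 * the shared acquisition from the top.
		 */
		pthread_rwlock_unlock(&obj->lock);
		pthread_rwlock_wrlock(&obj->lock);
		repair(obj);
		pthread_rwlock_unlock(&obj->lock);
	}
}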
Example 2
/*
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 *
 *	 Shared locks are especially sensitive to having too many shared
 *	 lock counts (from the same thread) on certain paths which might
 *	 need to upgrade them.  Only one count of a shared lock can be
 *	 upgraded.
 */
hammer2_chain_t *
hammer2_inode_lock_sh(hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;

	hammer2_inode_ref(ip);
again:
	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_SHARED);

	chain = ip->chain;
	KKASSERT(chain != NULL);	/* for now */
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS |
				  HAMMER2_RESOLVE_SHARED);

	/*
	 * Resolve duplication races
	 */
	if (hammer2_chain_refactor_test(chain, 1)) {
		hammer2_chain_unlock(chain);
		ccms_thread_unlock(&ip->topo_cst);
		chain = hammer2_inode_lock_ex(ip);
		hammer2_inode_unlock_ex(ip, chain);
		goto again;
	}
	return (chain);
}
Example 3
void
hammer2_inode_unlock_sh(hammer2_inode_t *ip, hammer2_chain_t *chain)
{
	if (chain)
		hammer2_chain_unlock(chain);
	ccms_thread_unlock(&ip->topo_cst);
	hammer2_inode_drop(ip);
}
Example 4
void
hammer2_inode_unlock_ex(hammer2_inode_t *ip, hammer2_chain_t *chain)
{
	/*
	 * XXX this will catch parent directories too which we don't
	 *     really want.
	 */
	if (chain)
		hammer2_chain_unlock(chain);
	ccms_thread_unlock(&ip->topo_cst);
	hammer2_inode_drop(ip);
}
Example 5
void
hammer2_inode_unlock_ex(hammer2_inode_t *ip, hammer2_chain_t *chain)
{
	/*
	 * XXX this will catch parent directories too which we don't
	 *     really want.
	 */
	if (ip->chain && (ip->chain->flags & (HAMMER2_CHAIN_MODIFIED |
					      HAMMER2_CHAIN_SUBMODIFIED))) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	}
	if (chain)
		hammer2_chain_unlock(ip->hmp, chain);
	ccms_thread_unlock(&ip->topo_cst);
	hammer2_inode_drop(ip);
}
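
Example 5 extends Example 4 by one step: before the lock is released, dirtiness recorded on the underlying chain (MODIFIED/SUBMODIFIED) is propagated into an inode-level flag, so a later flush pass only has to consult the inode. Below is a minimal sketch of that propagation step, using C11 atomics in place of the kernel's atomic_set_int; the names are hypothetical, not HAMMER2 code.

#include <stdatomic.h>
#include <stddef.h>

#define CHILD_MODIFIED	0x0001u
#define OWNER_MODIFIED	0x0001u

struct child {
	atomic_uint flags;
};

struct owner {
	atomic_uint flags;
	struct child *child;
};

/* Propagate the child's dirty state before the owner's lock is dropped. */
void
owner_unlock(struct owner *op)
{
	if (op->child != NULL &&
	    (atomic_load(&op->child->flags) & CHILD_MODIFIED) != 0)
		atomic_fetch_or(&op->flags, OWNER_MODIFIED);
	/* ... release op's lock and drop the reference here ... */
}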
Example 6
/*
 * The passed-in chain must be locked and the returned inode will also be
 * locked.  This routine typically locates or allocates the inode, assigns
 * ip->chain (adding a ref to chain if necessary), and returns the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * WARNING!  This routine sucks up the chain's lock (makes it part of the
 *	     inode lock from the point of view of the inode lock API),
 *	     so callers need to be careful.
 *
 * WARNING!  The mount code is allowed to pass dip == NULL for iroot and
 *	     is allowed to pass pmp == NULL and dip == NULL for sroot.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfsmount_t *pmp, hammer2_inode_t *dip,
		  hammer2_chain_t *chain)
{
	hammer2_inode_t *nip;

	KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 */
again:
	for (;;) {
		nip = hammer2_inode_lookup(pmp, chain->data->ipdata.inum);
		if (nip == NULL)
			break;
		ccms_thread_lock(&nip->topo_cst, CCMS_STATE_EXCLUSIVE);
		if ((nip->flags & HAMMER2_INODE_ONRBTREE) == 0) { /* race */
			ccms_thread_unlock(&nip->topo_cst);
			hammer2_inode_drop(nip);
			continue;
		}
		if (nip->chain != chain)
			hammer2_inode_repoint(nip, NULL, chain);

		/*
		 * Consolidated nip/nip->chain is locked (chain locked
		 * by caller).
		 */
		return (nip);
	}

	/*
	 * We couldn't find the inode number, create a new inode.
	 */
	if (pmp) {
		nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
		atomic_add_long(&pmp->inmem_inodes, 1);
		hammer2_chain_memory_inc(pmp);
		hammer2_chain_memory_wakeup(pmp);
	} else {
		nip = kmalloc(sizeof(*nip), M_HAMMER2, M_WAITOK | M_ZERO);
		nip->flags = HAMMER2_INODE_SROOT;
	}
	nip->inum = chain->data->ipdata.inum;
	nip->size = chain->data->ipdata.size;
	nip->mtime = chain->data->ipdata.mtime;
	hammer2_inode_repoint(nip, NULL, chain);
	nip->pip = dip;				/* can be NULL */
	if (dip)
		hammer2_inode_ref(dip);	/* ref dip for nip->pip */

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock_ex() call.
	 */
	nip->refs = 1;
	ccms_cst_init(&nip->topo_cst, &nip->chain);
	ccms_thread_lock(&nip->topo_cst, CCMS_STATE_EXCLUSIVE);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp) {
		spin_lock(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			spin_unlock(&pmp->inum_spin);
			ccms_thread_unlock(&nip->topo_cst);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		spin_unlock(&pmp->inum_spin);
	}

	return (nip);
}
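
The RB_INSERT failure path above is the classic optimistic lookup-or-insert: allocate and initialize the new object without holding the index lock, take the lock only for the insert, and if another thread won the race, throw the work away and start over. Below is a minimal user-space sketch of the same shape, with a mutex-protected list standing in for the inum tree; all names are hypothetical, not HAMMER2 code.

#include <pthread.h>
#include <stdlib.h>

struct node {
	unsigned long inum;
	struct node *next;
};

static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *index_head;

/* Caller must hold index_lock. */
static struct node *
lookup_locked(unsigned long inum)
{
	struct node *n;

	for (n = index_head; n != NULL; n = n->next) {
		if (n->inum == inum)
			return (n);
	}
	return (NULL);
}

/* Find or create the node for inum, retrying on insert races. */
struct node *
get_node(unsigned long inum)
{
	struct node *n, *nn;

	nn = NULL;
	for (;;) {
		pthread_mutex_lock(&index_lock);
		n = lookup_locked(inum);
		if (n == NULL && nn != NULL) {
			/* We won the race, install our new node. */
			nn->next = index_head;
			index_head = nn;
			n = nn;
			nn = NULL;
		}
		pthread_mutex_unlock(&index_lock);
		if (n != NULL) {
			free(nn);	/* raced: undo the work */
			return (n);
		}
		/* Allocate outside the lock, then retry the insert. */
		nn = calloc(1, sizeof(*nn));
		if (nn == NULL)
			return (NULL);
		nn->inum = inum;
	}
}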
Example 7
void
hammer2_mount_unlock(hammer2_mount_t *hmp)
{
	ccms_thread_unlock(&hmp->vchain.cst);
}
Example 8
/*
 * Get the vnode associated with the given inode, allocating the vnode if
 * necessary.  The vnode will be returned exclusively locked.
 *
 * The caller must lock the inode (shared or exclusive).
 *
 * Great care must be taken to avoid deadlocks and vnode acquisition/reclaim
 * races.
 */
struct vnode *
hammer2_igetv(hammer2_inode_t *ip, int *errorp)
{
	struct vnode *vp;
	hammer2_pfsmount_t *pmp;
	ccms_state_t ostate;

	pmp = ip->pmp;
	KKASSERT(pmp != NULL);
	*errorp = 0;

	for (;;) {
		/*
		 * Attempt to reuse an existing vnode assignment.  It is
		 * possible to race a reclaim so the vget() may fail.  The
		 * inode must be unlocked during the vget() to avoid a
		 * deadlock against a reclaim.
		 */
		vp = ip->vp;
		if (vp) {
			/*
			 * Inode must be unlocked during the vget() to avoid
			 * possible deadlocks, vnode is held to prevent
			 * destruction during the vget().  The vget() can
			 * still fail if we lost a reclaim race on the vnode.
			 */
			/* XXX vhold_interlocked(vp); */
			ccms_thread_unlock(&ip->chain.cst);
/* XXX
			if (vget(vp, LK_EXCLUSIVE)) {
				vdrop(vp);
				ccms_thread_lock(&ip->chain.cst,
						 CCMS_STATE_EXCLUSIVE);
				continue;
			}
*/
			ccms_thread_lock(&ip->chain.cst, CCMS_STATE_EXCLUSIVE);
			/* XXX vdrop(vp); */
			/* vp still locked and ref from vget */
			*errorp = 0;
			break;
		}

		/*
		 * No vnode exists, allocate a new vnode.  Beware of
		 * allocation races.  This function will return an
		 * exclusively locked and referenced vnode.
		 */
/* XXX
		*errorp = getnewvnode(VT_HAMMER2, pmp->mp, &vp, 0, 0);
		if (*errorp) {
			vp = NULL;
			break;
		}
*/
		/*
		 * Lock the inode and check for an allocation race.
		 */
		ostate = ccms_thread_lock_upgrade(&ip->chain.cst);
		if (ip->vp != NULL) {
			vp->v_type = VBAD;
			/* XXX vx_put(vp); */
			ccms_thread_lock_restore(&ip->chain.cst, ostate);
			continue;
		}

		switch (ip->ip_data.type) {
		case HAMMER2_OBJTYPE_DIRECTORY:
			vp->v_type = VDIR;
			break;
		case HAMMER2_OBJTYPE_REGFILE:
			vp->v_type = VREG;
			/* XVFS vinitvmio(vp, ip->ip_data.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->ip_data.size & HAMMER2_LBUFMASK);
*/
			break;
		case HAMMER2_OBJTYPE_SOFTLINK:
			/*
			 * XXX for now we are using the generic file_read
			 * and file_write code so we need a buffer cache
			 * association.
			 */
			vp->v_type = VLNK;
			/* XVFS vinitvmio(vp, ip->ip_data.size,
				  HAMMER2_LBUFSIZE,
				  (int)ip->ip_data.size & HAMMER2_LBUFMASK);
*/
			break;
		/* XXX FIFO */
		default:
			panic("hammer2: unhandled objtype %d",
			      ip->ip_data.type);
			break;
		}

		if (ip == pmp->iroot) {
			/* vsetflags(vp, VROOT); */
		}

		vp->v_data = ip;
		ip->vp = vp;
		hammer2_chain_ref(ip->hmp, &ip->chain);	/* vp association */
		ccms_thread_lock_restore(&ip->chain.cst, ostate);
		break;
	}

	/*
	 * Return non-NULL vp and *errorp == 0, or NULL vp and *errorp != 0.
	 */
	if (hammer2_debug & 0x0002) {
		kprintf("igetv vp %p refs %d aux %d\n",
			vp, vp->v_sysref.refcnt, vp->v_auxrefs);
	}
	return (vp);
}
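
The structure of the loop above is dictated by lock ordering: the vnode cannot be acquired while the inode lock is held without risking a deadlock against reclaim, so the code holds the vnode, drops the inode lock, acquires the vnode, relocks the inode, and then re-verifies that the association did not change in the window. Below is a minimal sketch of that order-inversion dance with plain mutexes; it omits the reference counting (the vhold/vget above) that keeps vp alive across the unlocked window, and all names are hypothetical, not HAMMER2 code.

#include <pthread.h>
#include <stddef.h>

struct vnode_like {
	pthread_mutex_t vlock;		/* ordered before ilock */
};

struct inode_like {
	pthread_mutex_t ilock;
	struct vnode_like *vp;		/* association protected by ilock */
};

/*
 * Called with ip->ilock held; returns with both locks held, or NULL
 * (ilock still held) if no vnode is associated.  A real version must
 * also hold a reference on vp across the unlocked window.
 */
struct vnode_like *
get_vnode(struct inode_like *ip)
{
	struct vnode_like *vp;

	for (;;) {
		vp = ip->vp;
		if (vp == NULL)
			return (NULL);	/* caller would allocate one here */
		/* Drop ilock so vlock can be taken in the proper order. */
		pthread_mutex_unlock(&ip->ilock);
		pthread_mutex_lock(&vp->vlock);
		pthread_mutex_lock(&ip->ilock);
		/* Re-verify: the association may have changed meanwhile. */
		if (ip->vp == vp)
			return (vp);
		pthread_mutex_unlock(&vp->vlock);
	}
}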