Example #1
/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non-zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
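
The refs handling above is a standard lock-free reference-count loop: snapshot the count, then use a compare-and-swap to prove the snapshot still holds at the moment of the transition. A minimal userspace analogue using C11 atomics (not the kernel's atomic_cmpset_int API) might look like the sketch below; the spinlock interlock against the inum tree is reduced to a comment.

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_uint refs;
	/* ... payload ... */
};

/*
 * Drop one reference; free the object on the 1->0 transition.  The
 * compare-and-swap confirms that the value we examined is the value
 * we are transitioning; a concurrent ref/drop simply forces a retry.
 */
static void
obj_drop(struct obj *op)
{
	unsigned int refs;

	for (;;) {
		refs = atomic_load(&op->refs);
		if (refs == 1) {
			/*
			 * Final reference.  The real code interlocks with
			 * the inum lookup tree here so no new lookup can
			 * re-reference op during the 1->0 transition.
			 */
			if (atomic_compare_exchange_strong(&op->refs,
							   &refs, 0)) {
				free(op);
				return;
			}
		} else {
			if (atomic_compare_exchange_strong(&op->refs,
							   &refs, refs - 1))
				return;
		}
	}
}

int
main(void)
{
	struct obj *op = calloc(1, sizeof(*op));

	atomic_init(&op->refs, 2);
	obj_drop(op);		/* 2 -> 1 */
	obj_drop(op);		/* 1 -> 0, frees op */
	return 0;
}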
Example #2
/*
 * Set an inode's cluster modified, marking the related chains RW and
 * duplicating them if necessary.
 *
 * The passed-in chain is a localized copy of the chain previously acquired
 * when the inode was locked (and possibly replaced in the meantime), and
 * must also be updated.  In fact, we update it first and then synchronize
 * the inode's cluster cache.
 */
hammer2_inode_data_t *
hammer2_cluster_modify_ip(hammer2_trans_t *trans, hammer2_inode_t *ip,
			  hammer2_cluster_t *cluster, int flags)
{
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);
	hammer2_cluster_modify(trans, cluster, flags);

	hammer2_inode_repoint(ip, NULL, cluster);
	if (ip->vp)
		vsetisdirty(ip->vp);
	return (&hammer2_cluster_wdata(cluster)->ipdata);
}
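
A typical call site (a hypothetical sketch; the surrounding transaction and inode-lock setup is assumed and not shown in this example set) modifies the returned writable inode data directly:

	hammer2_inode_data_t *ipdata;

	/* inode locked and transaction active (assumed) */
	ipdata = hammer2_cluster_modify_ip(trans, ip, cluster, 0);
	ipdata->nlinks += 1;		/* e.g. bump the link count */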
Example #3
/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared locks and exclusive locks on inodes.
 *
 * An inode's ip->chain pointer is resolved and stable while an inode is
 * locked, and can be cleaned out at any time (become NULL) when an inode
 * is not locked.
 *
 * The underlying chain is also locked and returned.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 */
hammer2_chain_t *
hammer2_inode_lock_ex(hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;

	hammer2_inode_ref(ip);
	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);

	/*
	 * ip->chain fixup.  Certain duplications used to move inodes
	 * into indirect blocks (for example) can cause ip->chain to
	 * become stale.
	 */
again:
	chain = ip->chain;
	if (hammer2_chain_refactor_test(chain, 1)) {
		spin_lock(&chain->core->cst.spin);
		while (hammer2_chain_refactor_test(chain, 1))
			chain = chain->next_parent;
		if (ip->chain != chain) {
			hammer2_chain_ref(chain);
			spin_unlock(&chain->core->cst.spin);
			hammer2_inode_repoint(ip, NULL, chain);
			hammer2_chain_drop(chain);
		} else {
			spin_unlock(&chain->core->cst.spin);
		}
	}

	KKASSERT(chain != NULL);	/* for now */
	hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);

	/*
	 * Resolve duplication races
	 */
	if (hammer2_chain_refactor_test(chain, 1)) {
		hammer2_chain_unlock(chain);
		goto again;
	}
	return (chain);
}
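
The lock-then-retest shape above is generic: a staleness test performed before blocking on a lock must be repeated after the lock is acquired, because the object can be superseded while we sleep on the mutex. A stripped-down, self-contained sketch with pthreads and a hypothetical stale/newer forwarding pair (standing in for the refactor test and next_parent chain):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct node {
	pthread_mutex_t lock;
	atomic_bool stale;		/* set when superseded */
	struct node *_Atomic newer;	/* forwarding pointer */
};

/*
 * Lock the current version of the node, chasing forwarding pointers.
 * The stale test is repeated after the mutex is acquired, mirroring
 * the refactor re-test above.
 */
static struct node *
node_lock_current(struct node *np)
{
again:
	while (atomic_load(&np->stale))
		np = atomic_load(&np->newer);
	pthread_mutex_lock(&np->lock);
	if (atomic_load(&np->stale)) {
		pthread_mutex_unlock(&np->lock);
		goto again;
	}
	return np;
}

int
main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER };
	struct node *np = node_lock_current(&n);

	pthread_mutex_unlock(&np->lock);
	return 0;
}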
Example #4
/*
 * HAMMER2 inode locks
 *
 * HAMMER2 offers shared locks and exclusive locks on inodes.
 *
 * An inode's ip->chain pointer is resolved and stable while an inode is
 * locked, and can be cleaned out at any time (become NULL) when an inode
 * is not locked.
 *
 * This function handles duplication races and hardlink replacement races
 * which can cause ip's cached chain to become stale.
 *
 * The underlying chain is also locked and returned.
 *
 * NOTE: We don't combine the inode/chain lock because putting away an
 *       inode would otherwise confuse multiple lock holders of the inode.
 */
hammer2_chain_t *
hammer2_inode_lock_ex(hammer2_inode_t *ip)
{
	hammer2_chain_t *chain;
	hammer2_chain_t *ochain;
	hammer2_chain_core_t *core;
	int error;

	hammer2_inode_ref(ip);
	ccms_thread_lock(&ip->topo_cst, CCMS_STATE_EXCLUSIVE);

	chain = ip->chain;
	core = chain->core;
	for (;;) {
		if (chain->flags & HAMMER2_CHAIN_DUPLICATED) {
			spin_lock(&core->cst.spin);
			while (chain->flags & HAMMER2_CHAIN_DUPLICATED)
				chain = TAILQ_NEXT(chain, core_entry);
			hammer2_chain_ref(chain);
			spin_unlock(&core->cst.spin);
			hammer2_inode_repoint(ip, NULL, chain);
			hammer2_chain_drop(chain);
		}
		hammer2_chain_lock(chain, HAMMER2_RESOLVE_ALWAYS);
		if ((chain->flags & HAMMER2_CHAIN_DUPLICATED) == 0)
			break;
		hammer2_chain_unlock(chain);
	}
	if (chain->data->ipdata.type == HAMMER2_OBJTYPE_HARDLINK &&
	    (chain->flags & HAMMER2_CHAIN_DELETED) == 0) {
		error = hammer2_hardlink_find(ip->pip, &chain, &ochain);
		hammer2_chain_drop(ochain);
		KKASSERT(error == 0);
		/* XXX error handling */
	}
	return (chain);
}
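
hammer2_hardlink_find() (not shown in this set) resolves a hardlink pointer to its hidden target by searching upward from the parent directory, since the target is placed in a directory common to all referencing entries. A simplified model of that resolution, with invented types standing in for the real API:

#include <stddef.h>

enum etype { ET_FILE, ET_DIR, ET_HARDLINK };

struct dir;

struct entry {
	enum etype	 type;
	unsigned long	 inum;	/* for ET_HARDLINK: target inode number */
};

struct dir {
	struct dir	*parent;
	/* look up an entry in this directory by inode number */
	struct entry	*(*find_inum)(struct dir *dp, unsigned long inum);
};

/*
 * Resolve a hardlink pointer: the hidden target lives in a directory
 * common to all referencing entries, so walk toward the root until a
 * directory knows the target's inode number.
 */
struct entry *
hardlink_resolve(struct dir *dp, struct entry *ep)
{
	struct entry *target;

	if (ep->type != ET_HARDLINK)
		return ep;
	for (; dp != NULL; dp = dp->parent) {
		target = dp->find_inum(dp, ep->inum);
		if (target != NULL)
			return target;
	}
	return NULL;		/* broken hardlink */
}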
Example #5
/*
 * The passed-in chain must be locked and the returned inode will also be
 * locked.  This routine typically locates or allocates the inode, assigns
 * ip->chain (adding a ref to chain if necessary), and returns the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * WARNING!  This routine sucks up the chain's lock (makes it part of the
 *	     inode lock from the point of view of the inode lock API),
 *	     so callers need to be careful.
 *
 * WARNING!  The mount code is allowed to pass dip == NULL for iroot and
 *	     is allowed to pass pmp == NULL and dip == NULL for sroot.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfsmount_t *pmp, hammer2_inode_t *dip,
		  hammer2_chain_t *chain)
{
	hammer2_inode_t *nip;

	KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 */
again:
	for (;;) {
		nip = hammer2_inode_lookup(pmp, chain->data->ipdata.inum);
		if (nip == NULL)
			break;
		ccms_thread_lock(&nip->topo_cst, CCMS_STATE_EXCLUSIVE);
		if ((nip->flags & HAMMER2_INODE_ONRBTREE) == 0) { /* race */
			ccms_thread_unlock(&nip->topo_cst);
			hammer2_inode_drop(nip);
			continue;
		}
		if (nip->chain != chain)
			hammer2_inode_repoint(nip, NULL, chain);

		/*
		 * Consolidated nip/nip->chain is locked (chain locked
		 * by caller).
		 */
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode.
	 */
	if (pmp) {
		nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
		atomic_add_long(&pmp->inmem_inodes, 1);
		hammer2_chain_memory_inc(pmp);
		hammer2_chain_memory_wakeup(pmp);
	} else {
		nip = kmalloc(sizeof(*nip), M_HAMMER2, M_WAITOK | M_ZERO);
		nip->flags = HAMMER2_INODE_SROOT;
	}
	nip->inum = chain->data->ipdata.inum;
	nip->size = chain->data->ipdata.size;
	nip->mtime = chain->data->ipdata.mtime;
	hammer2_inode_repoint(nip, NULL, chain);
	nip->pip = dip;				/* can be NULL */
	if (dip)
		hammer2_inode_ref(dip);	/* ref dip for nip->pip */

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock_ex() call.
	 */
	nip->refs = 1;
	ccms_cst_init(&nip->topo_cst, &nip->chain);
	ccms_thread_lock(&nip->topo_cst, CCMS_STATE_EXCLUSIVE);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp) {
		spin_lock(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			spin_unlock(&pmp->inum_spin);
			ccms_thread_unlock(&nip->topo_cst);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		spin_unlock(&pmp->inum_spin);
	}

	return (nip);
}
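
The insert-or-retry dance at the end is a common concurrency pattern: optimistically build the new object outside the lock, then treat an insertion collision as having lost the race, undo all the work, and loop back to the lookup, which will now find the winner. A self-contained userspace sketch, with a mutex-protected list standing in for the inum RB tree:

#include <pthread.h>
#include <stdlib.h>

struct inode {
	unsigned long	 inum;
	unsigned int	 refs;
	struct inode	*next;
};

static struct inode	*inum_head;
static pthread_mutex_t	 inum_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller holds inum_lock */
static struct inode *
inum_lookup(unsigned long inum)
{
	struct inode *ip;

	for (ip = inum_head; ip; ip = ip->next) {
		if (ip->inum == inum)
			return ip;
	}
	return NULL;
}

/*
 * Find or create the inode for (inum).  If the insert collides we
 * raced another creator: undo all the work and retry, the next
 * lookup will find the winner.
 */
struct inode *
inode_get(unsigned long inum)
{
	struct inode *ip, *nip;

again:
	pthread_mutex_lock(&inum_lock);
	ip = inum_lookup(inum);
	if (ip) {
		++ip->refs;
		pthread_mutex_unlock(&inum_lock);
		return ip;
	}
	pthread_mutex_unlock(&inum_lock);

	nip = calloc(1, sizeof(*nip));		/* build outside the lock */
	nip->inum = inum;
	nip->refs = 1;

	pthread_mutex_lock(&inum_lock);
	if (inum_lookup(inum) != NULL) {	/* lost the race */
		pthread_mutex_unlock(&inum_lock);
		free(nip);
		goto again;
	}
	nip->next = inum_head;
	inum_head = nip;
	pthread_mutex_unlock(&inum_lock);
	return nip;
}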
Example #6
/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfsmount_t *pmp;
	hammer2_inode_t *pip;
	u_int refs;

	while (ip) {
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 *
			 * NOTE: The super-root inode has no pmp.
			 */
			pmp = ip->pmp;
			if (pmp)
				spin_lock(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(ip->topo_cst.count == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
				}
				if (pmp)
					spin_unlock(&pmp->inum_spin);

				pip = ip->pip;
				ip->pip = NULL;
				ip->pmp = NULL;

				/*
				 * Cleaning out ip->chain isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				/*
				 * We have to drop pip (if non-NULL) to
				 * dispose of our implied reference from
				 * ip->pip.  We can simply loop on it.
				 */
				if (pmp) {
					KKASSERT((ip->flags &
						  HAMMER2_INODE_SROOT) == 0);
					kfree(ip, pmp->minode);
					atomic_add_long(&pmp->inmem_inodes, -1);
				} else {
					KKASSERT(ip->flags &
						 HAMMER2_INODE_SROOT);
					kfree(ip, M_HAMMER2);
				}
				ip = pip;
				/* continue with pip (can be NULL) */
			} else {
				if (pmp)
					spin_unlock(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non-zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
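
Note the tail of the zero-transition path: rather than recursing to drop the implied reference on ip->pip, the function assigns ip = pip and loops, keeping the stack flat no matter how deep the parent chain goes. The skeleton of that idiom, single-threaded for brevity (the real code keeps the atomic cmpset loop around the decrement):

#include <stdlib.h>

struct node {
	struct node	*parent;	/* implied ref held on parent */
	unsigned int	 refs;
};

void
node_drop(struct node *np)
{
	struct node *parent;

	while (np) {
		if (--np->refs != 0)
			break;
		parent = np->parent;	/* take over the implied ref */
		free(np);
		np = parent;		/* continue with parent (can be NULL) */
	}
}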
Example #7
/*
 * Given an exclusively locked inode and chain we consolidate its chain
 * for hardlink creation, adding (nlinks) to the file's link count and
 * potentially relocating the inode to a directory common to ip->pip and tdip.
 *
 * Replaces (*chainp) if consolidation occurred, unlocking the old chain
 * and returning a new locked chain.
 *
 * NOTE!  This function will also replace ip->chain.
 */
int
hammer2_hardlink_consolidate(hammer2_trans_t *trans,
			     hammer2_inode_t *ip, hammer2_chain_t **chainp,
			     hammer2_inode_t *cdip, hammer2_chain_t **cdchainp,
			     int nlinks)
{
	hammer2_inode_data_t *ipdata;
	hammer2_chain_t *chain;
	hammer2_chain_t *nchain;
	int error;

	chain = *chainp;
	if (nlinks == 0 &&			/* no hardlink needed */
	    (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		return (0);
	}
	if (hammer2_hardlink_enable < 0) {	/* fake hardlinks */
		return (0);
	}

	if (hammer2_hardlink_enable == 0) {	/* disallow hardlinks */
		hammer2_chain_unlock(chain);
		*chainp = NULL;
		return (ENOTSUP);
	}

	/*
	 * If no change in the hardlink's target directory is required and
	 * this is already a hardlink target, all we need to do is adjust
	 * the link count.
	 */
	if (cdip == ip->pip &&
	    (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
		if (nlinks) {
			hammer2_chain_modify(trans, &chain, 0);
			chain->data->ipdata.nlinks += nlinks;
		}
		error = 0;
		goto done;
	}


	/*
	 * chain is the real inode.  If it's visible we have to convert it
	 * to a hardlink pointer.  If it is not visible then it is already
	 * a hardlink target and only needs to be deleted.
	 */
	KKASSERT((chain->flags & HAMMER2_CHAIN_DELETED) == 0);
	KKASSERT(chain->data->ipdata.type != HAMMER2_OBJTYPE_HARDLINK);
	if (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE) {
		/*
		 * We are going to duplicate chain later, causing its
		 * media block to be shifted to the duplicate.  Even though
		 * we are delete-duplicating nchain here it might decide not
		 * to reallocate the block.  Set FORCECOW to force it to.
		 */
		nchain = chain;
		hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
		atomic_set_int(&nchain->flags, HAMMER2_CHAIN_FORCECOW);
		hammer2_chain_delete_duplicate(trans, &nchain,
					       HAMMER2_DELDUP_RECORE);
		KKASSERT((chain->flags & HAMMER2_CHAIN_DUPLICATED) == 0);

		ipdata = &nchain->data->ipdata;
		ipdata->target_type = ipdata->type;
		ipdata->type = HAMMER2_OBJTYPE_HARDLINK;
		ipdata->uflags = 0;
		ipdata->rmajor = 0;
		ipdata->rminor = 0;
		ipdata->ctime = 0;
		ipdata->mtime = 0;
		ipdata->atime = 0;
		ipdata->btime = 0;
		bzero(&ipdata->uid, sizeof(ipdata->uid));
		bzero(&ipdata->gid, sizeof(ipdata->gid));
		ipdata->op_flags = HAMMER2_OPFLAG_DIRECTDATA;
		ipdata->cap_flags = 0;
		ipdata->mode = 0;
		ipdata->size = 0;
		ipdata->nlinks = 1;
		ipdata->iparent = 0;	/* XXX */
		ipdata->pfs_type = 0;
		ipdata->pfs_inum = 0;
		bzero(&ipdata->pfs_clid, sizeof(ipdata->pfs_clid));
		bzero(&ipdata->pfs_fsid, sizeof(ipdata->pfs_fsid));
		ipdata->data_quota = 0;
		ipdata->data_count = 0;
		ipdata->inode_quota = 0;
		ipdata->inode_count = 0;
		ipdata->attr_tid = 0;
		ipdata->dirent_tid = 0;
		bzero(&ipdata->u, sizeof(ipdata->u));
		/* XXX transaction ids */
	} else {
		hammer2_chain_delete(trans, chain, 0);
		nchain = NULL;
	}

	/*
	 * chain represents the hardlink target and is now flagged deleted.
	 * duplicate it to the parent directory and adjust nlinks.
	 *
	 * WARNING! The shiftup() call can cause nchain to be moved into
	 *	    an indirect block, and our nchain will wind up pointing
	 *	    to the older/original version.
	 */
	KKASSERT(chain->flags & HAMMER2_CHAIN_DELETED);
	hammer2_hardlink_shiftup(trans, &chain, cdip, cdchainp, nlinks, &error);

	if (error == 0)
		hammer2_inode_repoint(ip, cdip, chain);

	/*
	 * Unlock the original chain last as the lock blocked races against
	 * the creation of the new hardlink target.
	 */
	if (nchain)
		hammer2_chain_unlock(nchain);

done:
	/*
	 * Cleanup, chain/nchain already dealt with.
	 */
	*chainp = chain;
	hammer2_inode_drop(cdip);

	return (error);
}
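
The (*chainp) convention used here is worth calling out: the caller passes its locked chain by reference, and on return must continue with whatever *chainp now points at, because consolidation may have retired the original. A trivial sketch of just the calling convention, with invented types:

#include <stdio.h>

struct handle {
	int version;
};

static struct handle h_old = { 1 };
static struct handle h_new = { 2 };

/*
 * Hypothetical consolidation step: may retire *hp and replace it
 * with a newer handle.  Returns 0 on success.
 */
int
consolidate(struct handle **hp)
{
	if ((*hp)->version < 2)
		*hp = &h_new;		/* caller must use the new handle */
	return 0;
}

int
main(void)
{
	struct handle *h = &h_old;

	if (consolidate(&h) == 0)
		printf("now using version %d\n", h->version);
	return 0;
}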
Example #8
/*
 * Given an exclusively locked inode we consolidate its chain for hardlink
 * creation, adding (nlinks) to the file's link count and potentially
 * relocating the inode to a directory common to ip->pip and tdip.
 *
 * Replaces (*chainp) if consolidation occurred, unlocking the old chain
 * and returning a new locked chain.
 *
 * NOTE!  This function will also replace ip->chain.
 */
int
hammer2_hardlink_consolidate(hammer2_trans_t *trans, hammer2_inode_t *ip,
			     hammer2_chain_t **chainp,
			     hammer2_inode_t *tdip, int nlinks)
{
	hammer2_inode_data_t *ipdata;
	hammer2_inode_t *fdip;
	hammer2_inode_t *cdip;
	hammer2_chain_t *chain;
	hammer2_chain_t *nchain;
	int error;

	chain = *chainp;
	if (nlinks == 0 &&			/* no hardlink needed */
	    (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		return (0);
	}
	if (hammer2_hardlink_enable < 0) {	/* fake hardlinks */
		return (0);
	}

	if (hammer2_hardlink_enable == 0) {	/* disallow hardlinks */
		hammer2_chain_unlock(chain);
		*chainp = NULL;
		return (ENOTSUP);
	}

	/*
	 * cdip will be returned with a ref, but not locked.
	 */
	fdip = ip->pip;
	cdip = hammer2_inode_common_parent(fdip, tdip);

	/*
	 * If no change in the hardlink's target directory is required and
	 * this is already a hardlink target, all we need to do is adjust
	 * the link count.
	 *
	 * XXX The common parent is a bit wiggly due to duplication from
	 *     renames.  Compare the core (RBTREE) pointer instead of the
	 *     ip's.
	 */
	if (cdip == fdip &&
	    (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
		if (nlinks) {
			hammer2_chain_modify(trans, &chain, 0);
			chain->data->ipdata.nlinks += nlinks;
		}
		error = 0;
		goto done;
	}

	/*
	 * We either have to move an existing hardlink target or we have
	 * to create a fresh hardlink target.
	 *
	 * Hardlink targets are hidden inodes in a parent directory common
	 * to all directory entries referencing the hardlink.
	 */
	nchain = hammer2_hardlink_shiftup(trans, &chain, cdip, &error);

	if (error == 0) {
		/*
		 * Bump nlinks on duplicated hidden inode, repoint
		 * ip->chain.
		 */
		hammer2_chain_modify(trans, &nchain, 0);
		nchain->data->ipdata.nlinks += nlinks;
		hammer2_inode_repoint(ip, cdip, nchain);

		/*
		 * If the old chain is not a hardlink target then replace
		 * it with a OBJTYPE_HARDLINK pointer.
		 *
		 * If the old chain IS a hardlink target then delete it.
		 */
		if (chain->data->ipdata.name_key & HAMMER2_DIRHASH_VISIBLE) {
			/*
			 * Replace original non-hardlink that's been dup'd
			 * with a special hardlink directory entry.  We must
			 * set the DIRECTDATA flag to prevent sub-chains
			 * from trying to synchronize to the inode if the
			 * file is extended afterwards.
			 */
			hammer2_chain_modify(trans, &chain, 0);
			hammer2_chain_delete_duplicate(trans, &chain,
						       HAMMER2_DELDUP_RECORE);
			ipdata = &chain->data->ipdata;
			ipdata->target_type = ipdata->type;
			ipdata->type = HAMMER2_OBJTYPE_HARDLINK;
			ipdata->uflags = 0;
			ipdata->rmajor = 0;
			ipdata->rminor = 0;
			ipdata->ctime = 0;
			ipdata->mtime = 0;
			ipdata->atime = 0;
			ipdata->btime = 0;
			bzero(&ipdata->uid, sizeof(ipdata->uid));
			bzero(&ipdata->gid, sizeof(ipdata->gid));
			ipdata->op_flags = HAMMER2_OPFLAG_DIRECTDATA;
			ipdata->cap_flags = 0;
			ipdata->mode = 0;
			ipdata->size = 0;
			ipdata->nlinks = 1;
			ipdata->iparent = 0;	/* XXX */
			ipdata->pfs_type = 0;
			ipdata->pfs_inum = 0;
			bzero(&ipdata->pfs_clid, sizeof(ipdata->pfs_clid));
			bzero(&ipdata->pfs_fsid, sizeof(ipdata->pfs_fsid));
			ipdata->data_quota = 0;
			ipdata->data_count = 0;
			ipdata->inode_quota = 0;
			ipdata->inode_count = 0;
			ipdata->attr_tid = 0;
			ipdata->dirent_tid = 0;
			bzero(&ipdata->u, sizeof(ipdata->u));
			/* XXX transaction ids */
		} else {
			hammer2_chain_delete(trans, chain);
		}

		/*
		 * Return the new chain.
		 */
		hammer2_chain_unlock(chain);
		chain = nchain;
	} else {
		/*
		 * Return an error
		 */
		hammer2_chain_unlock(chain);
		chain = NULL;
	}

	/*
	 * Cleanup, chain/nchain already dealt with.
	 */
done:
	*chainp = chain;
	hammer2_inode_drop(cdip);

	return (error);
}
Example #9
/*
 * XXX this API needs a rewrite.  It needs to be split into a
 * hammer2_inode_alloc() and hammer2_inode_build() to allow us to get
 * rid of the inode/chain lock reversal fudge.
 *
 * Returns the inode associated with the passed-in cluster, allocating a new
 * hammer2_inode structure if necessary, then synchronizing it to the passed
 * xop cluster.  When synchronizing, if idx >= 0, only cluster index (idx)
 * is synchronized.  Otherwise the whole cluster is synchronized.  inum will
 * be extracted from the passed-in xop and the inum argument will be ignored.
 *
 * If xop is passed as NULL then a new hammer2_inode is allocated with the
 * specified inum, and returned.   For normal inodes, the inode will be
 * indexed in memory and if it already exists the existing ip will be
 * returned instead of allocating a new one.  The superroot and PFS inodes
 * are not indexed in memory.
 *
 * The passed-in cluster must be locked and will remain locked on return.
 * The returned inode will be locked and the caller may dispose of both
 * via hammer2_inode_unlock() + hammer2_inode_drop().  However, if the caller
 * needs to resolve a hardlink it must ref/unlock/relock/drop the inode.
 *
 * The hammer2_inode structure regulates the interface between the high level
 * kernel VNOPS API and the filesystem backend (the chains).
 *
 * On return the inode is locked with the supplied cluster.
 */
hammer2_inode_t *
hammer2_inode_get(hammer2_pfs_t *pmp, hammer2_xop_head_t *xop,
		  hammer2_tid_t inum, int idx)
{
	hammer2_inode_t *nip;
	const hammer2_inode_data_t *iptmp;
	const hammer2_inode_data_t *nipdata;

	KKASSERT(xop == NULL ||
		 hammer2_cluster_type(&xop->cluster) ==
		 HAMMER2_BREF_TYPE_INODE);
	KKASSERT(pmp);

	/*
	 * Interlocked lookup/ref of the inode.  This code is only needed
	 * when looking up inodes with nlinks != 0 (TODO: optimize out
	 * otherwise and test for duplicates).
	 *
	 * Cluster can be NULL during the initial pfs allocation.
	 */
	if (xop) {
		iptmp = &hammer2_xop_gdata(xop)->ipdata;
		inum = iptmp->meta.inum;
		hammer2_xop_pdata(xop);
	}
again:
	nip = hammer2_inode_lookup(pmp, inum);
	if (nip) {
		/*
		 * We may have to unhold the cluster to avoid a deadlock
		 * against vnlru (and possibly other XOPs).
		 */
		if (xop) {
			if (hammer2_mtx_ex_try(&nip->lock) != 0) {
				hammer2_cluster_unhold(&xop->cluster);
				hammer2_mtx_ex(&nip->lock);
				hammer2_cluster_rehold(&xop->cluster);
			}
		} else {
			hammer2_mtx_ex(&nip->lock);
		}

		/*
		 * Handle SMP race (not applicable to the super-root spmp
		 * which can't index inodes due to duplicative inode numbers).
		 */
		if (pmp->spmp_hmp == NULL &&
		    (nip->flags & HAMMER2_INODE_ONRBTREE) == 0) {
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		if (xop) {
			if (idx >= 0)
				hammer2_inode_repoint_one(nip, &xop->cluster,
							  idx);
			else
				hammer2_inode_repoint(nip, NULL, &xop->cluster);
		}
		return nip;
	}

	/*
	 * We couldn't find the inode number, create a new inode and try to
	 * insert it, handle insertion races.
	 */
	nip = kmalloc(sizeof(*nip), pmp->minode, M_WAITOK | M_ZERO);
	spin_init(&nip->cluster_spin, "h2clspin");
	atomic_add_long(&pmp->inmem_inodes, 1);
	if (pmp->spmp_hmp)
		nip->flags = HAMMER2_INODE_SROOT;

	/*
	 * Initialize nip's cluster.  A cluster is provided for normal
	 * inodes but typically not for the super-root or PFS inodes.
	 */
	nip->cluster.refs = 1;
	nip->cluster.pmp = pmp;
	nip->cluster.flags |= HAMMER2_CLUSTER_INODE;
	if (xop) {
		nipdata = &hammer2_xop_gdata(xop)->ipdata;
		nip->meta = nipdata->meta;
		hammer2_xop_pdata(xop);
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);
		hammer2_inode_repoint(nip, NULL, &xop->cluster);
	} else {
		nip->meta.inum = inum;		/* PFS inum is always 1 XXX */
		/* mtime will be updated when a cluster is available */
		atomic_set_int(&nip->flags, HAMMER2_INODE_METAGOOD);	/*XXX*/
	}

	nip->pmp = pmp;

	/*
	 * ref and lock on nip gives it state compatible to after a
	 * hammer2_inode_lock() call.
	 */
	nip->refs = 1;
	hammer2_mtx_init(&nip->lock, "h2inode");
	hammer2_mtx_init(&nip->truncate_lock, "h2trunc");
	hammer2_mtx_ex(&nip->lock);
	TAILQ_INIT(&nip->depend_static.sideq);
	/* combination of thread lock and chain lock == inode lock */

	/*
	 * Attempt to add the inode.  If it fails we raced another inode
	 * get.  Undo all the work and try again.
	 */
	if (pmp->spmp_hmp == NULL) {
		hammer2_spin_ex(&pmp->inum_spin);
		if (RB_INSERT(hammer2_inode_tree, &pmp->inum_tree, nip)) {
			hammer2_spin_unex(&pmp->inum_spin);
			hammer2_mtx_unlock(&nip->lock);
			hammer2_inode_drop(nip);
			goto again;
		}
		atomic_set_int(&nip->flags, HAMMER2_INODE_ONRBTREE);
		++pmp->inum_count;
		hammer2_spin_unex(&pmp->inum_spin);
	}
	return (nip);
}
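
The unhold/relock sequence near the top is a classic deadlock-avoidance move: holding resource A while blocking on lock B invites an ABBA deadlock, so a failed trylock on B triggers a back-off (release A, block on B, re-acquire A). Reduced to two pthread mutexes, assuming B-then-A is a safe acquisition order:

#include <pthread.h>

/*
 * We hold A (the cluster hold in the code above) and need B (the
 * inode lock).  If B can't be taken immediately, drop A so a peer
 * holding B and wanting A can make progress, then reacquire both.
 */
void
lock_b_holding_a(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (pthread_mutex_trylock(b) != 0) {
		pthread_mutex_unlock(a);	/* back off */
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);		/* safe: B-then-A order */
	}
}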
Example #10
/*
 * Create a new, normal inode.  This function will create the inode,
 * the media chains, but will not insert the chains onto the media topology
 * (doing so would require a flush transaction and cause long stalls).
 *
 * Caller must be in a normal transaction.
 */
hammer2_inode_t *
hammer2_inode_create_normal(hammer2_inode_t *pip,
			    struct vattr *vap, struct ucred *cred,
			    hammer2_key_t inum, int *errorp)
{
	hammer2_xop_create_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	int error;
	uid_t xuid;
	uuid_t pip_uid;
	uuid_t pip_gid;
	uint32_t pip_mode;
	uint8_t pip_comp_algo;
	uint8_t pip_check_algo;
	hammer2_tid_t pip_inum;
	uint8_t type;

	dip = pip->pmp->iroot;
	KKASSERT(dip != NULL);

	*errorp = 0;

	/*hammer2_inode_lock(dip, 0);*/

	pip_uid = pip->meta.uid;
	pip_gid = pip->meta.gid;
	pip_mode = pip->meta.mode;
	pip_comp_algo = pip->meta.comp_algo;
	pip_check_algo = pip->meta.check_algo;
	pip_inum = (pip == pip->pmp->iroot) ? 1 : pip->meta.inum;

	/*
	 * Create the in-memory hammer2_inode structure for the specified
	 * inode.
	 */
	nip = hammer2_inode_get(dip->pmp, NULL, inum, -1);
	nip->comp_heuristic = 0;
	KKASSERT((nip->flags & HAMMER2_INODE_CREATING) == 0 &&
		 nip->cluster.nchains == 0);
	atomic_set_int(&nip->flags, HAMMER2_INODE_CREATING);

	/*
	 * Setup the inode meta-data
	 */
	nip->meta.type = hammer2_get_obj_type(vap->va_type);

	switch (nip->meta.type) {
	case HAMMER2_OBJTYPE_CDEV:
	case HAMMER2_OBJTYPE_BDEV:
		nip->meta.rmajor = vap->va_rmajor;
		nip->meta.rminor = vap->va_rminor;
		break;
	default:
		break;
	}
	type = nip->meta.type;

	KKASSERT(nip->meta.inum == inum);
	nip->meta.iparent = pip_inum;

	/* Inherit parent's inode compression mode. */
	nip->meta.comp_algo = pip_comp_algo;
	nip->meta.check_algo = pip_check_algo;
	nip->meta.version = HAMMER2_INODE_VERSION_ONE;
	hammer2_update_time(&nip->meta.ctime);
	nip->meta.mtime = nip->meta.ctime;
	nip->meta.mode = vap->va_mode;
	nip->meta.nlinks = 1;

	xuid = hammer2_to_unix_xid(&pip_uid);
	xuid = vop_helper_create_uid(dip->pmp->mp, pip_mode,
				     xuid, cred,
				     &vap->va_mode);
	if (vap->va_vaflags & VA_UID_UUID_VALID)
		nip->meta.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.uid, vap->va_uid);
	else
		hammer2_guid_to_uuid(&nip->meta.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		nip->meta.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer2_guid_to_uuid(&nip->meta.gid, vap->va_gid);
	else
		nip->meta.gid = pip_gid;

	/*
	 * Regular files and softlinks allow a small amount of data to be
	 * directly embedded in the inode.  This flag will be cleared if
	 * the size is extended past the embedded limit.
	 */
	if (nip->meta.type == HAMMER2_OBJTYPE_REGFILE ||
	    nip->meta.type == HAMMER2_OBJTYPE_SOFTLINK) {
		nip->meta.op_flags |= HAMMER2_OPFLAG_DIRECTDATA;
	}

	/*
	 * Create the inode using (inum) as the key.  Pass pip for
	 * method inheritance.
	 */
	xop = hammer2_xop_alloc(pip, HAMMER2_XOP_MODIFYING);
	xop->lhc = inum;
	xop->flags = 0;
	xop->meta = nip->meta;
	KKASSERT(vap);

	xop->meta.name_len = hammer2_xop_setname_inum(&xop->head, inum);
	xop->meta.name_key = inum;
	nip->meta.name_len = xop->meta.name_len;
	nip->meta.name_key = xop->meta.name_key;
	hammer2_inode_modify(nip);

	/*
	 * Create the inode media chains but leave them detached.  We are
	 * not in a flush transaction so we can't mess with media topology
	 * above normal inodes (i.e. the index of the inodes themselves).
	 *
	 * We've already set the INODE_CREATING flag.  The inode's media
	 * chains will be inserted onto the media topology on the next
	 * filesystem sync.
	 */
	hammer2_xop_start(&xop->head, &hammer2_inode_create_det_desc);

	error = hammer2_xop_collect(&xop->head, 0);
#if INODE_DEBUG
	kprintf("create inode type %d error %d\n", nip->meta.type, error);
#endif

	if (error) {
		*errorp = error;
		goto done;
	}

	/*
	 * Associate the media chains created by the backend with the
	 * frontend inode.
	 */
	hammer2_inode_repoint(nip, NULL, &xop->head.cluster);
done:
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*hammer2_inode_unlock(dip);*/

	return (nip);
}