/* Example 1 */
/*
 * Report the total number of allocation blocks consumed by each
 * individual metadata file.
 */
static errno_t
hfs_fsinfo_metadata_blocks(struct hfsmount *hfsmp, struct hfs_fsinfo_metadata *fsinfo)
{
	int lockflags;
	int ret_lockflags;

	/*
	 * Reading the block counts for every metadata file is quick, so
	 * take shared locks on all of the btrees at once rather than one
	 * at a time.
	 */
	lockflags = SFL_CATALOG | SFL_EXTENTS | SFL_BITMAP | SFL_ATTRIBUTE;
	ret_lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_SHARED_LOCK);

	/* Block counts for each of the btrees; the attribute file may be absent */
	fsinfo->extents    = hfsmp->hfs_extents_cp->c_datafork->ff_blocks;
	fsinfo->catalog    = hfsmp->hfs_catalog_cp->c_datafork->ff_blocks;
	fsinfo->allocation = hfsmp->hfs_allocation_cp->c_datafork->ff_blocks;
	fsinfo->attribute  = hfsmp->hfs_attribute_cp ?
	    hfsmp->hfs_attribute_cp->c_datafork->ff_blocks : 0;

	/* Done with the btrees; drop the locks before touching the journal info */
	hfs_systemfile_unlock(hfsmp, ret_lockflags);

	/* Journal size expressed in allocation blocks, rounded up */
	fsinfo->journal = howmany(hfsmp->jnl_size, hfsmp->blockSize);

	return 0;
}
/* Example 2 */
/*
 * This function provides information about total number of extents (including
 * extents from overflow extents btree, if any) for each individual metadata
 * file.
 *
 * Returns 0 on success, or an error from hfs_count_overflow_extents().
 */
static errno_t
hfs_fsinfo_metadata_extents(struct hfsmount *hfsmp, struct hfs_fsinfo_metadata *fsinfo)
{
	int error = 0;
	int lockflags = 0;
	int ret_lockflags = 0;
	uint32_t overflow_count;

	/*
	 * Counting the number of extents for all metadata files should
	 * be a relatively quick operation, so we grab locks for all the
	 * btrees at the same time
	 */
	lockflags = SFL_CATALOG | SFL_EXTENTS | SFL_BITMAP | SFL_ATTRIBUTE;
	ret_lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_SHARED_LOCK);

	/* Get number of extents for extents overflow btree */
	fsinfo->extents = hfs_count_extents_fp(hfsmp->hfs_extents_cp->c_datafork);

	/*
	 * Get number of extents for catalog btree.  If all of the in-line
	 * extent slots (kHFSPlusExtentDensity) are in use, the file may
	 * have more extents in the overflow btree; count those too.
	 */
	fsinfo->catalog = hfs_count_extents_fp(hfsmp->hfs_catalog_cp->c_datafork);
	if (fsinfo->catalog >= kHFSPlusExtentDensity) {
		error = hfs_count_overflow_extents(hfsmp, kHFSCatalogFileID, &overflow_count);
		if (error) {
			goto out;
		}
		fsinfo->catalog += overflow_count;
	}

	/* Get number of extents for allocation file */
	fsinfo->allocation = hfs_count_extents_fp(hfsmp->hfs_allocation_cp->c_datafork);
	if (fsinfo->allocation >= kHFSPlusExtentDensity) {
		error = hfs_count_overflow_extents(hfsmp, kHFSAllocationFileID, &overflow_count);
		if (error) {
			goto out;
		}
		fsinfo->allocation += overflow_count;
	}

	/*
	 * Get number of extents for attribute btree.
	 * hfs_attribute_cp might be NULL.
	 */
	if (hfsmp->hfs_attribute_cp) {
		fsinfo->attribute = hfs_count_extents_fp(hfsmp->hfs_attribute_cp->c_datafork);
		if (fsinfo->attribute >= kHFSPlusExtentDensity) {
			error = hfs_count_overflow_extents(hfsmp, kHFSAttributesFileID, &overflow_count);
			if (error) {
				goto out;
			}
			fsinfo->attribute += overflow_count;
		}
	} else {
		/*
		 * No attribute file on this volume.  Explicitly report zero
		 * extents, consistent with hfs_fsinfo_metadata_blocks(),
		 * instead of leaving the field with whatever the caller
		 * passed in.
		 */
		fsinfo->attribute = 0;
	}
	/* Journal always has one extent */
	fsinfo->journal = 1;
out:
	hfs_systemfile_unlock(hfsmp, ret_lockflags);
	return error;
}
/* Example 3 */
/*
 * This function provides percentage of free nodes vs total nodes for each
 * individual metadata btree, i.e. for catalog, overflow extents and
 * attributes btree.  This information is not applicable for allocation
 * file and journal file.
 *
 * Returns 0.
 */
static errno_t
hfs_fsinfo_metadata_percentfree(struct hfsmount *hfsmp, struct hfs_fsinfo_metadata *fsinfo)
{
	int lockflags = 0;
	int ret_lockflags = 0;
	BTreeControlBlockPtr btreePtr;
	uint32_t free_nodes, total_nodes;

	/*
	 * Getting total and used nodes for all metadata btrees should
	 * be a relatively quick operation, so we grab locks for all the
	 * btrees at the same time
	 */
	lockflags = SFL_CATALOG | SFL_EXTENTS | SFL_BITMAP | SFL_ATTRIBUTE;
	ret_lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_SHARED_LOCK);

	/* Overflow extents btree */
	btreePtr = VTOF(hfsmp->hfs_extents_vp)->fcbBTCBPtr;
	total_nodes = btreePtr->totalNodes;
	free_nodes = btreePtr->freeNodes;
	fsinfo->extents = hfs_percent(free_nodes, total_nodes);

	/* Catalog btree */
	btreePtr = VTOF(hfsmp->hfs_catalog_vp)->fcbBTCBPtr;
	total_nodes = btreePtr->totalNodes;
	free_nodes = btreePtr->freeNodes;
	fsinfo->catalog = hfs_percent(free_nodes, total_nodes);

	/* Attributes btree.  hfs_attribute_vp may be NULL. */
	if (hfsmp->hfs_attribute_vp) {
		btreePtr = VTOF(hfsmp->hfs_attribute_vp)->fcbBTCBPtr;
		total_nodes = btreePtr->totalNodes;
		free_nodes = btreePtr->freeNodes;
		fsinfo->attribute = hfs_percent(free_nodes, total_nodes);
	} else {
		/*
		 * No attribute btree on this volume.  Explicitly report zero,
		 * consistent with hfs_fsinfo_metadata_blocks(), rather than
		 * leaving the field with whatever the caller passed in.
		 */
		fsinfo->attribute = 0;
	}

	hfs_systemfile_unlock(hfsmp, ret_lockflags);
	return 0;
}
/* Example 4 */
/* 
 * Function to traverse all the records of a btree and then call caller-provided 
 * callback function for every record found.  The type of btree is chosen based 
 * on the fileID provided by the caller.  This function grabs the correct locks 
 * depending on the type of btree it will be traversing and flags provided 
 * by the caller.
 *
 * Note: It might drop and reacquire the locks during execution, so records
 * inserted or removed while the locks are dropped may be missed; callers use
 * this for aggregate statistics where that is acceptable.
 */
static errno_t
traverse_btree(struct hfsmount *hfsmp, uint32_t btree_fileID, traverse_btree_flag_t flags,
			   void *fsinfo, int (*callback)(struct hfsmount *, HFSPlusKey *, HFSPlusRecord *, void *))
{
	int error = 0;
	int lockflags = 0;	/* lock bits we request for this btree */
	int ret_lockflags = 0;	/* lock bits actually granted; pass these to unlock */
	FCB *fcb;
	struct BTreeIterator *iterator = NULL;
	struct FSBufferDescriptor btdata;
	int btree_operation;
	HFSPlusRecord record;
	HFSPlusKey *key;
	uint64_t start, timeout_abs;

	/* Select the btree to walk (and the lock that protects it) from the file ID */
	switch(btree_fileID) {
		case kHFSExtentsFileID: 
			fcb = VTOF(hfsmp->hfs_extents_vp);
			lockflags = SFL_EXTENTS;
			break;
		case kHFSCatalogFileID:
			fcb = VTOF(hfsmp->hfs_catalog_vp);
			lockflags = SFL_CATALOG;
			break;
		case kHFSAttributesFileID:
			// Attributes file doesn't exist; there are no records to iterate.
			if (hfsmp->hfs_attribute_vp == NULL)
				return error;
			fcb = VTOF(hfsmp->hfs_attribute_vp);
			lockflags = SFL_ATTRIBUTE;
			break;

		default:
			return EINVAL;
	}

	/* M_WAITOK: allocation sleeps instead of failing; M_ZERO: iterator starts zeroed */
	MALLOC(iterator, struct BTreeIterator *, sizeof(struct BTreeIterator), M_TEMP, M_WAITOK | M_ZERO);

	/* The key is initialized to zero because we are traversing entire btree */
	key = (HFSPlusKey *)&iterator->key;

	/* Caller may additionally want the extents btree locked during the walk */
	if (flags & TRAVERSE_BTREE_EXTENTS) {
		lockflags |= SFL_EXTENTS;
	}

	/* One record at a time is read into the stack buffer `record` */
	btdata.bufferAddress = &record;
	btdata.itemSize = sizeof(HFSPlusRecord);
	btdata.itemCount = 1;

	/* Lock btree for duration of traversal */
	ret_lockflags = hfs_systemfile_lock(hfsmp, lockflags, HFS_SHARED_LOCK);
	btree_operation = kBTreeFirstRecord;

	/* Compute the absolute-time budget for holding the locks before yielding */
	nanoseconds_to_absolutetime(HFS_FSINFO_MAX_LOCKHELD_TIME, &timeout_abs);
	start = mach_absolute_time();

	while (1) {

		/*
		 * NOTE(review): msleep with PCATCH appears to serve purely as a
		 * signal-delivery checkpoint so a user can abort a long traversal
		 * (EINTR aborts the loop); confirm that a NULL channel/timeout
		 * returns immediately when no signal is pending.
		 */
		if (msleep(NULL, NULL, PINOD | PCATCH,
				   "hfs_fsinfo", NULL) == EINTR) {
			error = EINTR;
			break;
		}

		error = BTIterateRecord(fcb, btree_operation, iterator, &btdata, NULL);
		if (error != 0) {
			/* Running off the end of the btree is normal termination, not an error */
			if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) {
				error = 0;
			}
			break;
		}
		/* Lookup next btree record on next call to BTIterateRecord() */
		btree_operation = kBTreeNextRecord;

		/* Call our callback function and stop iteration if there are any errors */
		error = callback(hfsmp, key, &record, fsinfo);
		if (error) {
			break;
		}

		/* let someone else use the tree after we've processed over HFS_FSINFO_MAX_LOCKHELD_TIME */
		if ((mach_absolute_time() - start) >= timeout_abs) {

			/* release b-tree locks and let someone else get the lock */
			hfs_systemfile_unlock (hfsmp, ret_lockflags);

			/* add tsleep here to force context switch and fairness */
			tsleep((caddr_t)hfsmp, PRIBIO, "hfs_fsinfo", 1);

			/*
			 * re-acquire the locks in the same way that we wanted them originally.
			 * note: it is subtle but worth pointing out that in between the time that we
			 * released and now want to re-acquire these locks that the b-trees may have shifted
			 * slightly but significantly. For example, the catalog or other b-tree could have grown
			 * past 8 extents and now requires the extents lock to be held in order to be safely
			 * manipulated. We can't be sure of the state of the b-tree from where we last left off.
			 */

			ret_lockflags = hfs_systemfile_lock (hfsmp, lockflags, HFS_SHARED_LOCK);

			/*
			 * It's highly likely that the search key we stashed away before dropping lock
			 * no longer points to an existing item.  Iterator's IterateRecord is able to
			 * re-position itself and process the next record correctly.  With lock dropped,
			 * there might be records missed for statistic gathering, which is ok. The
			 * point is to get aggregate values.
			 */

			/* restart the lock-held timer */
			start = mach_absolute_time();

			/* loop back around and get another record */
		}
	}

	hfs_systemfile_unlock(hfsmp, ret_lockflags);
	FREE (iterator, M_TEMP);
	return MacToVFSError(error);
}
/* Example 5 */
/*
 * hfs_vnop_lookup - VNOP entry point for looking up a component name in
 * directory vnode ap->a_dvp.
 *
 * First consults the VFS name cache via cache_lookup(); on a hit, the vnode
 * comes back with an iocount already taken.  Cached hardlink vnodes (and any
 * cache hit during a forced case-sensitive lookup on a case-insensitive
 * volume) are re-validated against the catalog and may be discarded, in
 * which case the lookup is redriven through hfs_lookup().
 *
 * On success, *ap->a_vpp holds the target vnode with an iocount; on failure
 * an errno is returned and *ap->a_vpp is NULL (or the iocount was dropped).
 */
int
hfs_vnop_lookup(struct vnop_lookup_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp;
	struct cnode *cp;
	struct cnode *dcp;
	struct hfsmount *hfsmp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = vfs_context_proc(ap->a_context);
	int flags = cnp->cn_flags;
	int force_casesensitive_lookup = proc_is_forcing_hfs_case_sensitivity(p);
	int cnode_locked;

	*vpp = NULL;
	dcp = VTOC(dvp);
	
	hfsmp = VTOHFS(dvp);

	/*
	 * Lookup an entry in the cache
	 *
	 * If the lookup succeeds, the vnode is returned in *vpp,
	 * and a status of -1 is returned.
	 *
	 * If the lookup determines that the name does not exist
	 * (negative cacheing), a status of ENOENT is returned.
	 *
	 * If the lookup fails, a status of zero is returned.
	 */
	error = cache_lookup(dvp, vpp, cnp);
	if (error != -1) {
		if ((error == ENOENT) && (cnp->cn_nameiop != CREATE))		
			goto exit;	/* found a negative cache entry */
		goto lookup;		/* did not find it in the cache */
	}
	/*
	 * We have a name that matched
	 * cache_lookup returns the vp with an iocount reference already taken
	 */
	error = 0;
	vp = *vpp;
	cp = VTOC(vp);
	
	/* We aren't allowed to vend out vp's via lookup to the hidden directory */
	if (cp->c_cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid ||
		cp->c_cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) {
		/* Drop the iocount from cache_lookup */
		vnode_put (vp);
		error = ENOENT;
		goto exit;
	}
	
	
	/*
	 * If this is a hard-link vnode then we need to update
	 * the name (of the link), the parent ID, the cnid, the
	 * text encoding and the catalog hint.  This enables
	 * getattrlist calls to return the correct link info.
	 */

	/*
	 * Alternatively, if we are forcing a case-sensitive lookup
	 * on a case-insensitive volume, the namecache entry
	 * may have been for an incorrect case. Since we cannot
	 * determine case vs. normalization, redrive the catalog
	 * lookup based on any byte mismatch.
	 */
	if (((flags & ISLASTCN) && (cp->c_flag & C_HARDLINK))
		|| (force_casesensitive_lookup && !(hfsmp->hfs_flags & HFS_CASE_SENSITIVE))) {
		int stale_link = 0;

		/*
		 * NOTE(review): the return value of hfs_lock() is ignored here;
		 * presumably HFS_LOCK_ALLOW_NOEXISTS means it cannot fail for a
		 * cnode we hold an iocount on — confirm against hfs_lock().
		 */
		hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);	
		if ((cp->c_parentcnid != dcp->c_cnid) ||
		    (cnp->cn_namelen != cp->c_desc.cd_namelen) ||
		    (bcmp(cnp->cn_nameptr, cp->c_desc.cd_nameptr, cp->c_desc.cd_namelen) != 0)) {
			struct cat_desc desc;
			struct cat_attr lookup_attr;
			int lockflags;

			if (force_casesensitive_lookup && !(hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
				/*
				 * Since the name in the cnode doesn't match our lookup
				 * string exactly, do a full lookup.
				 */
				hfs_unlock (cp);

				vnode_put(vp);
				goto lookup;
			}

			/*
			 * Get an updated descriptor
			 */
			desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
			desc.cd_namelen = cnp->cn_namelen;
			desc.cd_parentcnid = dcp->c_fileid;
			desc.cd_hint = dcp->c_childhint;
			desc.cd_encoding = 0;
			desc.cd_cnid = 0;
			desc.cd_flags = S_ISDIR(cp->c_mode) ? CD_ISDIR : 0;

			/*
			 * Because lookups call replace_desc to put a new descriptor in
			 * the cnode we are modifying it is possible that this cnode's 
			 * descriptor is out of date for the parent ID / name that
			 * we are trying to look up. (It may point to a different hardlink).
			 *
			 * We need to be cautious that when re-supplying the 
			 * descriptor below that the results of the catalog lookup
			 * still point to the same raw inode for the hardlink.  This would 
			 * not be the case if we found something in the cache above but 
			 * the vnode it returned no longer has a valid hardlink for the 
			 * parent ID/filename combo we are requesting.  (This is because 
			 * hfs_unlink does not directly trigger namecache removal). 
			 *
			 * As a result, before vending out the vnode (and replacing
			 * its descriptor) verify that the fileID is the same by comparing
			 * the in-cnode attributes vs. the one returned from the lookup call
			 * below.  If they do not match, treat this lookup as if we never hit
			 * in the cache at all.
			 */

			lockflags = hfs_systemfile_lock(VTOHFS(dvp), SFL_CATALOG, HFS_SHARED_LOCK);		
		
			error = cat_lookup(VTOHFS(vp), &desc, 0, 0, &desc, &lookup_attr, NULL, NULL);	
			
			hfs_systemfile_unlock(VTOHFS(dvp), lockflags);

			/* 
			 * Note that cat_lookup may fail to find something with the name provided in the
			 * stack-based descriptor above. In that case, an ENOENT is a legitimate errno
			 * to be placed in error, which will get returned in the fastpath below.
			 */
			if (error == 0) {
				if (lookup_attr.ca_fileid == cp->c_attr.ca_fileid) {
					/* It still points to the right raw inode.  Replacing the descriptor is fine */
					replace_desc (cp, &desc);

					/* 
					 * Save the origin info for file and directory hardlinks.  Directory hardlinks 
					 * need the origin for '..' lookups, and file hardlinks need it to ensure that 
					 * competing lookups do not cause us to vend different hardlinks than the ones requested.
					 * We want to restrict saving the cache entries to LOOKUP namei operations, since
					 * we're really doing this to protect getattr.
					 */
					if (cnp->cn_nameiop == LOOKUP) {
						hfs_savelinkorigin(cp, dcp->c_fileid);
					}
				}
				else {
					/* If the fileID does not match then do NOT replace the descriptor! */
					stale_link = 1;
				}	
			}
		}
		hfs_unlock (cp);
		
		if (stale_link) {
			/* 
			 * If we had a stale_link, then we need to pretend as though
			 * we never found this vnode and force a lookup through the 
			 * traditional path.  Drop the iocount acquired through 
			 * cache_lookup above and force a cat lookup / getnewvnode
			 */
			vnode_put(vp);
			goto lookup;
		}
		
		if (error) {
			/* 
			 * If the cat_lookup failed then the caller will not expect 
			 * a vnode with an iocount on it.
			 */
			vnode_put(vp);
		}

	}	
	goto exit;
	
lookup:
	/*
	 * The vnode was not in the name cache or it was stale.
	 *
	 * So we need to do a real lookup.
	 */
	cnode_locked = 0;

	error = hfs_lookup(dvp, vpp, cnp, &cnode_locked, force_casesensitive_lookup);
	
	/* hfs_lookup may return the child cnode locked; drop the lock before returning */
	if (cnode_locked)
		hfs_unlock(VTOC(*vpp));
exit:
	{
	uthread_t ut = (struct uthread *)get_bsdthread_info(current_thread());

	/*
	 * check to see if we issued any I/O while completing this lookup and
	 * this thread/task is throttleable... if so, throttle now
	 *
	 * this allows us to throttle in between multiple meta data reads that
	 * might result due to looking up a long pathname (since we'll have to
	 * re-enter hfs_vnop_lookup for each component of the pathnam not in
	 * the VFS cache), instead of waiting until the entire path lookup has
	 * completed and throttling at the systemcall return
	 */
	if (__improbable(ut->uu_lowpri_window)) {
		throttle_lowpri_io(1);
	}
	}

	return (error);
}
/* Example 6 */
/*
 * hfs_lookup - look up *cnp in directory *dvp, returning the result in *vpp.
 *
 * On success *vpp holds the target vnode; when *cnode_locked is set on
 * return, the child cnode's lock is held and the caller must unlock it.
 * We create a cnode for the file if needed, but we do NOT open the file here.
 *
 *	IN  struct vnode *dvp              - parent directory vnode
 *	OUT struct vnode **vpp             - target vnode (new node if it did not exist)
 *	IN  struct componentname *cnp      - name of file being looked up
 *	OUT int *cnode_locked              - set when *vpp's cnode is returned locked
 *	IN  int force_casesensitive_lookup - force a byte-exact catalog lookup even
 *	                                     on case-insensitive volumes
 *
 * May restart itself from `retry` when the directory is being modified, or
 * when the item found by cat_lookup disappears or changes before
 * hfs_getnewvnode can vend a vnode for it.
 */
static int
hfs_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, int *cnode_locked, int force_casesensitive_lookup)
{
	struct cnode *dcp;	/* cnode for directory being searched */
	struct vnode *tvp;	/* target vnode */
	struct hfsmount *hfsmp;
	int flags;
	int nameiop;
	int retval = 0;
	int isDot;
	struct cat_desc desc;
	struct cat_desc cndesc;
	struct cat_attr attr;
	struct cat_fork fork;
	int lockflags;
	int newvnode_flags;

  retry:
	/* Reset all state: we can arrive here multiple times via goto retry */
	newvnode_flags = 0;
	dcp = NULL;
	hfsmp = VTOHFS(dvp);
	*vpp = NULL;
	*cnode_locked = 0;
	isDot = FALSE;
	tvp = NULL;
	nameiop = cnp->cn_nameiop;
	flags = cnp->cn_flags;
	bzero(&desc, sizeof(desc));

	/*
	 * First check to see if it is a . or .., else look it up.
	 */
	if (flags & ISDOTDOT) {		/* Wanting the parent */
		cnp->cn_flags &= ~MAKEENTRY;
		goto found;	/* .. is always defined */
	} else if ((cnp->cn_nameptr[0] == '.') && (cnp->cn_namelen == 1)) {
		isDot = TRUE;
		cnp->cn_flags &= ~MAKEENTRY;
		goto found;	/* We always know who we are */
	} else {
		if (hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
			retval = ENOENT;  /* The parent no longer exists ? */
			goto exit;
		}
		dcp = VTOC(dvp);

		if (dcp->c_flag & C_DIR_MODIFICATION) {
		    // XXXdbg - if we could msleep on a lck_rw_t then we would do that
		    //          but since we can't we have to unlock, delay for a bit
		    //          and then retry...
		    // msleep((caddr_t)&dcp->c_flag, &dcp->c_rwlock, PINOD, "hfs_vnop_lookup", 0);
		    hfs_unlock(dcp);
		    tsleep((caddr_t)dvp, PRIBIO, "hfs_lookup", 1);

		    goto retry;
		}


		/*
		 * We shouldn't need to go to the catalog if there are no children.
		 * However, in the face of a minor disk corruption where the valence of
		 * the directory is off, we could infinite loop here if we return ENOENT
		 * even though there are actually items in the directory.  (create will
		 * see the ENOENT, try to create something, which will return with 
		 * EEXIST over and over again).  As a result, always check the catalog.
		 */

		bzero(&cndesc, sizeof(cndesc));
		cndesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
		cndesc.cd_namelen = cnp->cn_namelen;
		cndesc.cd_parentcnid = dcp->c_fileid;
		cndesc.cd_hint = dcp->c_childhint;

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

		retval = cat_lookup(hfsmp, &cndesc, 0, force_casesensitive_lookup, &desc, &attr, &fork, NULL);
		
		hfs_systemfile_unlock(hfsmp, lockflags);

		if (retval == 0) {
			/* Remember where we found the child to speed up future lookups */
			dcp->c_childhint = desc.cd_hint;
			/*
			 * Note: We must drop the parent lock here before calling
			 * hfs_getnewvnode (which takes the child lock).
			 */
			hfs_unlock(dcp);
			dcp = NULL;
			
			/* Verify that the item just looked up isn't one of the hidden directories. */
			if (desc.cd_cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid ||
				desc.cd_cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) {
				retval = ENOENT;
				goto exit;
			}
			
			goto found;
		}
		
		/*
		 * ENAMETOOLONG supersedes other errors
		 *
		 * For a CREATE or RENAME operation on the last component
		 * the ENAMETOOLONG will be handled in the next VNOP.
		 */
		if ((retval != ENAMETOOLONG) && 
		    (cnp->cn_namelen > kHFSPlusMaxFileNameChars) &&
		    (((flags & ISLASTCN) == 0) || ((nameiop != CREATE) && (nameiop != RENAME)))) {
			retval = ENAMETOOLONG;
		} else if (retval == 0) {
			retval = ENOENT;
		} else if (retval == ERESERVEDNAME) {
			/*
			 * We found the name in the catalog, but it is unavailable
			 * to us. The exact error to return to our caller depends
			 * on the operation, and whether we've already reached the
			 * last path component. In all cases, avoid a negative
			 * cache entry, since someone else may be able to access
			 * the name if their lookup is configured differently.
			 */

			cnp->cn_flags &= ~MAKEENTRY;

			if (((flags & ISLASTCN) == 0) || ((nameiop == LOOKUP) || (nameiop == DELETE))) {
				/* A reserved name for a pure lookup is the same as the path not being present */
				retval = ENOENT;
			} else {
				/* A reserved name with intent to create must be rejected as impossible */
				retval = EEXIST;
			}
		}
		if (retval != ENOENT)
			goto exit;
		/*
		 * This is a non-existing entry
		 *
		 * If creating, and at end of pathname and current
		 * directory has not been removed, then can consider
		 * allowing file to be created.
		 */
		if ((nameiop == CREATE || nameiop == RENAME ||
		    (nameiop == DELETE &&
		    (cnp->cn_flags & DOWHITEOUT) &&
		    (cnp->cn_flags & ISWHITEOUT))) &&
		    (flags & ISLASTCN) &&
		    !(ISSET(dcp->c_flag, C_DELETED | C_NOEXISTS))) {
			retval = EJUSTRETURN;
			goto exit;
		}
		/*
		 * Insert name into the name cache (as non-existent).
		 */
		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0 &&
		    (cnp->cn_flags & MAKEENTRY) &&
		    (nameiop != CREATE)) {
			cache_enter(dvp, NULL, cnp);
			dcp->c_flag |= C_NEG_ENTRIES;
		}
		goto exit;
	}

found:
	/* For the last component of DELETE/RENAME, suppress name caching */
	if (flags & ISLASTCN) {
		switch(nameiop) {
		case DELETE:
			cnp->cn_flags &= ~MAKEENTRY;
			break;

		case RENAME:
			cnp->cn_flags &= ~MAKEENTRY;
			if (isDot) {
				retval = EISDIR;
				goto exit;
			}
			break;
		}
	}

	if (isDot) {
		if ((retval = vnode_get(dvp)))
			goto exit;
		*vpp = dvp;
	} else if (flags & ISDOTDOT) {
		/*
		 * Directory hard links can have multiple parents so
		 * find the appropriate parent for the current thread.
		 */
		if ((retval = hfs_vget(hfsmp, hfs_currentparent(VTOC(dvp)), &tvp, 0, 0))) {
			goto exit;
		}
		*cnode_locked = 1;
		*vpp = tvp;
	} else {
		int type = (attr.ca_mode & S_IFMT);

		/* Only directories and symlinks can appear mid-path */
		if (!(flags & ISLASTCN) && (type != S_IFDIR) && (type != S_IFLNK)) {
			retval = ENOTDIR;
			goto exit;
		}
		/* Don't cache directory hardlink names. */
		if (attr.ca_recflags & kHFSHasLinkChainMask) {
			cnp->cn_flags &= ~MAKEENTRY;
		}
		/* Names with composed chars are not cached. */
		if (cnp->cn_namelen != desc.cd_namelen)
			cnp->cn_flags &= ~MAKEENTRY;

		retval = hfs_getnewvnode(hfsmp, dvp, cnp, &desc, 0, &attr, &fork, &tvp, &newvnode_flags);

		if (retval) {
			/*
			 * If this was a create/rename operation lookup, then by this point
			 * we expected to see the item returned from hfs_getnewvnode above.  
			 * In the create case, it would probably eventually bubble out an EEXIST 
			 * because the item existed when we were trying to create it.  In the 
			 * rename case, it would let us know that we need to go ahead and 
			 * delete it as part of the rename.  However, if we hit the condition below
			 * then it means that we found the element during cat_lookup above, but 
			 * it is now no longer there.  We simply behave as though we never found
			 * the element at all and return EJUSTRETURN.
			 */  
			if ((retval == ENOENT) &&
					((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)) &&
					(flags & ISLASTCN)) {
				retval = EJUSTRETURN;
			}
			
			/*
			 * If this was a straight lookup operation, we may need to redrive the entire 
			 * lookup starting from cat_lookup if the element was deleted as the result of 
			 * a rename operation.  Since rename is supposed to guarantee atomicity, then
			 * lookups cannot fail because the underlying element is deleted as a result of
			 * the rename call -- either they returned the looked up element prior to rename
			 * or return the newer element.  If we are in this region, then all we can do is add
			 * workarounds to guarantee the latter case. The element has already been deleted, so
			 * we just re-try the lookup to ensure the caller gets the most recent element.
			 */
			if ((retval == ENOENT) && (cnp->cn_nameiop == LOOKUP) &&
				(newvnode_flags & (GNV_CHASH_RENAMED | GNV_CAT_DELETED))) {
				if (dcp) {
					hfs_unlock (dcp);
				}
				/* get rid of any name buffers that may have lingered from the cat_lookup call */
				cat_releasedesc (&desc);
				goto retry;
			}

			/* Also, re-drive the lookup if the item we looked up was a hardlink, and the number 
			 * or name of hardlinks has changed in the interim between the cat_lookup above, and
			 * our call to hfs_getnewvnode.  hfs_getnewvnode will validate the cattr we passed it
			 * against what is actually in the catalog after the cnode is created.  If there were
			 * any issues, it will bubble out ERECYCLE, which we need to swallow and use as the
			 * key to redrive as well.  We need to special case this below because in this case, 
			 * it needs to occur regardless of the type of lookup we're doing here.  
			 */
			if ((retval == ERECYCLE) && (newvnode_flags & GNV_CAT_ATTRCHANGED)) {
				if (dcp) {
					hfs_unlock (dcp);
				}
				/* get rid of any name buffers that may have lingered from the cat_lookup call */
				cat_releasedesc (&desc);
				retval = 0;
				goto retry;
			}

			/* skip to the error-handling code if we can't retry */
			goto exit;
		}

		/* 
		 * Save the origin info for file and directory hardlinks.  Directory hardlinks 
		 * need the origin for '..' lookups, and file hardlinks need it to ensure that 
		 * competing lookups do not cause us to vend different hardlinks than the ones requested.
		 * We want to restrict saving the cache entries to LOOKUP namei operations, since
		 * we're really doing this to protect getattr.
		 */
		if ((nameiop == LOOKUP) && (VTOC(tvp)->c_flag & C_HARDLINK)) {
			hfs_savelinkorigin(VTOC(tvp), VTOC(dvp)->c_fileid);
		}
		*cnode_locked = 1;
		*vpp = tvp;
	}
exit:
	if (dcp) {
		hfs_unlock(dcp);
	}
	/* Release any name buffer held by the catalog descriptor */
	cat_releasedesc(&desc);
	return (retval);
}