Example #1
static int mdevrw(dev_t dev, struct uio *uio, __unused int ioflag) {
	int 			status;
	addr64_t		mdata;
	int 			devid;
	enum uio_seg 	saveflag;

	devid = minor(dev);									/* Get minor device number */

	if (devid >= 16) return (ENXIO);					/* Not valid: mdev[] has 16 entries, so 16 is out of range */
	if (!(mdev[devid].mdFlags & mdInited)) return (ENXIO);	/* Have we actually been defined yet? */

	mdata = ((addr64_t)mdev[devid].mdBase << 12) + uio->uio_offset;	/* Point to the area in "file" */
	
	saveflag = uio->uio_segflg;							/* Remember what the request is */
#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
	  panic("mdevrw - invalid uio_segflg\n"); 
	}
#endif /* LP64_DEBUG */
	/* Make sure we are moving from physical ram if physical device */
	if (mdev[devid].mdFlags & mdPhys) {
		if (uio->uio_segflg == UIO_USERSPACE64) 
			uio->uio_segflg = UIO_PHYS_USERSPACE64;	
		else if (uio->uio_segflg == UIO_USERSPACE32)
			uio->uio_segflg = UIO_PHYS_USERSPACE32;	
		else
			uio->uio_segflg = UIO_PHYS_USERSPACE;	
	}
	status = uiomove64(mdata, uio_resid(uio), uio);		/* Move the data */
	uio->uio_segflg = saveflag;							/* Restore the flag */

	return (status);
}
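Usage note: mdBase holds the backing store's base address in 4 KB pages, so shifting it left by 12 bits converts it to a byte address before the request offset is added. A worked example with hypothetical values (not from the source):

	/* hypothetical: mdBase = 0x10 (in 4 KB pages), uio_offset = 0x200 (bytes) */
	mdata = ((addr64_t)0x10 << 12) + 0x200;		/* = 0x10000 + 0x200 = 0x10200 */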
Example #2
/*
 * uio_reset - reset an uio_t.
 * 	Reset the given uio_t to initial values.  The uio_t is not fully initialized
 * 	until all iovecs are added using uio_addiov calls.
 *	The a_iovcount value passed to uio_create is the maximum number of 
 *	iovecs you may add.
 */
void uio_reset( uio_t a_uio,
				off_t a_offset,			/* current offset */
				int a_spacetype,		/* type of address space */
				int a_iodirection )		/* read or write flag */
{
	vm_size_t	my_size;
	int			my_max_iovs;
	u_int32_t	my_old_flags;
	
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__); 
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__); 
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__); 
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	my_size = a_uio->uio_size;
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	bzero(a_uio, my_size);
	a_uio->uio_size = my_size;
	a_uio->uio_segflg = a_spacetype;
	if (my_max_iovs > 0) {
		a_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)a_uio) + sizeof(struct uio));
	}
	else {
		a_uio->uio_iovs.uiovp = NULL;
	}
	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}
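As the header comment notes, a reset uio_t carries no iovecs until uio_addiov is called again. A minimal usage sketch against the uio KPI (buffer names and lengths here are hypothetical):

	uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);		/* room for one iovec */
	uio_addiov(uio, CAST_USER_ADDR_T(read_buf), read_len);
	/* ... perform the read ... */
	uio_reset(uio, 0, UIO_SYSSPACE, UIO_WRITE);			/* reuse the same uio_t */
	uio_addiov(uio, CAST_USER_ADDR_T(write_buf), write_len);	/* iovecs must be re-added */
	/* ... perform the write ... */
	uio_free(uio);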
Example #3
/*
 * uio_createwithbuffer - create an uio_t.
 * 	Create a uio_t using the given buffer.  The uio_t
 *	is not fully initialized until all iovecs are added using uio_addiov calls.
 *	a_iovcount is the maximum number of iovecs you may add.
 *	This call may fail if the given buffer is not large enough.
 */
__private_extern__ uio_t 
	uio_createwithbuffer( int a_iovcount,		/* number of iovecs */
				  			off_t a_offset,		/* current offset */
				  			int a_spacetype,	/* type of address space */
				 			int a_iodirection,	/* read or write flag */
				 			void *a_buf_p,		/* pointer to a uio_t buffer */
				 			int a_buffer_size )	/* size of uio_t buffer */
{
	uio_t				my_uio = (uio_t) a_buf_p;
	int					my_size;
	
	my_size = sizeof(struct uio) + (sizeof(struct user_iovec) * a_iovcount);
	if (a_buffer_size < my_size) {
#if DEBUG
		panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__); 
#endif /* DEBUG */
		return( NULL );
	}
	my_size = a_buffer_size;
	
#if DEBUG
	if (my_uio == NULL) {
		panic("%s :%d - NULL uio_t buffer pointer\n", __FILE__, __LINE__); 
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__); 
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__); 
	}
	if (a_iovcount > UIO_MAXIOV) {
		panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__); 
	}
#endif /* DEBUG */

	bzero(my_uio, my_size);
	my_uio->uio_size = my_size;

	/* we use uio_segflg to indicate if the uio_t is the new format or */
	/* old (pre LP64 support) legacy format */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		my_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE:
		my_uio->uio_segflg = UIO_SYSSPACE32;
		break;
	case UIO_PHYS_USERSPACE:
		my_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	case UIO_PHYS_SYSSPACE:
		my_uio->uio_segflg = UIO_PHYS_SYSSPACE32;
		break;
	default:
		my_uio->uio_segflg = a_spacetype;
		break;
	}

	if (a_iovcount > 0) {
		my_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)my_uio) + sizeof(struct uio));
	}
	else {
		my_uio->uio_iovs.uiovp = NULL;
	}

	my_uio->uio_max_iovs = a_iovcount;
	my_uio->uio_offset = a_offset;
	my_uio->uio_rw = a_iodirection;
	my_uio->uio_flags = UIO_FLAGS_INITED;

	return( my_uio );
}
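Because the caller supplies the storage, this variant needs no allocation; the buffer must be at least UIO_SIZEOF(a_iovcount) bytes. Example #6 (namei) uses exactly this pattern; a minimal sketch:

	char uio_buf[ UIO_SIZEOF(1) ];			/* stack storage sized for one iovec */
	uio_t auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
					  &uio_buf[0], sizeof(uio_buf));
	if (auio != NULL)
		uio_addiov(auio, CAST_USER_ADDR_T(cp), MAXPATHLEN);	/* cp: a caller-owned buffer */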
Example #4
	// LP64todo - fix this! 'n' should be int64_t?
int
uiomove64(const addr64_t c_cp, int n, struct uio *uio)
{
	addr64_t cp = c_cp;
#if LP64KERN
	uint64_t acnt;
#else
	u_int acnt;
#endif
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); 
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		acnt = uio_iov_len(uio);
		if (acnt == 0) {
			uio_next_iov(uio);
			uio->uio_iovcnt--;
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);

					error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.iov64p->iov_base, acnt );

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);

			        error = copyin(uio->uio_iovs.iov64p->iov_base, CAST_DOWN(caddr_t, cp), acnt);

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);

					error = copyout( CAST_DOWN(caddr_t, cp), CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), acnt );

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);

			        error = copyin(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), CAST_DOWN(caddr_t, cp), acnt);

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), (caddr_t)uio->uio_iovs.iov32p->iov_base,
						   acnt);
			else
				error = copywithin((caddr_t)uio->uio_iovs.iov32p->iov_base, CAST_DOWN(caddr_t, cp),
						   acnt);
			break;

		case UIO_PHYS_USERSPACE64:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov64p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);

				error = copypv(uio->uio_iovs.iov64p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, (addr64_t)uio->uio_iovs.iov32p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);

				error = copypv((addr64_t)uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_PHYS_SYSSPACE32:
		case UIO_PHYS_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov32p->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);

				error = copypv(uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);
			  }
			if (error)
				return (error);
			break;

		default:
			break;
		}
		uio_iov_base_add(uio, acnt);
#if LP64KERN
		uio_iov_len_add(uio, -((int64_t)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int64_t)acnt)));
#else
		uio_iov_len_add(uio, -((int)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int)acnt)));
#endif
		uio->uio_offset += acnt;
		cp += acnt;
		n -= acnt;
	}
	return (error);
}
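A hedged caller sketch: the copy direction comes from uio->uio_rw (UIO_READ copies out of the source address into the iovecs, UIO_WRITE copies in) and the address-space handling from uio->uio_segflg; mdevrw in Example #1 is a representative caller. kaddr is hypothetical here:

	int status = uiomove64(kaddr, uio_resid(uio), uio);	/* LP64todo: resid may exceed int */
	if (status)
		return (status);	/* EFAULT etc. propagated from copyin/copyout/copypv */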
Example #5
/*
 * uio_reset - reset an uio_t.
 * 	Reset the given uio_t to initial values.  The uio_t is not fully initialized
 * 	until all iovecs are added using uio_addiov calls.
 *	The a_iovcount value passed to uio_create is the maximum number of 
 *	iovecs you may add.
 */
void uio_reset( uio_t a_uio,
				off_t a_offset,			/* current offset */
				int a_spacetype,		/* type of address space */
				int a_iodirection )		/* read or write flag */
{
	vm_size_t	my_size;
	int			my_max_iovs;
	u_int32_t	my_old_flags;
	
#if LP64_DEBUG
	if (a_uio == NULL) {
		panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__); 
	}
	if (!IS_VALID_UIO_SEGFLG(a_spacetype)) {
		panic("%s :%d - invalid address space type\n", __FILE__, __LINE__); 
	}
	if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) {
		panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__); 
	}
#endif /* LP64_DEBUG */

	if (a_uio == NULL) {
		return;
	}

	my_size = a_uio->uio_size;
	my_old_flags = a_uio->uio_flags;
	my_max_iovs = a_uio->uio_max_iovs;
	bzero(a_uio, my_size);
	a_uio->uio_size = my_size;

	/*
	 * we use uio_segflg to indicate if the uio_t is the new format or
	 * old (pre LP64 support) legacy format
	 * This switch statement should canonicalize incoming space type
	 * to one of UIO_USERSPACE32/64, UIO_PHYS_USERSPACE32/64, or
	 * UIO_SYSSPACE/UIO_PHYS_SYSSPACE
	 */
	switch (a_spacetype) {
	case UIO_USERSPACE:
		a_uio->uio_segflg = UIO_USERSPACE32;
		break;
	case UIO_SYSSPACE32:
		a_uio->uio_segflg = UIO_SYSSPACE;
		break;
	case UIO_PHYS_USERSPACE:
		a_uio->uio_segflg = UIO_PHYS_USERSPACE32;
		break;
	default:
		a_uio->uio_segflg = a_spacetype;
		break;
	}

	if (my_max_iovs > 0) {
		a_uio->uio_iovs.uiovp = (struct user_iovec *)
			(((uint8_t *)a_uio) + sizeof(struct uio));
	}
	else {
		a_uio->uio_iovs.uiovp = NULL;
	}

	a_uio->uio_max_iovs = my_max_iovs;
	a_uio->uio_offset = a_offset;
	a_uio->uio_rw = a_iodirection;
	a_uio->uio_flags = my_old_flags;

	return;
}
Example #6
/*
 * Convert a pathname into a pointer to a locked inode.
 *
 * The FOLLOW flag is set when symbolic links are to be followed
 * when they occur at the end of the name translation process.
 * Symbolic links are always followed for all pathname
 * components other than the last.
 *
 * The segflg defines whether the name is to be copied from user
 * space or kernel space.
 *
 * Overall outline of namei:
 *
 *	copy in name
 *	get starting directory
 *	while (!done && !error) {
 *		call lookup to search path.
 *		if symbolic link, massage name in buffer and continue
 *	}
 *
 * Returns:	0			Success
 *		ENOENT			No such file or directory
 *		ELOOP			Too many levels of symbolic links
 *		ENAMETOOLONG		Filename too long
 *		copyinstr:EFAULT	Bad address
 *		copyinstr:ENAMETOOLONG	Filename too long
 *		lookup:EBADF		Bad file descriptor
 *		lookup:EROFS
 *		lookup:EACCES
 *		lookup:EPERM
 *		lookup:ERECYCLE	 vnode was recycled from underneath us in lookup.
 *						 This means we should re-drive lookup from this point.
 *		lookup: ???
 *		VNOP_READLINK:???
 */
int
namei(struct nameidata *ndp)
{
	struct filedesc *fdp;	/* pointer to file descriptor state */
	char *cp;		/* pointer into pathname argument */
	struct vnode *dp;	/* the directory we are searching */
	struct vnode *usedvp = ndp->ni_dvp;	/* store pointer to vp in case we must loop
						 * due to heavy vnode pressure */
	u_long cnpflags = ndp->ni_cnd.cn_flags; /* store in case we have to restore after loop */
	uio_t auio;
	int error;
	struct componentname *cnp = &ndp->ni_cnd;
	vfs_context_t ctx = cnp->cn_context;
	proc_t p = vfs_context_proc(ctx);
/* XXX ut should be from context */
	uthread_t ut = (struct uthread *)get_bsdthread_info(current_thread());
	char *tmppn;
	char uio_buf[ UIO_SIZEOF(1) ];

#if DIAGNOSTIC
	if (!vfs_context_ucred(ctx) || !p)
		panic ("namei: bad cred/proc");
	if (cnp->cn_nameiop & (~OPMASK))
		panic ("namei: nameiop contaminated with flags");
	if (cnp->cn_flags & OPMASK)
		panic ("namei: flags contaminated with nameiops");
#endif
	fdp = p->p_fd;

vnode_recycled:

	/*
	 * Get a buffer for the name to be translated, and copy the
	 * name into the buffer.
	 */
	if ((cnp->cn_flags & HASBUF) == 0) {
		cnp->cn_pnbuf = ndp->ni_pathbuf;
		cnp->cn_pnlen = PATHBUFLEN;
	}
#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(ndp->ni_segflg) == 0) {
		panic("%s :%d - invalid ni_segflg\n", __FILE__, __LINE__); 
	}
#endif /* LP64_DEBUG */

retry_copy:
	if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) {
		error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf,
			    cnp->cn_pnlen, (size_t *)&ndp->ni_pathlen);
	} else {
		error = copystr(CAST_DOWN(void *, ndp->ni_dirp), cnp->cn_pnbuf,
			    cnp->cn_pnlen, (size_t *)&ndp->ni_pathlen);
	}
	if (error == ENAMETOOLONG && !(cnp->cn_flags & HASBUF)) {
		MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (cnp->cn_pnbuf == NULL) {
			error = ENOMEM;
			goto error_out;
		}

		cnp->cn_flags |= HASBUF;
		cnp->cn_pnlen = MAXPATHLEN;
		
		goto retry_copy;
	}
	if (error)
	        goto error_out;

#if CONFIG_VOLFS
 	/*
	 * Check for legacy volfs style pathnames.
	 *
	 * For compatibility reasons we currently allow these paths,
	 * but future versions of the OS may not support them.
	 */
	if (ndp->ni_pathlen >= VOLFS_MIN_PATH_LEN &&
	    cnp->cn_pnbuf[0] == '/' &&
	    cnp->cn_pnbuf[1] == '.' &&
	    cnp->cn_pnbuf[2] == 'v' &&
	    cnp->cn_pnbuf[3] == 'o' &&
	    cnp->cn_pnbuf[4] == 'l' &&
	    cnp->cn_pnbuf[5] == '/' ) {
		char * realpath;
		int realpath_err;
		/* Attempt to resolve a legacy volfs style pathname. */
		MALLOC_ZONE(realpath, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (realpath) {
			if ((realpath_err= vfs_getrealpath(&cnp->cn_pnbuf[6], realpath, MAXPATHLEN, ctx))) {
				FREE_ZONE(realpath, MAXPATHLEN, M_NAMEI);
				if (realpath_err == ENOSPC){
					error = ENAMETOOLONG;
					goto error_out;
				}
			} else {
				if (cnp->cn_flags & HASBUF) {
					FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
				}
				cnp->cn_pnbuf = realpath;
				cnp->cn_pnlen = MAXPATHLEN;
				ndp->ni_pathlen = strlen(realpath) + 1;
				cnp->cn_flags |= HASBUF | CN_VOLFSPATH;
			}
		}
	}
#endif /* CONFIG_VOLFS */

	/* If we are auditing the kernel pathname, save the user pathname */
	if (cnp->cn_flags & AUDITVNPATH1)
		AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH1); 
	if (cnp->cn_flags & AUDITVNPATH2)
		AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH2); 

	/*
	 * Do not allow empty pathnames
	 */
	if (*cnp->cn_pnbuf == '\0') {
		error = ENOENT;
		goto error_out;
	}
	ndp->ni_loopcnt = 0;

	/*
	 * determine the starting point for the translation.
	 */
	if ((ndp->ni_rootdir = fdp->fd_rdir) == NULLVP) {
	        if ( !(fdp->fd_flags & FD_CHROOT))
		        ndp->ni_rootdir = rootvnode;
	}
	cnp->cn_nameptr = cnp->cn_pnbuf;

	ndp->ni_usedvp = NULLVP;

	if (*(cnp->cn_nameptr) == '/') {
	        while (*(cnp->cn_nameptr) == '/') {
		        cnp->cn_nameptr++;
			ndp->ni_pathlen--;
		}
		dp = ndp->ni_rootdir;
	} else if (cnp->cn_flags & USEDVP) {
	        dp = ndp->ni_dvp;
		ndp->ni_usedvp = dp;
	} else
	        dp = vfs_context_cwd(ctx);

	if (dp == NULLVP || (dp->v_lflag & VL_DEAD)) {
	        error = ENOENT;
		goto error_out;
	}
	ndp->ni_dvp = NULLVP;
	ndp->ni_vp  = NULLVP;

	for (;;) {
	        int need_newpathbuf;
		int linklen;

		ndp->ni_startdir = dp;

		if ( (error = lookup(ndp)) ) {
			goto error_out;
		}
		/*
		 * Check for symbolic link
		 */
		if ((cnp->cn_flags & ISSYMLINK) == 0) {
			return (0);
		}
		if ((cnp->cn_flags & FSNODELOCKHELD)) {
		        cnp->cn_flags &= ~FSNODELOCKHELD;
			unlock_fsnode(ndp->ni_dvp, NULL);
		}	
		if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
			error = ELOOP;
			break;
		}
#if CONFIG_MACF
		if ((error = mac_vnode_check_readlink(ctx, ndp->ni_vp)) != 0)
			break;
#endif /* MAC */
		if (ndp->ni_pathlen > 1 || !(cnp->cn_flags & HASBUF))
		        need_newpathbuf = 1;
		else
		        need_newpathbuf = 0;

		if (need_newpathbuf) {
			MALLOC_ZONE(cp, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
			if (cp == NULL) {
				error = ENOMEM;
				break;
			}
		} else {
			cp = cnp->cn_pnbuf;
		}
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));

		uio_addiov(auio, CAST_USER_ADDR_T(cp), MAXPATHLEN);

		error = VNOP_READLINK(ndp->ni_vp, auio, ctx);
		if (error) {
			if (need_newpathbuf)
				FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);
			break;
		}
		// LP64todo - fix this
		linklen = MAXPATHLEN - uio_resid(auio);
		if (linklen + ndp->ni_pathlen > MAXPATHLEN) {
			if (need_newpathbuf)
				FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);

			error = ENAMETOOLONG;
			break;
		}
		if (need_newpathbuf) {
			long len = cnp->cn_pnlen;

			tmppn = cnp->cn_pnbuf;
			bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
			cnp->cn_pnbuf = cp;
			cnp->cn_pnlen = MAXPATHLEN;

			if ( (cnp->cn_flags & HASBUF) )
			        FREE_ZONE(tmppn, len, M_NAMEI);
			else
			        cnp->cn_flags |= HASBUF;
		} else
			cnp->cn_pnbuf[linklen] = '\0';

		ndp->ni_pathlen += linklen;
		cnp->cn_nameptr = cnp->cn_pnbuf;

		/*
		 * starting point for 'relative'
		 * symbolic link path
		 */
		dp = ndp->ni_dvp;
	        /*
		 * get rid of references returned via 'lookup'
		 */
		vnode_put(ndp->ni_vp);
		vnode_put(ndp->ni_dvp);

		ndp->ni_vp = NULLVP;
		ndp->ni_dvp = NULLVP;

		/*
		 * Check if symbolic link restarts us at the root
		 */
		if (*(cnp->cn_nameptr) == '/') {
			while (*(cnp->cn_nameptr) == '/') {
				cnp->cn_nameptr++;
				ndp->ni_pathlen--;
			}
			if ((dp = ndp->ni_rootdir) == NULLVP) {
			        error = ENOENT;
				goto error_out;
			}
		}
	}
	/*
	 * only come here if we fail to handle a SYMLINK...
	 * if either ni_dvp or ni_vp is non-NULL, then
	 * we need to drop the iocount that was picked
	 * up in the lookup routine
	 */
	if (ndp->ni_dvp)
	        vnode_put(ndp->ni_dvp);
	if (ndp->ni_vp)
	        vnode_put(ndp->ni_vp);
 error_out:
	if ( (cnp->cn_flags & HASBUF) ) {
		cnp->cn_flags &= ~HASBUF;
		FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	}
	cnp->cn_pnbuf = NULL;
	ndp->ni_vp = NULLVP;
	if (error == ERECYCLE){
		/* vnode was recycled underneath us; re-drive lookup from the
		 * beginning, since recycling invalidated the last lookup */
		ndp->ni_cnd.cn_flags = cnpflags;
		ndp->ni_dvp = usedvp;
		goto vnode_recycled;
	}


	return (error);
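Callers typically set up the struct nameidata with NDINIT and must release the iocount namei leaves on ni_vp. A hedged sketch of the common lookup pattern (user_path and ctx are hypothetical; error handling trimmed):

	struct nameidata nd;
	vnode_t vp;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
	       CAST_USER_ADDR_T(user_path), ctx);
	if ((error = namei(&nd)) != 0)
		return (error);
	vp = nd.ni_vp;		/* returned with an iocount held */
	nameidone(&nd);		/* release pathname/lookup state */
	/* ... use vp ... */
	vnode_put(vp);		/* drop the iocount taken in lookup */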
}