Example #1
static void
set_rootvnode(void)
{
	struct proc *p;

	if (VFS_ROOT(TAILQ_FIRST(&mountlist), LK_EXCLUSIVE, &rootvnode))
		panic("Cannot find root vnode");

	VOP_UNLOCK(rootvnode, 0);

	p = curthread->td_proc;
	FILEDESC_XLOCK(p->p_fd);

	if (p->p_fd->fd_cdir != NULL)
		vrele(p->p_fd->fd_cdir);
	p->p_fd->fd_cdir = rootvnode;
	VREF(rootvnode);

	if (p->p_fd->fd_rdir != NULL)
		vrele(p->p_fd->fd_rdir);
	p->p_fd->fd_rdir = rootvnode;
	VREF(rootvnode);

	FILEDESC_XUNLOCK(p->p_fd);
}
/* ARGSUSED */
int
zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ZFS_ENTER(zfsvfs);

	if (strcmp(nm, "..") == 0) {
		err = VFS_ROOT(dvp->v_vfsp, vpp);
	} else {
		err = gfs_vop_lookup(dvp, nm, vpp, pnp, flags, rdir,
		    cr, ct, direntflags, realpnp);
	}

	ZFS_EXIT(zfsvfs);

	return (err);
}
/*
 * Create the '.zfs' directory.  This directory is cached as part of the VFS
 * structure.  This results in a hold on the vfs_t.  The code in zfs_umount()
 * therefore checks against a vfs_count of 2 instead of 1.  This reference
 * is removed when the ctldir is destroyed in the unmount.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	vnode_t *vp, *rvp;
	zfsctl_node_t *zcp;

	ASSERT(zfsvfs->z_ctldir == NULL);

	vp = gfs_root_create(sizeof (zfsctl_node_t), zfsvfs->z_vfs,
	    zfsctl_ops_root, ZFSCTL_INO_ROOT, zfsctl_root_entries,
	    zfsctl_root_inode_cb, MAXNAMELEN, NULL, NULL);
	zcp = vp->v_data;
	zcp->zc_id = ZFSCTL_INO_ROOT;

	VERIFY(VFS_ROOT(zfsvfs->z_vfs, &rvp) == 0);
	ZFS_TIME_DECODE(&zcp->zc_cmtime, VTOZ(rvp)->z_phys->zp_crtime);
	VN_RELE(rvp);

	/*
	 * We're only faking the fact that we have a root of a filesystem for
	 * the sake of the GFS interfaces.  Undo the flag manipulation it did
	 * for us.
	 */
	vp->v_flag &= ~(VROOT | VNOCACHE | VNOMAP | VNOSWAP | VNOMOUNT);

	zfsvfs->z_ctldir = vp;
}
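The comment above notes that caching the '.zfs' vnode leaves an extra hold on the vfs_t, so the unmount path has to compare against a count of 2 instead of 1. Below is a minimal sketch of that unmount-side check, assuming a Solaris-style vfs_t with a vfs_count field and a zfsvfs_t carrying z_ctldir; the helper name zfs_umount_busy_check is hypothetical and is not the actual zfs_umount() code.

static int
zfs_umount_busy_check(vfs_t *vfsp)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;

	/*
	 * One reference belongs to the mount itself and one to the
	 * cached '.zfs' vnode created in zfsctl_create(), so anything
	 * above two means the file system is still in use.
	 */
	if (vfsp->vfs_count > 2)
		return (EBUSY);

	/* The ctldir vnode itself must not be held by anyone else. */
	if (zfsvfs->z_ctldir != NULL && zfsvfs->z_ctldir->v_count > 1)
		return (EBUSY);

	return (0);
}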
Example #4
int
imageboot_setup()
{
	dev_t       dev;
	int         error = 0;
	char *root_path = NULL;

	DBG_TRACE("%s: entry\n", __FUNCTION__);

	MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (root_path == NULL)
		return (ENOMEM);

	if(PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == FALSE) {
		error = ENOENT;
		goto done;
	}

	printf("%s: root image url is %s\n", __FUNCTION__, root_path);
	error = di_root_image(root_path, rootdevice, &dev);
	if(error) {
		printf("%s: di_root_image failed: %d\n", __FUNCTION__, error);
		goto done;
	}

	rootdev = dev;
	mountroot = NULL;
	printf("%s: root device 0x%x\n", __FUNCTION__, rootdev);
	error = vfs_mountroot();

	if (error == 0 && rootvnode != NULL) {
		struct vnode *tvp;
		struct vnode *newdp;

		/*
		 * Get the vnode for '/'.
		 * Set fdp->fd_fd.fd_cdir to reference it.
		 */
		if (VFS_ROOT(TAILQ_LAST(&mountlist,mntlist), &newdp, vfs_context_kernel()))
			panic("%s: cannot find root vnode", __FUNCTION__);

		vnode_ref(newdp);
		vnode_put(newdp);
		tvp = rootvnode;
		vnode_rele(tvp);
		filedesc0.fd_cdir = newdp;
		rootvnode = newdp;
		mount_list_lock();
		TAILQ_REMOVE(&mountlist, TAILQ_FIRST(&mountlist), mnt_list);
		mount_list_unlock();
		mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;
		DBG_TRACE("%s: root switched\n", __FUNCTION__);
	}
done:
	FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI);

	DBG_TRACE("%s: exit\n", __FUNCTION__);

	return (error);
}
Example #5
/*
 * Create the '.zfs' directory.  This directory is cached as part of the VFS
 * structure.  This results in a hold on the vfs_t.  The code in zfs_umount()
 * therefore checks against a vfs_count of 2 instead of 1.  This reference
 * is removed when the ctldir is destroyed in the unmount.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	vnode_t *vp, *rvp;
	zfsctl_node_t *zcp;
	uint64_t crtime[2];

	ASSERT(zfsvfs->z_ctldir == NULL);

	vp = gfs_root_create(sizeof (zfsctl_node_t), zfsvfs->z_vfs,
	    &zfsctl_ops_root, ZFSCTL_INO_ROOT, zfsctl_root_entries,
	    zfsctl_root_inode_cb, MAXNAMELEN, NULL, NULL);
	zcp = vp->v_data;
	zcp->zc_id = ZFSCTL_INO_ROOT;

	VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
	VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    &crtime, sizeof (crtime)));
	ZFS_TIME_DECODE(&zcp->zc_cmtime, crtime);
	VN_URELE(rvp);

	/*
	 * We're only faking the fact that we have a root of a filesystem for
	 * the sake of the GFS interfaces.  Undo the flag manipulation it did
	 * for us.
	 */
	vp->v_vflag &= ~VV_ROOT;

	zfsvfs->z_ctldir = vp;

	VOP_UNLOCK(vp, 0);
}
Example #6
static int
vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_t ctx)
{
	vnode_t vp;
	struct mount *mp = NULL;
	char  *str;
	char ch;
	uint32_t  id;
	ino64_t ino;
	int error;
	int length;

	/* Get file system id and move str to next component. */
	id = strtoul(path, &str, 10);
	if (id == 0 || str[0] != '/') {
		return (EINVAL);
	}
	while (*str == '/') {
		str++;
	}
	ch = *str;

	mp = mount_lookupby_volfsid(id, 1);
	if (mp == NULL) {
		return (EINVAL);  /* unexpected failure */
	}
	/* Check for an alias to a file system root. */
	if (ch == '@' && str[1] == '\0') {
		ino = 2;
		str++;
	} else {
		/* Get file id and move str to next component. */
	    ino = strtouq(str, &str, 10);
	}

	/* Get the target vnode. */
	if (ino == 2) {
		error = VFS_ROOT(mp, &vp, ctx);
	} else {
		error = VFS_VGET(mp, ino, &vp, ctx);
	}
	vfs_unbusy(mp);
	if (error) {
		goto out;
	}
	realpath[0] = '\0';

	/* Get the absolute path to this vnode. */
	error = build_path(vp, realpath, bufsize, &length, 0, ctx);
	vnode_put(vp);

	if (error == 0 && *str != '\0') {
		int attempt = strlcat(realpath, str, MAXPATHLEN);
		if (attempt > MAXPATHLEN){
			error = ENAMETOOLONG;
		}
	}
out:
	return (error);
}
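A quick illustrative call: vfs_getrealpath() expects a volfs-style path of the form "<fsid>/<inode>" (with "@" standing for the file system root) and writes back the absolute path of the resolved vnode. The wrapper name and the literal ids below are made up for the sketch.

static int
resolve_volfs_path_example(vfs_context_t ctx)
{
	char realpath[MAXPATHLEN];
	int error;

	/* "17/1234" = file system with volfs id 17, inode number 1234. */
	error = vfs_getrealpath("17/1234", realpath, MAXPATHLEN, ctx);
	if (error == 0)
		printf("resolved to %s\n", realpath);
	return (error);
}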
static void
smb_tree_acl_access(cred_t *cred, const char *sharename, vnode_t *pathvp,
		    uint32_t *access)
{
	int rc;
	vfs_t *vfsp;
	vnode_t *root = NULL;
	vnode_t *sharevp = NULL;
	char *sharepath;
	struct pathname pnp;
	size_t size;

	*access = ACE_ALL_PERMS; /* default to full "UNIX" access */

	/*
	 * Using the vnode of the share path, we then find the root
	 * directory of the mounted file system. We will then look to
	 * see if there is a .zfs/shares directory and if there is,
	 * get the access information from the ACL/ACES values and
	 * check against the cred.
	 */
	vfsp = pathvp->v_vfsp;
	if (vfsp != NULL)
		rc = VFS_ROOT(vfsp, &root);
	else
		rc = ENOENT;

	if (rc != 0)
		return;


	/*
	 * Find the share object, if there is one. Need to construct
	 * the path to the .zfs/shares/<sharename> object and look it
	 * up.  root is passed in held but will be released by
	 * lookuppnvp().
	 */

	size = sizeof (SHARES_DIR) + strlen(sharename) + 1;
	sharepath = kmem_alloc(size, KM_SLEEP);
	(void) sprintf(sharepath, "%s%s", SHARES_DIR, sharename);

	pn_alloc(&pnp);
	(void) pn_set(&pnp, sharepath);
	rc = lookuppnvp(&pnp, NULL, NO_FOLLOW, NULL,
	    &sharevp, rootdir, root, kcred);
	pn_free(&pnp);

	kmem_free(sharepath, size);

	/*
	 * Now get the effective access value based on cred and ACL
	 * values.
	 */

	if (rc == 0) {
		smb_vop_eaccess(sharevp, (int *)access, V_ACE_MASK, NULL, cred);
		VN_RELE(sharevp);
	}
}
Example #8
/*
 * Try to create a journal log inside the filesystem.
 */
int
wapbl_create_infs_log(struct mount *mp, struct fs *fs, struct vnode *devvp,
    daddr_t *startp, size_t *countp, uint64_t *extradatap)
{
	struct vnode *vp, *rvp;
	struct inode *ip;
	int error;

	if ((error = VFS_ROOT(mp, &rvp)) != 0)
		return error;

	error = UFS_INODE_ALLOC(VTOI(rvp), 0 | S_IFREG, NOCRED, &vp);
	if (mp->mnt_flag & MNT_UPDATE) {
		vput(rvp);
	} else {
		VOP_UNLOCK(rvp, 0);
		vgone(rvp);
	}
	if (error != 0)
		return error;

	vp->v_type = VREG;
	ip = VTOI(vp);
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	/* ip->i_mode = 0 | IFREG; */
	DIP_ASSIGN(ip, mode, 0 | IFREG);
	/* ip->i_flags = SF_LOG; */
	DIP_ASSIGN(ip, flags, SF_LOG);
	ip->i_effnlink = 1;
	DIP_ASSIGN(ip, nlink, 1);
	ffs_update(ip, MNT_WAIT);

	if ((error = wapbl_allocate_log_file(mp, vp,
	                 startp, countp, extradatap)) != 0) {
		/*
		 * If we couldn't allocate the space for the log file,
		 * remove the inode by setting its link count back to
		 * zero and bail.
		 */
		ip->i_effnlink = 0;
		DIP_ASSIGN(ip, nlink, 0);
		VOP_UNLOCK(vp, 0);
		vgone(vp);

		return error;
	}

	/*
	 * Now that we have the place-holder inode for the journal,
	 * we don't need the vnode ever again.
	 */
	VOP_UNLOCK(vp, 0);
	vgone(vp);

	return 0;
}
Example #9
STATIC void
linvfs_unfreeze_fs(
	struct super_block	*sb)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	vnode_t			*vp;
	int			error;

	VFS_ROOT(vfsp, &vp, error);
	VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, 0, XFS_IOC_THAW, 0, error);
	VN_RELE(vp);
}
Example #10
static OSKIT_COMDECL filesystem_getroot(oskit_filesystem_t *f,
				       struct oskit_dir **out_dir)
{
    struct gfilesystem *fs = (struct gfilesystem *) f; 
    oskit_dir_t *d;
    struct proc *p;
    oskit_error_t ferror;
    struct vnode *vp;
    int error;

    if (!fs || !fs->count || !fs->mp)
	    return OSKIT_E_INVALIDARG;

    ferror = getproc(&p);
    if (ferror)
	    return ferror;
    
    error = VFS_ROOT(fs->mp, &vp);
    if (error)
    {
	prfree(p);
	return errno_to_oskit_error(error);
    }
    
    error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
    if (error)
    {
	vput(vp);
	prfree(p);
	return errno_to_oskit_error(error);
    }
    
    d = (oskit_dir_t *) hashtab_search(vptab, (hashtab_key_t) vp);
    if (d)
    {
	oskit_dir_addref(d);
    }
    else
    {
	d = (oskit_dir_t *) gfile_create(fs,vp);
	if (!d)
	{
	    vput(vp);
	    prfree(p);
	    return OSKIT_ENOMEM;
	}
    }

    vput(vp);
    prfree(p);
    *out_dir = d;
    return 0;    
}
Example #11
int
traverse(vnode_t **cvpp, int lktype)
{
	vnode_t *cvp;
	vnode_t *tvp;
	vfs_t *vfsp;
	int error;

	cvp = *cvpp;
	tvp = NULL;

	/*
	 * If this vnode is mounted on, then we transparently indirect
	 * to the vnode which is the root of the mounted file system.
	 * Before we do this we must check that an unmount is not in
	 * progress on this vnode.
	 */

	for (;;) {
		/*
		 * Reached the end of the mount chain?
		 */
		vfsp = vn_mountedvfs(cvp);
		if (vfsp == NULL)
			break;
		error = vfs_busy(vfsp, 0);
		/*
		 * tvp is NULL for *cvpp vnode, which we can't unlock.
		 */
		if (tvp != NULL)
			vput(cvp);
		else
			vrele(cvp);
		if (error)
			return (error);

		/*
		 * The read lock must be held across the call to VFS_ROOT() to
		 * prevent a concurrent unmount from destroying the vfs.
		 */
		error = VFS_ROOT(vfsp, lktype, &tvp);
		vfs_unbusy(vfsp);
		if (error != 0)
			return (error);
		cvp = tvp;
	}

	*cvpp = cvp;
	return (0);
}
Example #12
STATIC void
linvfs_freeze_fs(
	struct super_block	*sb)
{
	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
	vnode_t			*vp;
	int			error;

	if (sb->s_flags & MS_RDONLY)
		return;
	VFS_ROOT(vfsp, &vp, error);
	VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, 0, XFS_IOC_FREEZE, 0, error);
	VN_RELE(vp);
}
Example #13
/*
 * If a hold on the specified VFS has already been taken
 * then only increment the reference count of the corresponding
 * smb_vfs_t structure. If no smb_vfs_t structure has been created
 * yet for the specified VFS then create one and take a hold on
 * the VFS.
 */
int
smb_vfs_hold(smb_export_t *se, vfs_t *vfsp)
{
	smb_vfs_t	*smb_vfs;
	vnode_t 	*rootvp;
	int		rc;

	if (se == NULL || vfsp == NULL)
		return (EINVAL);

	smb_llist_enter(&se->e_vfs_list, RW_WRITER);

	if ((smb_vfs = smb_vfs_find(se, vfsp)) != NULL) {
		smb_vfs->sv_refcnt++;
		DTRACE_PROBE1(smb_vfs_hold_hit, smb_vfs_t *, smb_vfs);
		smb_llist_exit(&se->e_vfs_list);
		return (0);
	}

	if ((rc = VFS_ROOT(vfsp, &rootvp)) != 0) {
		smb_llist_exit(&se->e_vfs_list);
		return (rc);
	}

	smb_vfs = kmem_cache_alloc(smb_kshare_cache_vfs, KM_SLEEP);

	bzero(smb_vfs, sizeof (smb_vfs_t));

	smb_vfs->sv_magic = SMB_VFS_MAGIC;
	smb_vfs->sv_refcnt = 1;
	smb_vfs->sv_vfsp = vfsp;
	/*
	 * We have a hold on the root vnode of the file system
	 * from the VFS_ROOT call above.
	 */
	smb_vfs->sv_rootvp = rootvp;

	smb_llist_insert_head(&se->e_vfs_list, smb_vfs);
	DTRACE_PROBE1(smb_vfs_hold_miss, smb_vfs_t *, smb_vfs);
	smb_llist_exit(&se->e_vfs_list);

	return (0);
}
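The comment above spells out the hold protocol; the release side is symmetric: drop the reference count and, once it reaches zero, release the root vnode obtained from VFS_ROOT() and free the structure. The sketch below is illustrative only; the function name is hypothetical, smb_llist_remove is assumed to exist as the counterpart of the smb_llist_insert_head call shown in smb_vfs_hold(), and this is not the actual illumos release routine.

static void
smb_vfs_rele_sketch(smb_export_t *se, smb_vfs_t *smb_vfs)
{
	smb_llist_enter(&se->e_vfs_list, RW_WRITER);
	ASSERT(smb_vfs->sv_refcnt > 0);
	if (--smb_vfs->sv_refcnt == 0) {
		smb_llist_remove(&se->e_vfs_list, smb_vfs);
		/* Drop the hold taken by VFS_ROOT() in smb_vfs_hold(). */
		VN_RELE(smb_vfs->sv_rootvp);
		smb_vfs->sv_magic = 0;
		kmem_cache_free(smb_kshare_cache_vfs, smb_vfs);
	}
	smb_llist_exit(&se->e_vfs_list);
}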
Example #14
int
vfs_getbyid(fsid_t *fsid, ino64_t ino, vnode_t *vpp, vfs_context_t ctx)
{
	mount_t mp;
	int error;
	
	mp = mount_lookupby_volfsid(fsid->val[0], 1);
	if (mp == NULL) {
		return EINVAL;
	}

	/* Get the target vnode. */
	if (ino == 2) {
		error = VFS_ROOT(mp, vpp, ctx);
	} else {
		error = VFS_VGET(mp, ino, vpp, ctx);
	}

	vfs_unbusy(mp);
	return error;
}
Example #15
/*
 * Resolve a mount point's glue ncp.  This ncp creates the illusion
 * of continuity in the namecache tree by connecting the ncp related to the
 * vnode under the mount to the ncp related to the mount's root vnode.
 *
 * If no error occurred, a locked, ref'd ncp is stored in *nch.
 */
int
nlookup_mp(struct mount *mp, struct nchandle *nch)
{
    struct vnode *vp;
    int error;

    error = 0;
    cache_get(&mp->mnt_ncmountpt, nch);
    if (nch->ncp->nc_flag & NCF_UNRESOLVED) {
	while (vfs_busy(mp, 0))
	    ;
	error = VFS_ROOT(mp, &vp);
	vfs_unbusy(mp);
	if (error) {
	    cache_put(nch);
	} else {
	    cache_setvp(nch, vp);
	    vput(vp);
	}
    }
    return(error);
}
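A short caller sketch for nlookup_mp(): on success the returned nchandle is locked and referenced and must be dropped with cache_put(); on failure the routine has already released it. The surrounding wrapper is illustrative only.

static void
use_mount_glue(struct mount *mp)
{
	struct nchandle nch;
	int error;

	error = nlookup_mp(mp, &nch);
	if (error == 0) {
		/* nch.ncp is resolved, locked and referenced here. */
		cache_put(&nch);
	}
}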
Example #16
/*
 * Create the '.zfs' directory.
 */
void
zfsctl_create(zfsvfs_t *zfsvfs)
{
	zfsctl_root_t *dot_zfs;
	sfs_node_t *snapdir;
	vnode_t *rvp;
	uint64_t crtime[2];

	ASSERT(zfsvfs->z_ctldir == NULL);

	snapdir = sfs_alloc_node(sizeof(*snapdir), "snapshot", ZFSCTL_INO_ROOT,
	    ZFSCTL_INO_SNAPDIR);
	dot_zfs = (zfsctl_root_t *)sfs_alloc_node(sizeof(*dot_zfs), ".zfs", 0,
	    ZFSCTL_INO_ROOT);
	dot_zfs->snapdir = snapdir;

	VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
	VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    &crtime, sizeof(crtime)));
	ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime);
	vput(rvp);

	zfsvfs->z_ctldir = dot_zfs;
}
Example #17
/*
 * Traverse a mount point.  Routine accepts a vnode pointer as a reference
 * parameter and performs the indirection, releasing the original vnode.
 */
int
traverse(vnode_t **cvpp)
{
	int error = 0;
	vnode_t *cvp;
	vnode_t *tvp;
	vfs_t *vfsp;

	cvp = *cvpp;

	/*
	 * If this vnode is mounted on, then we transparently indirect
	 * to the vnode which is the root of the mounted file system.
	 * Before we do this we must check that an unmount is not in
	 * progress on this vnode.
	 */

	for (;;) {
		/*
		 * Try to read lock the vnode.  If this fails because
		 * the vnode is already write locked, then check to
		 * see whether it is the current thread which locked
		 * the vnode.  If it is not, then read lock the vnode
		 * by waiting to acquire the lock.
		 *
		 * The code path in domount() is an example of support
		 * which needs to look up two pathnames and locks one
		 * of them in between the two lookups.
		 */
		error = vn_vfsrlock(cvp);
		if (error) {
			if (!vn_vfswlock_held(cvp))
				error = vn_vfsrlock_wait(cvp);
			if (error != 0) {
				/*
				 * lookuppn() expects a held vnode to be
				 * returned because it promptly calls
				 * VN_RELE after the error return
				 */
				*cvpp = cvp;
				return (error);
			}
		}

		/*
		 * Reached the end of the mount chain?
		 */
		vfsp = vn_mountedvfs(cvp);
		if (vfsp == NULL) {
			vn_vfsunlock(cvp);
			break;
		}

		/*
		 * The read lock must be held across the call to VFS_ROOT() to
		 * prevent a concurrent unmount from destroying the vfs.
		 */
		error = VFS_ROOT(vfsp, &tvp);
		vn_vfsunlock(cvp);

		if (error)
			break;

		VN_RELE(cvp);

		cvp = tvp;
	}

	*cvpp = cvp;
	return (error);
}
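A minimal caller sketch for this variant of traverse(): the vnode handed in must be held, and on return *cvpp still points to a held vnode whether or not a mount point was crossed (the original vnode is released inside traverse() when indirection happens). The wrapper name below is hypothetical.

static int
traverse_and_use(vnode_t *dvp)
{
	vnode_t *vp = dvp;
	int error;

	VN_HOLD(vp);
	error = traverse(&vp);
	if (error == 0) {
		/* ... operate on the (possibly new) root vnode ... */
	}
	VN_RELE(vp);
	return (error);
}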
Example #18
vnode_t	*
dm_handle_to_vp(
	xfs_handle_t	*handlep,
	short		*typep)
{
	dm_fsreg_t	*fsrp;
	vnode_t		*vp;
	short		type;
	int		lc;			/* lock cookie */
	int		error;
	fid_t		*fidp;

	if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handlep->ha_fsid, &lc)) == NULL)
		return(NULL);

	if (fsrp->fr_state == DM_STATE_MOUNTING) {
		mutex_spinunlock(&fsrp->fr_lock, lc);
		return(NULL);
	}

	for (;;) {
		if (fsrp->fr_state == DM_STATE_MOUNTED)
			break;
		if (fsrp->fr_state == DM_STATE_UNMOUNTED) {
			if (fsrp->fr_unmount && fsrp->fr_hdlcnt == 0)
				sv_broadcast(&fsrp->fr_queue);
			mutex_spinunlock(&fsrp->fr_lock, lc);
			return(NULL);
		}

		/* Must be DM_STATE_UNMOUNTING. */

		fsrp->fr_hdlcnt++;
		sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
		lc = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_hdlcnt--;
	}

	fsrp->fr_vfscnt++;
	mutex_spinunlock(&fsrp->fr_lock, lc);

	/* Now that the mutex is released, wait until we have access to the
	   vnode.
	*/

	fidp = (fid_t*)&handlep->ha_fid;
	if (fidp->fid_len == 0) {	/* filesystem handle */
		VFS_ROOT(fsrp->fr_vfsp, &vp, error);
	} else {				/* file object handle */
		VFS_VGET(fsrp->fr_vfsp, &vp, fidp, error);
	}

	lc = mutex_spinlock(&fsrp->fr_lock);

	fsrp->fr_vfscnt--;
	if (fsrp->fr_unmount && fsrp->fr_vfscnt == 0)
		sv_broadcast(&fsrp->fr_queue);

	mutex_spinunlock(&fsrp->fr_lock, lc);
	if (error || vp == NULL)
		return(NULL);

	if (fidp->fid_len == 0) {
		type = DM_TDT_VFS;
	} else if (vp->v_type == VREG) {
		type = DM_TDT_REG;
	} else if (vp->v_type == VDIR) {
		type = DM_TDT_DIR;
	} else if (vp->v_type == VLNK) {
		type = DM_TDT_LNK;
	} else {
		type = DM_TDT_OTH;
	}
	*typep = type;
	return(vp);
}
Example #19
int spl_vfs_root(mount_t mount, struct vnode **vp)
{
    return VFS_ROOT(mount, vp, vfs_context_current() );
}
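A usage sketch for the wrapper above: on XNU, VFS_ROOT() through vfs_context_current() returns the root vnode with an iocount, which the caller drops with vnode_put(). The function name below is made up for illustration.

static int
print_root_vid(mount_t mp)
{
	struct vnode *rvp;
	int error;

	error = spl_vfs_root(mp, &rvp);
	if (error != 0)
		return (error);

	/* rvp carries an iocount from VFS_ROOT(); use it, then drop it. */
	printf("root vnode id: %u\n", vnode_vid(rvp));
	vnode_put(rvp);
	return (0);
}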
Example #20
/*
 * sr - the request info, used to find root of dataset,
 *      unicode or ascii, where the share is rooted in the
 *      dataset
 * root_node - root of the share
 * cur_node - where in the share for the command
 * buf - is the path for the command to be processed
 *       returned without @GMT if processed
 * vss_cur_node - returned value for the snapshot version
 *                of the cur_node
 * vss_root_node - returned value for the snapshot version
 *                 of the root_node
 *
 * This routine is the processing for handling the
 * SMB_FLAGS2_REPARSE_PATH bit being set in the smb header.
 *
 * By using the cur_node passed in, a new node is found or
 * created that is the same place in the directory tree, but
 * in the snapshot. We also use root_node to do the same for
 * the root.
 * Once the new smb node is found, the path is modified by
 * removing the @GMT token from the path in the buf.
 */
int
smb_vss_lookup_nodes(smb_request_t *sr, smb_node_t *root_node,
    smb_node_t *cur_node, char *buf, smb_node_t **vss_cur_node,
    smb_node_t **vss_root_node)
{
	smb_arg_open_t	*op = &sr->arg.open;
	smb_node_t	*tnode;
	char		*snapname, *path;
	char		*gmttoken;
	char		gmttok_buf[SMB_VSS_GMT_SIZE];
	vnode_t		*fsrootvp = NULL;
	time_t		toktime;
	int		err = 0;
	boolean_t	smb1;

	if (sr->tid_tree == NULL)
		return (ESTALE);

	tnode = sr->tid_tree->t_snode;

	ASSERT(tnode);
	ASSERT(tnode->vp);
	ASSERT(tnode->vp->v_vfsp);

	smb1 = (sr->session->dialect < 0x200);
	if (smb1) {
		const char *p;

		/* get gmttoken from buf */
		if ((p = smb_vss_find_gmttoken(buf)) == NULL)
			return (ENOENT);

		bcopy(p, gmttok_buf, SMB_VSS_GMT_SIZE);
		gmttok_buf[SMB_VSS_GMT_SIZE - 1] = '\0';
		gmttoken = gmttok_buf;
		toktime = 0;
	} else {
		/* SMB2 and later */
		gmttoken = NULL;
		toktime = op->timewarp.tv_sec;
	}

	path = smb_srm_alloc(sr, MAXPATHLEN);
	snapname = smb_srm_alloc(sr, MAXPATHLEN);

	err = smb_node_getmntpath(tnode, path, MAXPATHLEN);
	if (err != 0)
		return (err);

	/*
	 * Find the corresponding snapshot name.  If snapname is
	 * empty after the map call, no such snapshot was found.
	 */
	*snapname = '\0';
	smb_vss_map_gmttoken(sr->tid_tree, path, gmttoken, toktime,
	    snapname);
	if (*snapname == '\0')
		return (ENOENT);

	/* find snapshot nodes */
	err = VFS_ROOT(tnode->vp->v_vfsp, &fsrootvp);
	if (err != 0)
		return (err);

	/* find snapshot node corresponding to root_node */
	err = smb_vss_lookup_node(sr, root_node, fsrootvp,
	    snapname, cur_node, vss_root_node);
	if (err == 0) {
		/* find snapshot node corresponding to cur_node */
		err = smb_vss_lookup_node(sr, cur_node, fsrootvp,
		    snapname, cur_node, vss_cur_node);
		if (err != 0)
			smb_node_release(*vss_root_node);
	}

	VN_RELE(fsrootvp);

	if (smb1)
		smb_vss_remove_first_token_from_path(buf);

	return (err);
}
Example #21
/*
 * Search a pathname.
 * This is a very central and rather complicated routine.
 *
 * The pathname is pointed to by ni_cnd.cn_nameptr and is of length
 * ni_pathlen.  The starting directory is taken from ni_startdir. The
 * pathname is descended until done, or a symbolic link is encountered.
 * If the path is completed the flag ISLASTCN is set in ni_cnd.cn_flags.
 * If a symbolic link needing interpretation is encountered, the flag ISSYMLINK
 * is set in ni_cnd.cn_flags.
 *
 * The flag argument is LOOKUP, CREATE, RENAME, or DELETE depending on
 * whether the name is to be looked up, created, renamed, or deleted.
 * When CREATE, RENAME, or DELETE is specified, information usable in
 * creating, renaming, or deleting a directory entry may be calculated.
 * If flag has LOCKPARENT or'ed into it, the parent directory is returned
 * locked. If flag has WANTPARENT or'ed into it, the parent directory is
 * returned unlocked. Otherwise the parent directory is not returned. If
 * the target of the pathname exists and LOCKLEAF is or'ed into the flag
 * the target is returned locked, otherwise it is returned unlocked.
 * When creating or renaming and LOCKPARENT is specified, the target may not
 * be ".".  When deleting and LOCKPARENT is specified, the target may be ".".
 *
 * Overall outline of lookup:
 *
 * dirloop:
 *	identify next component of name at ndp->ni_ptr
 *	handle degenerate case where name is null string
 *	if .. and crossing mount points and on mounted filesys, find parent
 *	call VOP_LOOKUP routine for next component name
 *	    directory vnode returned in ni_dvp, unlocked unless LOCKPARENT set
 *	    component vnode returned in ni_vp (if it exists), locked.
 *	if result vnode is mounted on and crossing mount points,
 *	    find mounted on vnode
 *	if more components of name, do next level at dirloop
 *	return the answer in ni_vp, locked if LOCKLEAF set
 *	    if LOCKPARENT set, return locked parent in ni_dvp
 *	    if WANTPARENT set, return unlocked parent in ni_dvp
 */
int
lookup(struct nameidata *ndp)
{
	char *cp;			/* pointer into pathname argument */
	struct vnode *dp = 0;		/* the directory we are searching */
	struct vnode *tdp;		/* saved dp */
	struct mount *mp;		/* mount table entry */
	int docache;			/* == 0 do not cache last component */
	int wantparent;			/* 1 => wantparent or lockparent flag */
	int rdonly;			/* lookup read-only flag bit */
	int error = 0;
	int dpunlocked = 0;		/* dp has already been unlocked */
	int slashes;
	struct componentname *cnp = &ndp->ni_cnd;
	struct proc *p = cnp->cn_proc;
	/*
	 * Setup: break out flag bits into variables.
	 */
	wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT);
	docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE;
	if (cnp->cn_nameiop == DELETE ||
	    (wantparent && cnp->cn_nameiop != CREATE))
		docache = 0;
	rdonly = cnp->cn_flags & RDONLY;
	ndp->ni_dvp = NULL;
	cnp->cn_flags &= ~ISSYMLINK;
	dp = ndp->ni_startdir;
	ndp->ni_startdir = NULLVP;
	vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);

	/*
	 * If we have a leading string of slashes, remove them, and just make
	 * sure the current node is a directory.
	 */
	cp = cnp->cn_nameptr;
	if (*cp == '/') {
		do {
			cp++;
		} while (*cp == '/');
		ndp->ni_pathlen -= cp - cnp->cn_nameptr;
		cnp->cn_nameptr = cp;

		if (dp->v_type != VDIR) {
			error = ENOTDIR;
			goto bad;
		}

		/*
		 * If we've exhausted the path name, then just return the
		 * current node.  If the caller requested the parent node (i.e.
		 * it's a CREATE, DELETE, or RENAME), and we don't have one
		 * (because this is the root directory), then we must fail.
		 */
		if (cnp->cn_nameptr[0] == '\0') {
			if (ndp->ni_dvp == NULL && wantparent) {
				error = EISDIR;
				goto bad;
			}
			ndp->ni_vp = dp;
			cnp->cn_flags |= ISLASTCN;
			goto terminal;
		}
	}

dirloop:
	/*
	 * Search a new directory.
	 *
	 * The cn_hash value is for use by vfs_cache.
	 * The last component of the filename is left accessible via
	 * cnp->cn_nameptr for callers that need the name. Callers needing
	 * the name set the SAVENAME flag. When done, they assume
	 * responsibility for freeing the pathname buffer.
	 */
	cp = NULL;
	cnp->cn_consume = 0;
	cnp->cn_hash = hash32_stre(cnp->cn_nameptr, '/', &cp, HASHINIT);
	cnp->cn_namelen = cp - cnp->cn_nameptr;
	if (cnp->cn_namelen > NAME_MAX) {
		error = ENAMETOOLONG;
		goto bad;
	}
#ifdef NAMEI_DIAGNOSTIC
	{ char c = *cp;
	*cp = '\0';
	printf("{%s}: ", cnp->cn_nameptr);
	*cp = c; }
#endif
	ndp->ni_pathlen -= cnp->cn_namelen;
	ndp->ni_next = cp;
	/*
	 * If this component is followed by a slash, then move the pointer to
	 * the next component forward, and remember that this component must be
	 * a directory.
	 */
	if (*cp == '/') {
		do {
			cp++;
		} while (*cp == '/');
		slashes = cp - ndp->ni_next;
		ndp->ni_pathlen -= slashes;
		ndp->ni_next = cp;
		cnp->cn_flags |= REQUIREDIR;
	} else {
		slashes = 0;
		cnp->cn_flags &= ~REQUIREDIR;
	}
	/*
	 * We do special processing on the last component, whether or not it's
	 * a directory.  Cache all intervening lookups, but not the final one.
	 */
	if (*cp == '\0') {
		if (docache)
			cnp->cn_flags |= MAKEENTRY;
		else
			cnp->cn_flags &= ~MAKEENTRY;
		cnp->cn_flags |= ISLASTCN;
	} else {
		cnp->cn_flags |= MAKEENTRY;
		cnp->cn_flags &= ~ISLASTCN;
	}
	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.')
		cnp->cn_flags |= ISDOTDOT;
	else
		cnp->cn_flags &= ~ISDOTDOT;

	/*
	 * Handle "..": two special cases.
	 * 1. If at root directory (e.g. after chroot)
	 *    or at absolute root directory
	 *    then ignore it so can't get out.
	 * 2. If this vnode is the root of a mounted
	 *    filesystem, then replace it with the
	 *    vnode which was mounted on so we take the
	 *    .. in the other file system.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		for (;;) {
			if (dp == ndp->ni_rootdir || dp == rootvnode) {
				ndp->ni_dvp = dp;
				ndp->ni_vp = dp;
				vref(dp);
				goto nextname;
			}
			if ((dp->v_flag & VROOT) == 0 ||
			    (cnp->cn_flags & NOCROSSMOUNT))
				break;
			tdp = dp;
			dp = dp->v_mount->mnt_vnodecovered;
			vput(tdp);
			vref(dp);
			vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
		}
	}

	/*
	 * We now have a segment name to search for, and a directory to search.
	 */
	ndp->ni_dvp = dp;
	ndp->ni_vp = NULL;
	cnp->cn_flags &= ~PDIRUNLOCK;

	if ((error = VOP_LOOKUP(dp, &ndp->ni_vp, cnp)) != 0) {
#ifdef DIAGNOSTIC
		if (ndp->ni_vp != NULL)
			panic("leaf should be empty");
#endif
#ifdef NAMEI_DIAGNOSTIC
		printf("not found\n");
#endif
		if (error != EJUSTRETURN)
			goto bad;
		/*
		 * If this was not the last component, or there were trailing
		 * slashes, then the name must exist.
		 */
		if (cnp->cn_flags & REQUIREDIR) {
			error = ENOENT;
			goto bad;
		}
		/*
		 * If creating and at end of pathname, then can consider
		 * allowing file to be created.
		 */
		if (rdonly || (ndp->ni_dvp->v_mount->mnt_flag & MNT_RDONLY)) {
			error = EROFS;
			goto bad;
		}
		/*
		 * We return with ni_vp NULL to indicate that the entry
		 * doesn't currently exist, leaving a pointer to the
		 * (possibly locked) directory inode in ndp->ni_dvp.
		 */
		if (cnp->cn_flags & SAVESTART) {
			ndp->ni_startdir = ndp->ni_dvp;
			vref(ndp->ni_startdir);
		}
		return (0);
	}
#ifdef NAMEI_DIAGNOSTIC
	printf("found\n");
#endif

	/*
	 * Take into account any additional components consumed by the
	 * underlying filesystem.  This will include any trailing slashes after
	 * the last component consumed.
	 */
	if (cnp->cn_consume > 0) {
		if (cnp->cn_consume >= slashes) {
			cnp->cn_flags &= ~REQUIREDIR;
		}

		ndp->ni_pathlen -= cnp->cn_consume - slashes;
		ndp->ni_next += cnp->cn_consume - slashes;
		cnp->cn_consume = 0;
		if (ndp->ni_next[0] == '\0')
			cnp->cn_flags |= ISLASTCN;
	}

	dp = ndp->ni_vp;
	/*
	 * Check to see if the vnode has been mounted on;
	 * if so find the root of the mounted file system.
	 */
	while (dp->v_type == VDIR && (mp = dp->v_mountedhere) &&
	    (cnp->cn_flags & NOCROSSMOUNT) == 0) {
		if (vfs_busy(mp, VB_READ|VB_WAIT))
			continue;
		VOP_UNLOCK(dp, 0, p);
		error = VFS_ROOT(mp, &tdp);
		vfs_unbusy(mp);
		if (error) {
			dpunlocked = 1;
			goto bad2;
		}
		vrele(dp);
		ndp->ni_vp = dp = tdp;
	}

	/*
	 * Check for symbolic link.  Back up over any slashes that we skipped,
	 * as we will need them again.
	 */
	if ((dp->v_type == VLNK) && (cnp->cn_flags & (FOLLOW|REQUIREDIR))) {
		ndp->ni_pathlen += slashes;
		ndp->ni_next -= slashes;
		cnp->cn_flags |= ISSYMLINK;
		return (0);
	}

	/*
	 * Check for directory, if the component was followed by a series of
	 * slashes.
	 */
	if ((dp->v_type != VDIR) && (cnp->cn_flags & REQUIREDIR)) {
		error = ENOTDIR;
		goto bad2;
	}

nextname:
	/*
	 * Not a symbolic link.  If this was not the last component, then
	 * continue at the next component, else return.
	 */
	if (!(cnp->cn_flags & ISLASTCN)) {
		cnp->cn_nameptr = ndp->ni_next;
		vrele(ndp->ni_dvp);
		goto dirloop;
	}

terminal:
	/*
	 * Check for read-only file systems.
	 */
	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME) {
		/*
		 * Disallow directory write attempts on read-only
		 * file systems.
		 */
		if (rdonly || (dp->v_mount->mnt_flag & MNT_RDONLY) ||
		    (wantparent &&
		    (ndp->ni_dvp->v_mount->mnt_flag & MNT_RDONLY))) {
			error = EROFS;
			goto bad2;
		}
	}
	if (ndp->ni_dvp != NULL) {
		if (cnp->cn_flags & SAVESTART) {
			ndp->ni_startdir = ndp->ni_dvp;
			vref(ndp->ni_startdir);
		}
		if (!wantparent)
			vrele(ndp->ni_dvp);
	}
	if ((cnp->cn_flags & LOCKLEAF) == 0)
		VOP_UNLOCK(dp, 0, p);
	return (0);

bad2:
	if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN) &&
	    ((cnp->cn_flags & PDIRUNLOCK) == 0))
		VOP_UNLOCK(ndp->ni_dvp, 0, p);
	vrele(ndp->ni_dvp);
bad:
	if (dpunlocked)
		vrele(dp);
	else
		vput(dp);
	ndp->ni_vp = NULL;
	return (error);
}
Example #22
static int
vfs_mountroot_shuffle(struct thread *td, struct mount *mpdevfs)
{
	struct nameidata nd;
	struct mount *mporoot, *mpnroot;
	struct vnode *vp, *vporoot, *vpdevfs;
	char *fspath;
	int error;

	mpnroot = TAILQ_NEXT(mpdevfs, mnt_list);

	/* Shuffle the mountlist. */
	mtx_lock(&mountlist_mtx);
	mporoot = TAILQ_FIRST(&mountlist);
	TAILQ_REMOVE(&mountlist, mpdevfs, mnt_list);
	if (mporoot != mpdevfs) {
		TAILQ_REMOVE(&mountlist, mpnroot, mnt_list);
		TAILQ_INSERT_HEAD(&mountlist, mpnroot, mnt_list);
	}
	TAILQ_INSERT_TAIL(&mountlist, mpdevfs, mnt_list);
	mtx_unlock(&mountlist_mtx);

	cache_purgevfs(mporoot);
	if (mporoot != mpdevfs)
		cache_purgevfs(mpdevfs);

	VFS_ROOT(mporoot, LK_EXCLUSIVE, &vporoot);

	VI_LOCK(vporoot);
	vporoot->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vporoot);
	vporoot->v_mountedhere = NULL;
	mporoot->mnt_flag &= ~MNT_ROOTFS;
	mporoot->mnt_vnodecovered = NULL;
	vput(vporoot);

	/* Set up the new rootvnode, and purge the cache */
	mpnroot->mnt_vnodecovered = NULL;
	set_rootvnode();
	cache_purgevfs(rootvnode->v_mount);

	if (mporoot != mpdevfs) {
		/* Remount old root under /.mount or /mnt */
		fspath = "/.mount";
		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
		    fspath, td);
		error = namei(&nd);
		if (error) {
			NDFREE(&nd, NDF_ONLY_PNBUF);
			fspath = "/mnt";
			NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
			    fspath, td);
			error = namei(&nd);
		}
		if (!error) {
			vp = nd.ni_vp;
			error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
			if (!error)
				error = vinvalbuf(vp, V_SAVE, 0, 0);
			if (!error) {
				cache_purge(vp);
				mporoot->mnt_vnodecovered = vp;
				vp->v_mountedhere = mporoot;
				strlcpy(mporoot->mnt_stat.f_mntonname,
				    fspath, MNAMELEN);
				VOP_UNLOCK(vp, 0);
			} else
				vput(vp);
		}
		NDFREE(&nd, NDF_ONLY_PNBUF);

		if (error && bootverbose)
			printf("mountroot: unable to remount previous root "
			    "under /.mount or /mnt (error %d).\n", error);
	}

	/* Remount devfs under /dev */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, "/dev", td);
	error = namei(&nd);
	if (!error) {
		vp = nd.ni_vp;
		error = (vp->v_type == VDIR) ? 0 : ENOTDIR;
		if (!error)
			error = vinvalbuf(vp, V_SAVE, 0, 0);
		if (!error) {
			vpdevfs = mpdevfs->mnt_vnodecovered;
			if (vpdevfs != NULL) {
				cache_purge(vpdevfs);
				vpdevfs->v_mountedhere = NULL;
				vrele(vpdevfs);
			}
			mpdevfs->mnt_vnodecovered = vp;
			vp->v_mountedhere = mpdevfs;
			VOP_UNLOCK(vp, 0);
		} else
			vput(vp);
	}
	if (error && bootverbose)
		printf("mountroot: unable to remount devfs under /dev "
		    "(error %d).\n", error);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	if (mporoot == mpdevfs) {
		vfs_unbusy(mpdevfs);
		/* Unlink the no longer needed /dev/dev -> / symlink */
		error = kern_unlink(td, "/dev/dev", UIO_SYSSPACE);
		if (error && bootverbose)
			printf("mountroot: unable to unlink /dev/dev "
			    "(error %d)\n", error);
	}

	return (0);
}
Example #23
/*
 * Triggers the mount if needed. If the mount has been triggered by
 * another thread, it will wait for its return status, and return it.
 * Whether the mount is triggered by this thread, another thread, or
 * if the vnode was already covered, '*newvp' is a
 * VN_HELD vnode pointing to the root of the filesystem covering 'vp'.
 * If the node is not mounted on, and should not be mounted on, '*newvp'
 * will be NULL.
 * The calling routine may use '*newvp' to do the filesystem jump.
 */
static int
auto_trigger_mount(vnode_t *vp, cred_t *cred, vnode_t **newvp)
{
	fnnode_t *fnp = vntofn(vp);
	fninfo_t *fnip = vfstofni(vp->v_vfsp);
	vnode_t *dvp;
	vfs_t *vfsp;
	int delayed_ind;
	char name[AUTOFS_MAXPATHLEN];
	int error;

	AUTOFS_DPRINT((4, "auto_trigger_mount: vp=%p\n", (void *)vp));

	*newvp = NULL;

	/*
	 * Cross-zone mount triggering is disallowed.
	 */
	if (fnip->fi_zoneid != getzoneid())
		return (EPERM);	/* Not owner of mount */

retry:
	error = 0;
	delayed_ind = 0;
	mutex_enter(&fnp->fn_lock);
	while (fnp->fn_flags & (MF_LOOKUP | MF_INPROG)) {
		/*
		 * Mount or lookup in progress,
		 * wait for it before proceeding.
		 */
		mutex_exit(&fnp->fn_lock);
		error = auto_wait4mount(fnp);
		if (error == AUTOFS_SHUTDOWN) {
			error = 0;
			goto done;
		}
		if (error && error != EAGAIN)
			goto done;
		error = 0;
		mutex_enter(&fnp->fn_lock);
	}

	/*
	 * If the vfslock can't be acquired for the first time.
	 * drop the fn_lock and retry next time in blocking mode.
	 */
	if (vn_vfswlock(vp)) {
		/*
		 * Lock held by another thread.
		 * Perform blocking by dropping the
		 * fn_lock.
		 */
		mutex_exit(&fnp->fn_lock);
		error = vn_vfswlock_wait(vp);
		if (error)
			goto done;
		/*
		 * Because fn_lock wasn't held, the state
		 * of the trigger node might have changed.
		 * Need to run through the checks on trigger
		 * node again.
		 */
		vn_vfsunlock(vp);
		goto retry;
	}

	vfsp = vn_mountedvfs(vp);
	if (vfsp != NULL) {
		mutex_exit(&fnp->fn_lock);
		error = VFS_ROOT(vfsp, newvp);
		vn_vfsunlock(vp);
		goto done;
	} else {
		vn_vfsunlock(vp);
		if ((fnp->fn_flags & MF_MOUNTPOINT) &&
		    fnp->fn_trigger != NULL) {
			ASSERT(fnp->fn_dirents == NULL);
			mutex_exit(&fnp->fn_lock);
			/*
			 * The filesystem that used to sit here has been
			 * forcibly unmounted. Do our best to recover.
			 * Try to unmount autofs subtree below this node
			 * and retry the action.
			 */
			if (unmount_subtree(fnp, B_TRUE) != 0) {
				error = EIO;
				goto done;
			}
			goto retry;
		}
	}

	ASSERT(vp->v_type == VDIR);
	dvp = fntovn(fnp->fn_parent);

	if ((fnp->fn_dirents == NULL) &&
	    ((fnip->fi_flags & MF_DIRECT) == 0) &&
	    ((vp->v_flag & VROOT) == 0) &&
	    (dvp->v_flag & VROOT)) {
		/*
		 * If the parent of this node is the root of an indirect
		 * AUTOFS filesystem, this node is remountable.
		 */
		delayed_ind = 1;
	}

	if (delayed_ind ||
	    ((fnip->fi_flags & MF_DIRECT) && (fnp->fn_dirents == NULL))) {
		/*
		 * Trigger mount since:
		 * direct mountpoint with no subdirs or
		 * delayed indirect.
		 */
		AUTOFS_BLOCK_OTHERS(fnp, MF_INPROG);
		fnp->fn_error = 0;
		mutex_exit(&fnp->fn_lock);
		if (delayed_ind)
			(void) strcpy(name, fnp->fn_name);
		else
			(void) strcpy(name, ".");
		fnp->fn_ref_time = gethrestime_sec();
		auto_new_mount_thread(fnp, name, cred);
		/*
		 * At this point we're simply another thread waiting
		 * for the mount to finish.
		 */
		error = auto_wait4mount(fnp);
		if (error == EAGAIN)
			goto retry;
		if (error == AUTOFS_SHUTDOWN) {
			error = 0;
			goto done;
		}
		if (error == 0) {
			if (error = vn_vfsrlock_wait(vp))
				goto done;
			/* Reacquire after dropping locks */
			vfsp = vn_mountedvfs(vp);
			if (vfsp != NULL) {
				error = VFS_ROOT(vfsp, newvp);
				vn_vfsunlock(vp);
			} else {
				vn_vfsunlock(vp);
				goto retry;
			}
		}
	} else
		mutex_exit(&fnp->fn_lock);

done:
	AUTOFS_DPRINT((5, "auto_trigger_mount: error=%d\n", error));
	return (error);
}
Example #24
int
adosfs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
{
	struct disklabel dl;
	struct partition *parp;
	struct adosfsmount *amp;
	struct buf *bp;
	struct vnode *rvp;
	size_t bitmap_sz = 0;
	int error, i;
	uint64_t numsecs;
	unsigned secsize;
	unsigned long secsperblk, blksperdisk, resvblks;

	amp = NULL;

	if ((error = vinvalbuf(devvp, V_SAVE, l->l_cred, l, 0, 0)) != 0)
		return (error);

	/*
	 * open blkdev and read boot and root block
	 */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if ((error = VOP_OPEN(devvp, FREAD, NOCRED)) != 0) {
		VOP_UNLOCK(devvp);
		return (error);
	}

	error = getdisksize(devvp, &numsecs, &secsize);
	if (error)
		goto fail;

	amp = kmem_zalloc(sizeof(struct adosfsmount), KM_SLEEP);

	/*
	 * compute filesystem parameters from disklabel
	 * on arch/amiga the disklabel is computed from the native
	 * partition tables
	 * - p_fsize is the filesystem block size
	 * - p_frag is the number of sectors per filesystem block
	 * - p_cpg is the number of reserved blocks (boot blocks)
	 * - p_psize is reduced by the number of preallocated blocks
	 *           at the end of a partition
	 *
	 * XXX
	 * - bsize and secsperblk could be computed from the first sector
	 *   of the root block
	 * - resvblks (the number of boot blocks) can only be guessed
	 *   by scanning for the root block as its position moves
	 *   with resvblks
	 */
	error = VOP_IOCTL(devvp, DIOCGDINFO, &dl, FREAD, NOCRED);
	VOP_UNLOCK(devvp);
	if (error)
		goto fail;
	parp = &dl.d_partitions[DISKPART(devvp->v_rdev)];
	if (dl.d_type == DTYPE_FLOPPY) {
		amp->bsize = secsize;
		secsperblk = 1;
		resvblks   = 2;
	} else if (parp->p_fsize > 0 && parp->p_frag > 0) {
		amp->bsize = parp->p_fsize * parp->p_frag;
		secsperblk = parp->p_frag;
		resvblks   = parp->p_cpg;
	} else {
		error = EINVAL;
		goto fail;
	}
	blksperdisk = numsecs / secsperblk;


	/* The filesystem variant ('dostype') is stored in the boot block */
	bp = NULL;
	if ((error = bread(devvp, (daddr_t)BBOFF,
			   amp->bsize, NOCRED, 0, &bp)) != 0) {
		goto fail;
	}
	amp->dostype = adoswordn(bp, 0);
	brelse(bp, 0);

	/* basic sanity checks */
	if (amp->dostype < 0x444f5300 || amp->dostype > 0x444f5305) {
		error = EINVAL;
		goto fail;
	}

	amp->rootb = (blksperdisk - 1 + resvblks) / 2;
	amp->numblks = blksperdisk - resvblks;

	amp->nwords = amp->bsize >> 2;
	amp->dbsize = amp->bsize - (IS_FFS(amp) ? 0 : OFS_DATA_OFFSET);
	amp->devvp = devvp;

	amp->mp = mp;
	mp->mnt_data = amp;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)devvp->v_rdev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_ADOSFS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = ADMAXNAMELEN;
	mp->mnt_fs_bshift = ffs(amp->bsize) - 1;
	mp->mnt_dev_bshift = DEV_BSHIFT;
	mp->mnt_flag |= MNT_LOCAL;

	/*
	 * init anode table.
	 */
	for (i = 0; i < ANODEHASHSZ; i++)
		LIST_INIT(&amp->anodetab[i]);

	/*
	 * get the root anode, if not a valid fs this will fail.
	 */
	if ((error = VFS_ROOT(mp, &rvp)) != 0)
		goto fail;
	/* allocate and load bitmap, set free space */
	bitmap_sz = ((amp->numblks + 31) / 32) * sizeof(*amp->bitmap);
	amp->bitmap = kmem_alloc(bitmap_sz, KM_SLEEP);
	if (amp->bitmap)
		adosfs_loadbitmap(amp);
	if (mp->mnt_flag & MNT_RDONLY && amp->bitmap) {
		/*
		 * Don't need the bitmap any more if it's read-only.
		 */
		kmem_free(amp->bitmap, bitmap_sz);
		amp->bitmap = NULL;
	}
	vput(rvp);

	return(0);

fail:
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	(void) VOP_CLOSE(devvp, FREAD, NOCRED);
	VOP_UNLOCK(devvp);
	if (amp && amp->bitmap)
		kmem_free(amp->bitmap, bitmap_sz);
	if (amp)
		kmem_free(amp, sizeof(*amp));
	return (error);
}
Example #25
static int
auto_getattr(
	vnode_t *vp,
	vattr_t *vap,
	int flags,
	cred_t *cred,
	caller_context_t *ct)
{
	fnnode_t *fnp = vntofn(vp);
	vnode_t *newvp;
	vfs_t *vfsp;
	int error;

	AUTOFS_DPRINT((4, "auto_getattr vp %p\n", (void *)vp));

	if (flags & ATTR_TRIGGER) {
		/*
		 * Pre-trigger the mount
		 */
		error = auto_trigger_mount(vp, cred, &newvp);
		if (error)
			return (error);

		if (newvp == NULL)
			goto defattr;

		if (error = vn_vfsrlock_wait(vp)) {
			VN_RELE(newvp);
			return (error);
		}

		vfsp = newvp->v_vfsp;
		VN_RELE(newvp);
	} else {
		/*
		 * Recursive auto_getattr/mount; go to the vfsp == NULL
		 * case.
		 */
		if (vn_vfswlock_held(vp))
			goto defattr;

		if (error = vn_vfsrlock_wait(vp))
			return (error);

		vfsp = vn_mountedvfs(vp);
	}

	if (vfsp != NULL) {
		/*
		 * Node is mounted on.
		 */
		error = VFS_ROOT(vfsp, &newvp);
		vn_vfsunlock(vp);
		if (error)
			return (error);
		mutex_enter(&fnp->fn_lock);
		if (fnp->fn_seen == newvp && fnp->fn_thread == curthread) {
			/*
			 * Recursive auto_getattr(); just release newvp and drop
			 * into the vfsp == NULL case.
			 */
			mutex_exit(&fnp->fn_lock);
			VN_RELE(newvp);
		} else {
			while (fnp->fn_thread && fnp->fn_thread != curthread) {
				fnp->fn_flags |= MF_ATTR_WAIT;
				cv_wait(&fnp->fn_cv_mount, &fnp->fn_lock);
			}
			fnp->fn_thread = curthread;
			fnp->fn_seen = newvp;
			mutex_exit(&fnp->fn_lock);
			error = VOP_GETATTR(newvp, vap, flags, cred, ct);
			VN_RELE(newvp);
			mutex_enter(&fnp->fn_lock);
			fnp->fn_seen = 0;
			fnp->fn_thread = 0;
			if (fnp->fn_flags & MF_ATTR_WAIT) {
				fnp->fn_flags &= ~MF_ATTR_WAIT;
				cv_broadcast(&fnp->fn_cv_mount);
			}
			mutex_exit(&fnp->fn_lock);
			return (error);
		}
	} else {
		vn_vfsunlock(vp);
	}

defattr:
	ASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
	vap->va_uid	= 0;
	vap->va_gid	= 0;
	vap->va_nlink	= fnp->fn_linkcnt;
	vap->va_nodeid	= (u_longlong_t)fnp->fn_nodeid;
	vap->va_size	= fnp->fn_size;
	vap->va_atime	= fnp->fn_atime;
	vap->va_mtime	= fnp->fn_mtime;
	vap->va_ctime	= fnp->fn_ctime;
	vap->va_type	= vp->v_type;
	vap->va_mode	= fnp->fn_mode;
	vap->va_fsid	= vp->v_vfsp->vfs_dev;
	vap->va_rdev	= 0;
	vap->va_blksize	= MAXBSIZE;
	vap->va_nblocks	= (fsblkcnt64_t)btod(vap->va_size);
	vap->va_seq	= 0;

	return (0);
}
Example #26
void
bsd_init(void)
{
	struct uthread *ut;
	unsigned int i;
	struct vfs_context context;
	kern_return_t	ret;
	struct ucred temp_cred;
	struct posix_cred temp_pcred;
#if NFSCLIENT || CONFIG_IMAGEBOOT
	boolean_t       netboot = FALSE;
#endif

#define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */

	throttle_init();

	printf(copyright);
	
	bsd_init_kprintf("calling kmeminit\n");
	kmeminit();
	
	bsd_init_kprintf("calling parse_bsd_args\n");
	parse_bsd_args();

#if CONFIG_DEV_KMEM
	bsd_init_kprintf("calling dev_kmem_init\n");
	dev_kmem_init();
#endif

	/* Initialize kauth subsystem before instancing the first credential */
	bsd_init_kprintf("calling kauth_init\n");
	kauth_init();

	/* Initialize process and pgrp structures. */
	bsd_init_kprintf("calling procinit\n");
	procinit();

	/* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/
	tty_init();

	kernproc = &proc0;	/* implicitly bzero'ed */

	/* kernel_task->proc = kernproc; */
	set_bsdtask_info(kernel_task,(void *)kernproc);

	/* give kernproc a name */
	bsd_init_kprintf("calling process_name\n");
	process_name("kernel_task", kernproc);

	/* allocate proc lock group attribute and group */
	bsd_init_kprintf("calling lck_grp_attr_alloc_init\n");
	proc_lck_grp_attr= lck_grp_attr_alloc_init();

	proc_lck_grp = lck_grp_alloc_init("proc",  proc_lck_grp_attr);
#if CONFIG_FINE_LOCK_GROUPS
	proc_slock_grp = lck_grp_alloc_init("proc-slock",  proc_lck_grp_attr);
	proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock",  proc_lck_grp_attr);
	proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock",  proc_lck_grp_attr);
	proc_mlock_grp = lck_grp_alloc_init("proc-mlock",  proc_lck_grp_attr);
#endif
	/* Allocate proc lock attribute */
	proc_lck_attr = lck_attr_alloc_init();
#if 0
#if __PROC_INTERNAL_DEBUG
	lck_attr_setdebug(proc_lck_attr);
#endif
#endif

#if CONFIG_FINE_LOCK_GROUPS
	proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr);
#else
	proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_lck_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr);
#endif

	assert(bsd_simul_execs != 0);
	execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	execargs_cache_size = bsd_simul_execs;
	execargs_free_count = bsd_simul_execs;
	execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t));
	bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t));
	
	if (current_task() != kernel_task)
		printf("bsd_init: We have a problem, "
				"current task is not kernel task\n");
	
	bsd_init_kprintf("calling get_bsdthread_info\n");
	ut = (uthread_t)get_bsdthread_info(current_thread());

#if CONFIG_MACF
	/*
	 * Initialize the MAC Framework
	 */
	mac_policy_initbsd();
	kernproc->p_mac_enforce = 0;

#if defined (__i386__) || defined (__x86_64__)
	/*
	 * We currently only support this on i386/x86_64, as that is the
	 * only lock code we have instrumented so far.
	 */
	check_policy_init(policy_check_flags);
#endif
#endif /* MAC */

	/* Initialize System Override call */
	init_system_override();
	
	/*
	 * Create process 0.
	 */
	proc_list_lock();
	LIST_INSERT_HEAD(&allproc, kernproc, p_list);
	kernproc->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
	/* There is no other bsd thread at this point and it is safe without the pgrp lock */
	LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist);
	kernproc->p_listflag |= P_LIST_INPGRP;
	kernproc->p_pgrpid = 0;
	kernproc->p_uniqueid = 0;

	pgrp0.pg_session = &session0;
	pgrp0.pg_membercnt = 1;

	session0.s_count = 1;
	session0.s_leader = kernproc;
	session0.s_listflags = 0;
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr);
#endif
	LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash);
	proc_list_unlock();

	kernproc->task = kernel_task;
	
	kernproc->p_stat = SRUN;
	kernproc->p_flag = P_SYSTEM;
	kernproc->p_lflag = 0;
	kernproc->p_ladvflag = 0;
	
#if DEVELOPMENT || DEBUG
	if (bootarg_disable_aslr)
		kernproc->p_flag |= P_DISABLE_ASLR;
#endif

	kernproc->p_nice = NZERO;
	kernproc->p_pptr = kernproc;

	TAILQ_INIT(&kernproc->p_uthlist);
	TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list);
	
	kernproc->sigwait = FALSE;
	kernproc->sigwait_thread = THREAD_NULL;
	kernproc->exit_thread = THREAD_NULL;
	kernproc->p_csflags = CS_VALID;

	/*
	 * Create credential.  This also Initializes the audit information.
	 */
	bsd_init_kprintf("calling bzero\n");
	bzero(&temp_cred, sizeof(temp_cred));
	bzero(&temp_pcred, sizeof(temp_pcred));
	temp_pcred.cr_ngroups = 1;
	/* kern_proc, shouldn't call up to DS for group membership */
	temp_pcred.cr_flags = CRF_NOMEMBERD;
	temp_cred.cr_audit.as_aia_p = audit_default_aia_p;
	
	bsd_init_kprintf("calling kauth_cred_create\n");
	/*
	 * We have to label the temp cred before we create from it to
	 * properly set cr_ngroups, or the create will fail.
	 */
	posix_cred_label(&temp_cred, &temp_pcred);
	kernproc->p_ucred = kauth_cred_create(&temp_cred); 

	/* update cred on proc */
	PROC_UPDATE_CREDS_ONPROC(kernproc);

	/* give the (already existing) initial thread a reference on it */
	bsd_init_kprintf("calling kauth_cred_ref\n");
	kauth_cred_ref(kernproc->p_ucred);
	ut->uu_context.vc_ucred = kernproc->p_ucred;
	ut->uu_context.vc_thread = current_thread();

	TAILQ_INIT(&kernproc->p_aio_activeq);
	TAILQ_INIT(&kernproc->p_aio_doneq);
	kernproc->p_aio_total_count = 0;
	kernproc->p_aio_active_count = 0;

	bsd_init_kprintf("calling file_lock_init\n");
	file_lock_init();

#if CONFIG_MACF
	mac_cred_label_associate_kernel(kernproc->p_ucred);
#endif

	/* Create the file descriptor table. */
	kernproc->p_fd = &filedesc0;
	filedesc0.fd_cmask = cmask;
	filedesc0.fd_knlistsize = -1;
	filedesc0.fd_knlist = NULL;
	filedesc0.fd_knhash = NULL;
	filedesc0.fd_knhashmask = 0;

	/* Create the limits structures. */
	kernproc->p_limit = &limit0;
	for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur = 
			limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack;
	limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data;
	limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core;
	limit0.pl_refcnt = 1;

	kernproc->p_stats = &pstats0;
	kernproc->p_sigacts = &sigacts0;

	/*
	 * Charge root for one process: launchd.
	 */
	bsd_init_kprintf("calling chgproccnt\n");
	(void)chgproccnt(0, 1);

	/*
	 *	Allocate a kernel submap for pageable memory
	 *	for temporary copying (execve()).
	 */
	{
		vm_offset_t	minimum;

		bsd_init_kprintf("calling kmem_suballoc\n");
		assert(bsd_pageable_map_size != 0);
		ret = kmem_suballoc(kernel_map,
				&minimum,
				(vm_size_t)bsd_pageable_map_size,
				TRUE,
				VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_BSD),
				&bsd_pageable_map);
		if (ret != KERN_SUCCESS) 
			panic("bsd_init: Failed to allocate bsd pageable map");
	}

	/*
	 * Initialize buffers and hash links for buffers
	 *
	 * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must
	 *		happen after a credential has been associated with
	 *		the kernel task.
	 */
	bsd_init_kprintf("calling bsd_bufferinit\n");
	bsd_bufferinit();

	/* Initialize the execve() semaphore */
	bsd_init_kprintf("calling semaphore_create\n");

	if (ret != KERN_SUCCESS)
		panic("bsd_init: Failed to create execve semaphore");

	/*
	 * Initialize the calendar.
	 */
	bsd_init_kprintf("calling IOKitInitializeTime\n");
	IOKitInitializeTime();

	bsd_init_kprintf("calling ubc_init\n");
	ubc_init();

	/*
	 * Initialize device-switches.
	 */
	bsd_init_kprintf("calling devsw_init() \n");
	devsw_init();

	/* Initialize the file systems. */
	bsd_init_kprintf("calling vfsinit\n");
	vfsinit();

#if CONFIG_PROC_UUID_POLICY
	/* Initialize the proc_uuid_policy subsystem */
	bsd_init_kprintf("calling proc_uuid_policy_init()\n");
	proc_uuid_policy_init();
#endif

#if SOCKETS
	/* Initialize per-CPU cache allocator */
	mcache_init();

	/* Initialize mbuf's. */
	bsd_init_kprintf("calling mbinit\n");
	mbinit();
	net_str_id_init(); /* for mbuf tags */
#endif /* SOCKETS */

	/*
	 * Initializes security event auditing.
	 * XXX: Should/could this occur later?
	 */
#if CONFIG_AUDIT
	bsd_init_kprintf("calling audit_init\n");
 	audit_init();  
#endif

	/* Initialize kqueues */
	bsd_init_kprintf("calling knote_init\n");
	knote_init();

	/* Initialize for async IO */
	bsd_init_kprintf("calling aio_init\n");
	aio_init();

	/* Initialize pipes */
	bsd_init_kprintf("calling pipeinit\n");
	pipeinit();

	/* Initialize SysV shm subsystem locks; the subsystem proper is
	 * initialized through a sysctl.
	 */
#if SYSV_SHM
	bsd_init_kprintf("calling sysv_shm_lock_init\n");
	sysv_shm_lock_init();
#endif
#if SYSV_SEM
	bsd_init_kprintf("calling sysv_sem_lock_init\n");
	sysv_sem_lock_init();
#endif
#if SYSV_MSG
	bsd_init_kprintf("sysv_msg_lock_init\n");
	sysv_msg_lock_init();
#endif
	bsd_init_kprintf("calling pshm_lock_init\n");
	pshm_lock_init();
	bsd_init_kprintf("calling psem_lock_init\n");
	psem_lock_init();

	pthread_init();
	/* POSIX Shm and Sem */
	bsd_init_kprintf("calling pshm_cache_init\n");
	pshm_cache_init();
	bsd_init_kprintf("calling psem_cache_init\n");
	psem_cache_init();
	bsd_init_kprintf("calling time_zone_slock_init\n");
	time_zone_slock_init();
	bsd_init_kprintf("calling select_waitq_init\n");
	select_waitq_init();

	/*
	 * Initialize protocols.  Block reception of incoming packets
	 * until everything is ready.
	 */
	bsd_init_kprintf("calling sysctl_register_fixed\n");
	sysctl_register_fixed(); 
	bsd_init_kprintf("calling sysctl_mib_init\n");
	sysctl_mib_init();
#if NETWORKING
	bsd_init_kprintf("calling dlil_init\n");
	dlil_init();
	bsd_init_kprintf("calling proto_kpi_init\n");
	proto_kpi_init();
#endif /* NETWORKING */
#if SOCKETS
	bsd_init_kprintf("calling socketinit\n");
	socketinit();
	bsd_init_kprintf("calling domaininit\n");
	domaininit();
	iptap_init();
#if FLOW_DIVERT
	flow_divert_init();
#endif	/* FLOW_DIVERT */
#endif /* SOCKETS */

	kernproc->p_fd->fd_cdir = NULL;
	kernproc->p_fd->fd_rdir = NULL;

#if CONFIG_FREEZE
#ifndef CONFIG_MEMORYSTATUS
    #error "CONFIG_FREEZE defined without matching CONFIG_MEMORYSTATUS"
#endif
	/* Initialise background freezing */
	bsd_init_kprintf("calling memorystatus_freeze_init\n");
	memorystatus_freeze_init();
#endif

#if CONFIG_MEMORYSTATUS
	/* Initialize kernel memory status notifications */
	bsd_init_kprintf("calling memorystatus_init\n");
	memorystatus_init();
#endif /* CONFIG_MEMORYSTATUS */

	bsd_init_kprintf("calling macx_init\n");
	macx_init();

	bsd_init_kprintf("calling acct_init\n");
	acct_init();

#ifdef GPROF
	/* Initialize kernel profiling. */
	kmstartup();
#endif

	bsd_init_kprintf("calling bsd_autoconf\n");
	bsd_autoconf();

#if CONFIG_DTRACE
	dtrace_postinit();
#endif

	/*
	 * We attach the loopback interface *way* down here to ensure
	 * it happens after autoconf(), otherwise it becomes the
	 * "primary" interface.
	 */
#include <loop.h>
#if NLOOP > 0
	bsd_init_kprintf("calling loopattach\n");
	loopattach();			/* XXX */
#endif
#if NGIF
	/* Initialize gif interface (after lo0) */
	gif_init();
#endif

#if PFLOG
	/* Initialize packet filter log interface */
	pfloginit();
#endif /* PFLOG */

#if NETHER > 0
	/* Register the built-in dlil ethernet interface family */
	bsd_init_kprintf("calling ether_family_init\n");
	ether_family_init();
#endif /* ETHER */

#if NETWORKING
	/* Call any kext code that wants to run just after network init */
	bsd_init_kprintf("calling net_init_run\n");
	net_init_run();
	
#if CONTENT_FILTER
	cfil_init();
#endif

#if PACKET_MANGLER
	pkt_mnglr_init();
#endif	

#if NECP
	/* Initialize Network Extension Control Policies */
	necp_init();
#endif

	netagent_init();

	/* register user tunnel kernel control handler */
	utun_register_control();
#if IPSEC
	ipsec_register_control();
#endif /* IPSEC */
	netsrc_init();
	nstat_init();
	tcp_cc_init();
#if MPTCP
	mptcp_control_register();
#endif /* MPTCP */
#endif /* NETWORKING */

	bsd_init_kprintf("calling vnode_pager_bootstrap\n");
	vnode_pager_bootstrap();

	bsd_init_kprintf("calling inittodr\n");
	inittodr(0);

	/* Mount the root file system. */
	while( TRUE) {
		int err;

		bsd_init_kprintf("calling setconf\n");
		setconf();
#if NFSCLIENT
		netboot = (mountroot == netboot_mountroot);
#endif

		bsd_init_kprintf("vfs_mountroot\n");
		if (0 == (err = vfs_mountroot()))
			break;
		rootdevice[0] = '\0';
#if NFSCLIENT
		if (netboot) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: failed to mount network root, error %d, %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
#endif
		printf("cannot mount root, errno = %d\n", err);
		boothowto |= RB_ASKNAME;
	}

	IOSecureBSDRoot(rootdevice);

	context.vc_thread = current_thread();
	context.vc_ucred = kernproc->p_ucred;
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	bsd_init_kprintf("calling VFS_ROOT\n");
	/* Get the vnode for '/'.  Set fdp->fd_fd.fd_cdir to reference it. */
	if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context))
		panic("bsd_init: cannot find root vnode: %s", PE_boot_args());
	rootvnode->v_flag |= VROOT;
	(void)vnode_ref(rootvnode);
	(void)vnode_put(rootvnode);
	filedesc0.fd_cdir = rootvnode;

#if NFSCLIENT
	if (netboot) {
		int err;

		netboot = TRUE;
		/* post mount setup */
		if ((err = netboot_setup()) != 0) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: NetBoot could not find root, error %d: %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
	}
#endif
	

#if CONFIG_IMAGEBOOT
	/*
	 * See if a system disk image is present. If so, mount it and
	 * switch the root vnode to point to it
	 */ 
	if (netboot == FALSE && imageboot_needed()) {
		/* 
		 * An image was found.  No turning back: we're booted
		 * with a kernel from the disk image.
		 */
		imageboot_setup(); 
	}
#endif /* CONFIG_IMAGEBOOT */
  
	/* set initial time; all other resource data is already zeroed */
	microtime_with_abstime(&kernproc->p_start, &kernproc->p_stats->ps_start);

#if DEVFS
	{
	    char mounthere[] = "/dev";	/* !const because of internal casting */

	    bsd_init_kprintf("calling devfs_kernel_mount\n");
	    devfs_kernel_mount(mounthere);
	}
#endif /* DEVFS */

	/* Initialize signal state for process 0. */
	bsd_init_kprintf("calling siginit\n");
	siginit(kernproc);

	bsd_init_kprintf("calling bsd_utaskbootstrap\n");
	bsd_utaskbootstrap();

#if defined(__LP64__)
	kernproc->p_flag |= P_LP64;
#endif

	pal_kernel_announce();

	bsd_init_kprintf("calling mountroot_post_hook\n");

	/* invoke post-root-mount hook */
	if (mountroot_post_hook != NULL)
		mountroot_post_hook();

#if 0 /* not yet */
	consider_zone_gc(FALSE);
#endif


	bsd_init_kprintf("done\n");
}
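
The root-mount portion of bsd_init() above follows the same pattern as the other examples in this collection: mount the root file system, ask the first entry on the mount list for its root vnode via VFS_ROOT(), then publish that vnode as rootvnode and as the kernel process' current directory. The following condensed sketch restates just that sequence using the calls seen above; mount_and_publish_root() is a hypothetical helper name, error handling is reduced to panic(), and it is not a drop-in replacement for the real code.

static void
mount_and_publish_root(vfs_context_t ctx)
{
	vnode_t rvp;

	if (vfs_mountroot() != 0)
		panic("cannot mount root file system");

	/* The root mount is first on the mount list; mark it as such. */
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	/* VFS_ROOT() returns the root vnode with an iocount held. */
	if (VFS_ROOT(mountlist.tqh_first, &rvp, ctx))
		panic("cannot find root vnode");

	rvp->v_flag |= VROOT;
	(void)vnode_ref(rvp);	/* long-term usecount for rootvnode/fd_cdir */
	(void)vnode_put(rvp);	/* drop the short-term iocount */

	rootvnode = rvp;
	filedesc0.fd_cdir = rvp;
}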
Exemple #27
0
/*
 * Search a pathname.
 * This is a very central and rather complicated routine.
 *
 * The pathname is pointed to by ni_ptr and is of length ni_pathlen.
 * The starting directory is taken from ni_startdir. The pathname is
 * descended until done, or a symbolic link is encountered. The variable
 * ni_more is clear if the path is completed; it is set to one if a
 * symbolic link needing interpretation is encountered.
 *
 * The flag argument is LOOKUP, CREATE, RENAME, or DELETE depending on
 * whether the name is to be looked up, created, renamed, or deleted.
 * When CREATE, RENAME, or DELETE is specified, information usable in
 * creating, renaming, or deleting a directory entry may be calculated.
 * If flag has LOCKPARENT or'ed into it, the parent directory is returned
 * locked. If flag has WANTPARENT or'ed into it, the parent directory is
 * returned unlocked. Otherwise the parent directory is not returned. If
 * the target of the pathname exists and LOCKLEAF is or'ed into the flag
 * the target is returned locked, otherwise it is returned unlocked.
 * When creating or renaming and LOCKPARENT is specified, the target may not
 * be ".".  When deleting and LOCKPARENT is specified, the target may be ".".
 * 
 * Overall outline of lookup:
 *
 * dirloop:
 *	identify next component of name at ndp->ni_ptr
 *	handle degenerate case where name is null string
 *	if .. and crossing mount points and on mounted filesys, find parent
 *	call VNOP_LOOKUP routine for next component name
 *	    directory vnode returned in ni_dvp, unlocked unless LOCKPARENT set
 *	    component vnode returned in ni_vp (if it exists), locked.
 *	if result vnode is mounted on and crossing mount points,
 *	    find mounted on vnode
 *	if more components of name, do next level at dirloop
 *	return the answer in ni_vp, locked if LOCKLEAF set
 *	    if LOCKPARENT set, return locked parent in ni_dvp
 *	    if WANTPARENT set, return unlocked parent in ni_dvp
 *
 * Returns:	0			Success
 *		ENOENT			No such file or directory
 *		EBADF			Bad file descriptor
 *		ENOTDIR			Not a directory
 *		EROFS			Read-only file system [CREATE]
 *		EISDIR			Is a directory [CREATE]
 *		cache_lookup_path:ERECYCLE  (vnode was recycled from underneath us, redrive lookup again)
 *		vnode_authorize:EROFS
 *		vnode_authorize:EACCES
 *		vnode_authorize:EPERM
 *		vnode_authorize:???
 *		VNOP_LOOKUP:ENOENT	No such file or directory
 *		VNOP_LOOKUP:EJUSTRETURN	Restart system call (INTERNAL)
 *		VNOP_LOOKUP:???
 *		VFS_ROOT:ENOTSUP
 *		VFS_ROOT:ENOENT
 *		VFS_ROOT:???
 */
int
lookup(struct nameidata *ndp)
{
	char	*cp;		/* pointer into pathname argument */
	vnode_t		tdp;		/* saved dp */
	vnode_t		dp;		/* the directory we are searching */
	int docache = 1;		/* == 0 do not cache last component */
	int wantparent;			/* 1 => wantparent or lockparent flag */
	int rdonly;			/* lookup read-only flag bit */
	int dp_authorized = 0;
	int error = 0;
	struct componentname *cnp = &ndp->ni_cnd;
	vfs_context_t ctx = cnp->cn_context;
	int vbusyflags = 0;
	int nc_generation = 0;
	vnode_t last_dp = NULLVP;
	int keep_going;
	int atroot;

	/*
	 * Setup: break out flag bits into variables.
	 */
	if (cnp->cn_flags & (NOCACHE | DOWHITEOUT)) {
	        if ((cnp->cn_flags & NOCACHE) || (cnp->cn_nameiop == DELETE))
		        docache = 0;
	}
	wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT);
	rdonly = cnp->cn_flags & RDONLY;
	cnp->cn_flags &= ~ISSYMLINK;
	cnp->cn_consume = 0;

	dp = ndp->ni_startdir;
	ndp->ni_startdir = NULLVP;

	if ((cnp->cn_flags & CN_NBMOUNTLOOK) != 0)
			vbusyflags = LK_NOWAIT;
	cp = cnp->cn_nameptr;

	if (*cp == '\0') {
	        if ( (vnode_getwithref(dp)) ) {
			dp = NULLVP;
		        error = ENOENT;
			goto bad;
		}
		ndp->ni_vp = dp;
		error = lookup_handle_emptyname(ndp, cnp, wantparent);
		if (error) {
			goto bad;
		}

		return 0;
	}
dirloop: 
	atroot = 0;
	ndp->ni_vp = NULLVP;

	if ( (error = cache_lookup_path(ndp, cnp, dp, ctx, &dp_authorized, last_dp)) ) {
		dp = NULLVP;
		goto bad;
	}
	if ((cnp->cn_flags & ISLASTCN)) {
	        if (docache)
		        cnp->cn_flags |= MAKEENTRY;
	} else
	        cnp->cn_flags |= MAKEENTRY;

	dp = ndp->ni_dvp;

	if (ndp->ni_vp != NULLVP) {
	        /*
		 * If cache_lookup_path returned a non-NULL ni_vp, then we're
		 * guaranteed that dp is a VDIR, that it has been authorized,
		 * and that vp is not "..".
		 *
		 * Make sure we don't try to enter the name back into the
		 * cache if this vp is purged before we get to that check,
		 * since we won't have serialized behind whatever activity
		 * in the FS caused the purge.
		 */
	        if (dp != NULLVP)
		        nc_generation = dp->v_nc_generation - 1;

	        goto returned_from_lookup_path;
	}

	/*
	 * Handle "..": two special cases.
	 * 1. If at root directory (e.g. after chroot)
	 *    or at absolute root directory
	 *    then ignore it so can't get out.
	 * 2. If this vnode is the root of a mounted
	 *    filesystem, then replace it with the
	 *    vnode which was mounted on so we take the
	 *    .. in the other file system.
	 */
	if ( (cnp->cn_flags & ISDOTDOT) ) {
		for (;;) {
		        if (dp == ndp->ni_rootdir || dp == rootvnode) {
			        ndp->ni_dvp = dp;
				ndp->ni_vp = dp;
				/*
				 * we're pinned at the root
				 * we've already got one reference on 'dp'
				 * courtesy of cache_lookup_path... take
				 * another one for the ".."
				 * if we fail to get the new reference, we'll
				 * drop our original down in 'bad'
				 */
				if ( (vnode_get(dp)) ) {
					error = ENOENT;
					goto bad;
				}
				atroot = 1;
				goto returned_from_lookup_path;
			}
			if ((dp->v_flag & VROOT) == 0 ||
			    (cnp->cn_flags & NOCROSSMOUNT))
			        break;
			if (dp->v_mount == NULL) {	/* forced umount */
			        error = EBADF;
				goto bad;
			}
			tdp = dp;
			dp = tdp->v_mount->mnt_vnodecovered;

			vnode_put(tdp);

			if ( (vnode_getwithref(dp)) ) {
			        dp = NULLVP;
				error = ENOENT;
				goto bad;
			}
			ndp->ni_dvp = dp;
			dp_authorized = 0;
		}
	}

	/*
	 * We now have a segment name to search for, and a directory to search.
	 */
unionlookup:
	ndp->ni_vp = NULLVP;

	if (dp->v_type != VDIR) {
	        error = ENOTDIR;
	        goto lookup_error;
	}
	if ( (cnp->cn_flags & DONOTAUTH) != DONOTAUTH ) {
		error = lookup_authorize_search(dp, cnp, dp_authorized, ctx);
		if (error) {
			goto lookup_error;
		}
	}

	/*
	 * Now that we've authorized a lookup, can bail out if the filesystem
	 * will be doing a batched operation.  Return an iocount on dvp.
	 */
#if NAMEDRSRCFORK
	if ((cnp->cn_flags & ISLASTCN) && namei_compound_available(dp, ndp) && !(cnp->cn_flags & CN_WANTSRSRCFORK)) {
#else 
	if ((cnp->cn_flags & ISLASTCN) && namei_compound_available(dp, ndp)) {
#endif /* NAMEDRSRCFORK */
		ndp->ni_flag |= NAMEI_UNFINISHED;
		ndp->ni_ncgeneration = dp->v_nc_generation;
		return 0;
	}

        nc_generation = dp->v_nc_generation;

	error = VNOP_LOOKUP(dp, &ndp->ni_vp, cnp, ctx);


	if ( error ) {
lookup_error:
		if ((error == ENOENT) &&
		    (dp->v_flag & VROOT) && (dp->v_mount != NULL) &&
		    (dp->v_mount->mnt_flag & MNT_UNION)) {
#if CONFIG_VFS_FUNNEL
		        if ((cnp->cn_flags & FSNODELOCKHELD)) {
			        cnp->cn_flags &= ~FSNODELOCKHELD;
				unlock_fsnode(dp, NULL);
			}	
#endif /* CONFIG_VFS_FUNNEL */
			tdp = dp;
			dp = tdp->v_mount->mnt_vnodecovered;

			vnode_put(tdp);

			if ( (vnode_getwithref(dp)) ) {
			        dp = NULLVP;
				error = ENOENT;
				goto bad;
			}
			ndp->ni_dvp = dp;
			dp_authorized = 0;
			goto unionlookup;
		}

		if (error != EJUSTRETURN)
			goto bad;

		if (ndp->ni_vp != NULLVP)
			panic("leaf should be empty");

		error = lookup_validate_creation_path(ndp);
		if (error)
			goto bad;
		/*
		 * We return with ni_vp NULL to indicate that the entry
		 * doesn't currently exist, leaving a pointer to the
		 * referenced directory vnode in ndp->ni_dvp.
		 */
		if (cnp->cn_flags & SAVESTART) {
			if ( (vnode_get(ndp->ni_dvp)) ) {
				error = ENOENT;
				goto bad;
			}
			ndp->ni_startdir = ndp->ni_dvp;
		}
		if (!wantparent)
		        vnode_put(ndp->ni_dvp);

		if (kdebug_enable)
		        kdebug_lookup(ndp->ni_dvp, cnp);
		return (0);
	}
returned_from_lookup_path:
	/* We'll always have an iocount on ni_vp when this finishes. */
	error = lookup_handle_found_vnode(ndp, cnp, rdonly, vbusyflags, &keep_going, nc_generation, wantparent, atroot, ctx);
	if (error != 0) {
		goto bad2; 
	}

	if (keep_going) {
		dp = ndp->ni_vp;

		/* namei() will handle symlinks */
		if ((dp->v_type == VLNK) &&
				((cnp->cn_flags & FOLLOW) || (ndp->ni_flag & NAMEI_TRAILINGSLASH) || *ndp->ni_next == '/')) {
			return 0; 
		}

		/*
		 * Otherwise, there's more path to process.  
		 * cache_lookup_path is now responsible for dropping io ref on dp
		 * when it is called again in the dirloop.  This ensures we hold
		 * a ref on dp until we complete the next round of lookup.
		 */
		last_dp = dp;

		goto dirloop;
	}

	return (0);
bad2:
#if CONFIG_VFS_FUNNEL
	if ((cnp->cn_flags & FSNODELOCKHELD)) {
	        cnp->cn_flags &= ~FSNODELOCKHELD;
		unlock_fsnode(ndp->ni_dvp, NULL);
	}
#endif /* CONFIG_VFS_FUNNEL */
	if (ndp->ni_dvp)
		vnode_put(ndp->ni_dvp);

	vnode_put(ndp->ni_vp);
	ndp->ni_vp = NULLVP;

	if (kdebug_enable)
	        kdebug_lookup(dp, cnp);
	return (error);

bad:
#if CONFIG_VFS_FUNNEL
	if ((cnp->cn_flags & FSNODELOCKHELD)) {
	        cnp->cn_flags &= ~FSNODELOCKHELD;
		unlock_fsnode(ndp->ni_dvp, NULL);
	}	
#endif /* CONFIG_VFS_FUNNEL */
	if (dp)
	        vnode_put(dp);
	ndp->ni_vp = NULLVP;

	if (kdebug_enable)
	        kdebug_lookup(dp, cnp);
	return (error);
}

int 
lookup_validate_creation_path(struct nameidata *ndp)
{
	struct componentname *cnp = &ndp->ni_cnd;

	/*
	 * If creating and at end of pathname, then can consider
	 * allowing file to be created.
	 */
	if (cnp->cn_flags & RDONLY) {
		return EROFS;
	}
	if ((cnp->cn_flags & ISLASTCN) && (ndp->ni_flag & NAMEI_TRAILINGSLASH) && !(cnp->cn_flags & WILLBEDIR)) {
		return ENOENT;
	}
	
	return 0;
}

/*
 * Modifies only ni_vp.  Always returns with ni_vp still valid (iocount held).
 */
int
lookup_traverse_mountpoints(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, 
		int vbusyflags, vfs_context_t ctx)
{
	mount_t mp;
	vnode_t tdp;
	int error = 0;
	uthread_t uth;
	uint32_t depth = 0;
	int dont_cache_mp = 0;
	vnode_t	mounted_on_dp;
	int current_mount_generation = 0;
	
	mounted_on_dp = dp;
	current_mount_generation = mount_generation;

	while ((dp->v_type == VDIR) && dp->v_mountedhere &&
			((cnp->cn_flags & NOCROSSMOUNT) == 0)) {
#if CONFIG_TRIGGERS
		/*
		 * For a trigger vnode, call its resolver when crossing its mount (if requested)
		 */
		if (dp->v_resolve) {
			(void) vnode_trigger_resolve(dp, ndp, ctx);
		}
#endif
		vnode_lock(dp);

		if ((dp->v_type == VDIR) && (mp = dp->v_mountedhere)) {

			mp->mnt_crossref++;
			vnode_unlock(dp);


			if (vfs_busy(mp, vbusyflags)) {
				mount_dropcrossref(mp, dp, 0);
				if (vbusyflags == LK_NOWAIT) {
					error = ENOENT;
					goto out;
				}

				continue;
			}


			/*
			 * XXX - if this is the last component of the
			 * pathname, and it's either not a lookup operation
			 * or the NOTRIGGER flag is set for the operation,
			 * set a uthread flag to let VFS_ROOT() for autofs
			 * know it shouldn't trigger a mount.
			 */
			uth = (struct uthread *)get_bsdthread_info(current_thread());
			if ((cnp->cn_flags & ISLASTCN) &&
					(cnp->cn_nameiop != LOOKUP ||
					 (cnp->cn_flags & NOTRIGGER))) {
				uth->uu_notrigger = 1;
				dont_cache_mp = 1;
			}

			error = VFS_ROOT(mp, &tdp, ctx);
			/* XXX - clear the uthread flag */
			uth->uu_notrigger = 0;

			mount_dropcrossref(mp, dp, 0);
			vfs_unbusy(mp);

			if (error) {
				goto out;
			}

			vnode_put(dp);
			ndp->ni_vp = dp = tdp;
			depth++;

#if CONFIG_TRIGGERS
			/*
			 * Check if root dir is a trigger vnode
			 */
			if (dp->v_resolve) {
				error = vnode_trigger_resolve(dp, ndp, ctx);
				if (error) {
					goto out;
				}
			}
#endif			

		} else { 
			vnode_unlock(dp);
			break;
		}
	}

	if (depth && !dont_cache_mp) {
	        mp = mounted_on_dp->v_mountedhere;

		if (mp) {
		        mount_lock_spin(mp);
			mp->mnt_realrootvp_vid = dp->v_id;
			mp->mnt_realrootvp = dp;
			mp->mnt_generation = current_mount_generation;
			mount_unlock(mp);
		}
	}

	return 0;

out:
	return error;
}
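
The core of lookup_traverse_mountpoints() above is the busy/root/unbusy dance performed each time the lookup crosses a mount point. The sketch below isolates just that step, using the same calls as the routine above (vfs_busy, VFS_ROOT, vfs_unbusy, vnode_put); cross_one_mount() is a hypothetical helper name, and the trigger-vnode, mnt_crossref, and uu_notrigger handling of the full routine are deliberately omitted.

static int
cross_one_mount(vnode_t *dpp, vfs_context_t ctx)
{
	vnode_t dp = *dpp;
	vnode_t tdp;
	mount_t mp;
	int error;

	if (dp->v_type != VDIR || (mp = dp->v_mountedhere) == NULL)
		return 0;	/* nothing mounted on this directory */

	if (vfs_busy(mp, 0))
		return ENOENT;	/* simplified: the full code retries or errors per vbusyflags */

	error = VFS_ROOT(mp, &tdp, ctx);	/* iocount on tdp on success */
	vfs_unbusy(mp);
	if (error)
		return error;

	vnode_put(dp);		/* drop the covered directory */
	*dpp = tdp;		/* continue the lookup below the mount point */
	return 0;
}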
Exemple #28
0
static int
ufs_extattr_autostart_locked(struct mount *mp, struct thread *td)
{
	struct vnode *rvp, *attr_dvp, *attr_system_dvp, *attr_user_dvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	int error;

	/*
	 * UFS_EXTATTR applies only to UFS1, as UFS2 uses native extended
	 * attributes, so don't autostart.
	 */
	if (ump->um_fstype != UFS1)
		return (0);

	/*
	 * Does UFS_EXTATTR_FSROOTSUBDIR exist off the filesystem root?
	 * If so, automatically start EA's.
	 */
	error = VFS_ROOT(mp, LK_EXCLUSIVE, &rvp);
	if (error) {
		printf("ufs_extattr_autostart.VFS_ROOT() returned %d\n",
		    error);
		return (error);
	}

	error = ufs_extattr_lookup(rvp, UE_GETDIR_LOCKPARENT_DONT,
	    UFS_EXTATTR_FSROOTSUBDIR, &attr_dvp, td);
	if (error) {
		/* rvp ref'd but now unlocked */
		vrele(rvp);
		return (error);
	}
	if (rvp == attr_dvp) {
		/* Should never happen. */
		vput(rvp);
		vrele(attr_dvp);
		return (EINVAL);
	}
	vrele(rvp);

	if (attr_dvp->v_type != VDIR) {
		printf("ufs_extattr_autostart: %s != VDIR\n",
		    UFS_EXTATTR_FSROOTSUBDIR);
		goto return_vput_attr_dvp;
	}

	error = ufs_extattr_start_locked(ump, td);
	if (error) {
		printf("ufs_extattr_autostart: ufs_extattr_start failed (%d)\n",
		    error);
		goto return_vput_attr_dvp;
	}

	/*
	 * Look for two subdirectories: UFS_EXTATTR_SUBDIR_SYSTEM,
	 * UFS_EXTATTR_SUBDIR_USER.  For each, iterate over the sub-directory,
	 * and start with appropriate type.  Failures in either don't
	 * result in an over-all failure.  attr_dvp is left locked to
	 * be cleaned up on exit.
	 */
	error = ufs_extattr_lookup(attr_dvp, UE_GETDIR_LOCKPARENT,
	    UFS_EXTATTR_SUBDIR_SYSTEM, &attr_system_dvp, td);
	if (!error) {
		error = ufs_extattr_iterate_directory(VFSTOUFS(mp),
		    attr_system_dvp, EXTATTR_NAMESPACE_SYSTEM, td);
		if (error)
			printf("ufs_extattr_iterate_directory returned %d\n",
			    error);
		vput(attr_system_dvp);
	}

	error = ufs_extattr_lookup(attr_dvp, UE_GETDIR_LOCKPARENT,
	    UFS_EXTATTR_SUBDIR_USER, &attr_user_dvp, td);
	if (!error) {
		error = ufs_extattr_iterate_directory(VFSTOUFS(mp),
		    attr_user_dvp, EXTATTR_NAMESPACE_USER, td);
		if (error)
			printf("ufs_extattr_iterate_directory returned %d\n",
			    error);
		vput(attr_user_dvp);
	}

	/* Mask startup failures in sub-directories. */
	error = 0;

return_vput_attr_dvp:
	vput(attr_dvp);

	return (error);
}
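
The example above leans on FreeBSD's VFS_ROOT locking contract: VFS_ROOT(mp, LK_EXCLUSIVE, &rvp) hands back the root vnode both locked and referenced, vput() releases the lock and the reference together, and vrele() drops only the reference once the vnode has already been unlocked. A minimal sketch of that pairing follows; use_fs_root() is a hypothetical helper shown only to illustrate the discipline.

static int
use_fs_root(struct mount *mp)
{
	struct vnode *rvp;
	int error;

	error = VFS_ROOT(mp, LK_EXCLUSIVE, &rvp);
	if (error)
		return (error);

	/* ... operate on the locked, referenced root vnode here ... */

	vput(rvp);	/* unlock and drop the reference in one call */
	return (0);
}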
Exemple #29
0
/*
 * Set the publicly exported filesystem (WebNFS). Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055)
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info, the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				free(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero(&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rvp)))
		return (error);

	if ((error = VOP_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		if (nfs_pub.np_index == NULL)
			nfs_pub.np_index = malloc(MAXNAMLEN + 1, M_TEMP,
			    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    MAXNAMLEN, (size_t *)0);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			free(nfs_pub.np_index, M_TEMP);
			nfs_pub.np_index = NULL;
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}
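
The file-handle construction in vfs_setpublicfs() pairs the file system id taken from the mount's statfs data with a per-file fid produced by VOP_VPTOFH() on the root vnode. The sketch below restates that pattern on its own, assuming the standard FreeBSD fhandle_t layout (fh_fsid and fh_fid); root_fhandle() is a hypothetical helper name.

static int
root_fhandle(struct mount *mp, fhandle_t *fhp)
{
	struct vnode *rvp;
	int error;

	bzero(fhp, sizeof(*fhp));
	fhp->fh_fsid = mp->mnt_stat.f_fsid;	/* which file system */

	error = VFS_ROOT(mp, LK_EXCLUSIVE, &rvp);
	if (error)
		return (error);

	error = VOP_VPTOFH(rvp, &fhp->fh_fid);	/* which file within it */
	vput(rvp);	/* drop the lock and reference in either case */

	return (error);
}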
Exemple #30
0
static int
auto_lookup(
	vnode_t *dvp,
	char *nm,
	vnode_t **vpp,
	pathname_t *pnp,
	int flags,
	vnode_t *rdir,
	cred_t *cred,
	caller_context_t *ct,
	int *direntflags,
	pathname_t *realpnp)
{
	int error = 0;
	vnode_t *newvp = NULL;
	vfs_t *vfsp;
	fninfo_t *dfnip;
	fnnode_t *dfnp = NULL;
	fnnode_t *fnp = NULL;
	char *searchnm;
	int operation;		/* either AUTOFS_LOOKUP or AUTOFS_MOUNT */

	dfnip = vfstofni(dvp->v_vfsp);
	AUTOFS_DPRINT((3, "auto_lookup: dvp=%p (%s) name=%s\n",
	    (void *)dvp, dfnip->fi_map, nm));

	if (nm[0] == 0) {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	if (error = VOP_ACCESS(dvp, VEXEC, 0, cred, ct))
		return (error);

	if (nm[0] == '.' && nm[1] == 0) {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	if (nm[0] == '.' && nm[1] == '.' && nm[2] == 0) {
		fnnode_t *pdfnp;

		pdfnp = (vntofn(dvp))->fn_parent;
		ASSERT(pdfnp != NULL);

		/*
		 * Since it is legitimate to have the VROOT flag set for the
		 * subdirectories of the indirect map in autofs filesystem,
		 * rootfnnodep is checked against fnnode of dvp instead of
		 * just checking whether VROOT flag is set in dvp
		 */

		if (pdfnp == pdfnp->fn_globals->fng_rootfnnodep) {
			vnode_t *vp;

			vfs_rlock_wait(dvp->v_vfsp);
			if (dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED) {
				vfs_unlock(dvp->v_vfsp);
				return (EIO);
			}
			vp = dvp->v_vfsp->vfs_vnodecovered;
			VN_HOLD(vp);
			vfs_unlock(dvp->v_vfsp);
			error = VOP_LOOKUP(vp, nm, vpp, pnp, flags, rdir, cred,
			    ct, direntflags, realpnp);
			VN_RELE(vp);
			return (error);
		} else {
			*vpp = fntovn(pdfnp);
			VN_HOLD(*vpp);
			return (0);
		}
	}

top:
	dfnp = vntofn(dvp);
	searchnm = nm;
	operation = 0;

	ASSERT(vn_matchops(dvp, auto_vnodeops));

	AUTOFS_DPRINT((3, "auto_lookup: dvp=%p dfnp=%p\n", (void *)dvp,
	    (void *)dfnp));

	/*
	 * If a lookup or mount of this node is in progress, wait for it
	 * to finish, and return whatever result it got.
	 */
	mutex_enter(&dfnp->fn_lock);
	if (dfnp->fn_flags & (MF_LOOKUP | MF_INPROG)) {
		mutex_exit(&dfnp->fn_lock);
		error = auto_wait4mount(dfnp);
		if (error == AUTOFS_SHUTDOWN)
			error = ENOENT;
		if (error == EAGAIN)
			goto top;
		if (error)
			return (error);
	} else
		mutex_exit(&dfnp->fn_lock);


	error = vn_vfsrlock_wait(dvp);
	if (error)
		return (error);
	vfsp = vn_mountedvfs(dvp);
	if (vfsp != NULL) {
		error = VFS_ROOT(vfsp, &newvp);
		vn_vfsunlock(dvp);
		if (!error) {
			error = VOP_LOOKUP(newvp, nm, vpp, pnp,
			    flags, rdir, cred, ct, direntflags, realpnp);
			VN_RELE(newvp);
		}
		return (error);
	}
	vn_vfsunlock(dvp);

	rw_enter(&dfnp->fn_rwlock, RW_READER);
	error = auto_search(dfnp, nm, &fnp, cred);
	if (error) {
		if (dfnip->fi_flags & MF_DIRECT) {
			/*
			 * direct map.
			 */
			if (dfnp->fn_dirents) {
				/*
				 * Mount previously triggered.
				 * 'nm' not found
				 */
				error = ENOENT;
			} else {
				/*
				 * I need to contact the daemon to trigger
				 * the mount. 'dfnp' will be the mountpoint.
				 */
				operation = AUTOFS_MOUNT;
				VN_HOLD(fntovn(dfnp));
				fnp = dfnp;
				error = 0;
			}
		} else if (dvp == dfnip->fi_rootvp) {
			/*
			 * 'dfnp' is the root of the indirect AUTOFS.
			 */
			if (rw_tryupgrade(&dfnp->fn_rwlock) == 0) {
				/*
				 * Could not acquire writer lock, release
				 * reader, and wait until available. We
				 * need to search for 'nm' again, since we
				 * had to release the lock before reacquiring
				 * it.
				 */
				rw_exit(&dfnp->fn_rwlock);
				rw_enter(&dfnp->fn_rwlock, RW_WRITER);
				error = auto_search(dfnp, nm, &fnp, cred);
			}

			ASSERT(RW_WRITE_HELD(&dfnp->fn_rwlock));
			if (error) {
				/*
				 * create node being looked-up and request
				 * mount on it.
				 */
				error = auto_enter(dfnp, nm, &fnp, kcred);
				if (!error)
					operation = AUTOFS_LOOKUP;
			}
		} else if ((dfnp->fn_dirents == NULL) &&
		    ((dvp->v_flag & VROOT) == 0) &&
		    ((fntovn(dfnp->fn_parent))->v_flag & VROOT)) {
			/*
			 * dfnp is the actual 'mountpoint' of indirect map,
			 * it is the equivalent of a direct mount,
			 * ie, /home/'user1'
			 */
			operation = AUTOFS_MOUNT;
			VN_HOLD(fntovn(dfnp));
			fnp = dfnp;
			error = 0;
			searchnm = dfnp->fn_name;
		}
	}

	if (error == EAGAIN) {
		rw_exit(&dfnp->fn_rwlock);
		goto top;
	}
	if (error) {
		rw_exit(&dfnp->fn_rwlock);
		return (error);
	}

	/*
	 * We now have the actual fnnode we're interested in.
	 * The 'MF_LOOKUP' indicates another thread is currently
	 * performing a daemon lookup of this node, therefore we
	 * wait for its completion.
	 * The 'MF_INPROG' indicates another thread is currently
	 * performing a daemon mount of this node, we wait for it
	 * to be done if we are performing a MOUNT. We don't
	 * wait for it if we are performing a LOOKUP.
	 * We can release the reader/writer lock as soon as we acquire
	 * the mutex, since the state of the lock can only change by
	 * first acquiring the mutex.
	 */
	mutex_enter(&fnp->fn_lock);
	rw_exit(&dfnp->fn_rwlock);
	if ((fnp->fn_flags & MF_LOOKUP) ||
	    ((operation == AUTOFS_MOUNT) && (fnp->fn_flags & MF_INPROG))) {
		mutex_exit(&fnp->fn_lock);
		error = auto_wait4mount(fnp);
		VN_RELE(fntovn(fnp));
		if (error == AUTOFS_SHUTDOWN)
			error = ENOENT;
		if (error && error != EAGAIN)
			return (error);
		goto top;
	}

	if (operation == 0) {
		/*
		 * got the fnnode, check for any errors
		 * on the previous operation on that node.
		 */
		error = fnp->fn_error;
		if ((error == EINTR) || (error == EAGAIN)) {
			/*
			 * previous operation on this node was
			 * not completed, do a lookup now.
			 */
			operation = AUTOFS_LOOKUP;
		} else {
			/*
			 * previous operation completed. Return
			 * a pointer to the node only if there was
			 * no error.
			 */
			mutex_exit(&fnp->fn_lock);
			if (!error)
				*vpp = fntovn(fnp);
			else
				VN_RELE(fntovn(fnp));
			return (error);
		}
	}

	/*
	 * Since I got to this point, it means I'm the one
	 * responsible for triggering the mount/look-up of this node.
	 */
	switch (operation) {
	case AUTOFS_LOOKUP:
		AUTOFS_BLOCK_OTHERS(fnp, MF_LOOKUP);
		fnp->fn_error = 0;
		mutex_exit(&fnp->fn_lock);
		error = auto_lookup_aux(fnp, searchnm, cred);
		if (!error) {
			/*
			 * Return this vnode
			 */
			*vpp = fntovn(fnp);
		} else {
			/*
			 * release our reference to this vnode
			 * and return error
			 */
			VN_RELE(fntovn(fnp));
		}
		break;
	case AUTOFS_MOUNT:
		AUTOFS_BLOCK_OTHERS(fnp, MF_INPROG);
		fnp->fn_error = 0;
		mutex_exit(&fnp->fn_lock);
		/*
		 * auto_new_mount_thread fires up a new thread which
		 * calls automountd finishing up the work
		 */
		auto_new_mount_thread(fnp, searchnm, cred);

		/*
		 * At this point, we are simply another thread
		 * waiting for the mount to complete
		 */
		error = auto_wait4mount(fnp);
		if (error == AUTOFS_SHUTDOWN)
			error = ENOENT;

		/*
		 * now release our reference to this vnode
		 */
		VN_RELE(fntovn(fnp));
		if (!error)
			goto top;
		break;
	default:
		auto_log(dfnp->fn_globals->fng_verbose,
		    dfnp->fn_globals->fng_zoneid, CE_WARN,
		    "auto_lookup: unknown operation %d",
		    operation);
	}

	AUTOFS_DPRINT((5, "auto_lookup: name=%s *vpp=%p return=%d\n",
	    nm, (void *)*vpp, error));

	return (error);
}
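
The step auto_lookup() shares with the other examples in this collection is the one taken when a file system is already mounted on the autofs directory: grab the vfs lock, fetch the mounted file system's root with VFS_ROOT(), and redirect the lookup there. The reduced sketch below shows only that fragment, using the same illumos calls as above (vn_vfsrlock_wait, vn_mountedvfs, VFS_ROOT, VOP_LOOKUP, VN_RELE); lookup_in_mounted_fs() is a hypothetical name and the error handling is trimmed.

static int
lookup_in_mounted_fs(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	vfs_t *vfsp;
	vnode_t *rootvp;
	int error;

	error = vn_vfsrlock_wait(dvp);		/* serialize against unmount */
	if (error)
		return (error);

	vfsp = vn_mountedvfs(dvp);
	if (vfsp == NULL) {
		vn_vfsunlock(dvp);
		return (ENOENT);		/* nothing mounted here */
	}

	error = VFS_ROOT(vfsp, &rootvp);	/* held root of the mounted fs */
	vn_vfsunlock(dvp);
	if (error)
		return (error);

	error = VOP_LOOKUP(rootvp, nm, vpp, pnp, flags, rdir, cr, ct,
	    direntflags, realpnp);
	VN_RELE(rootvp);
	return (error);
}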