Example #1
static void
set_rootvnode(void)
{
	struct proc *p;

	if (VFS_ROOT(TAILQ_FIRST(&mountlist), LK_EXCLUSIVE, &rootvnode))
		panic("Cannot find root vnode");

	VOP_UNLOCK(rootvnode, 0);

	p = curthread->td_proc;
	FILEDESC_XLOCK(p->p_fd);

	if (p->p_fd->fd_cdir != NULL)
		vrele(p->p_fd->fd_cdir);
	p->p_fd->fd_cdir = rootvnode;
	VREF(rootvnode);

	if (p->p_fd->fd_rdir != NULL)
		vrele(p->p_fd->fd_rdir);
	p->p_fd->fd_rdir = rootvnode;
	VREF(rootvnode);

	FILEDESC_XUNLOCK(p->p_fd);
}
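Every pointer published in the example above owns its own reference: VREF() is taken once for fd_cdir and once for fd_rdir, and each will eventually be paired with a vrele() when that pointer is dropped. A minimal sketch of that rule, using a hypothetical helper that is not part of any source quoted here (the caller is assumed to hold whatever lock protects the slot, as FILEDESC_XLOCK does above):

static void
swap_vnode_ref(struct vnode **slotp, struct vnode *newvp)
{
	struct vnode *oldvp;

	if (newvp != NULL)
		VREF(newvp);		/* reference now owned by the slot */
	oldvp = *slotp;
	*slotp = newvp;
	if (oldvp != NULL)
		vrele(oldvp);		/* drop the reference the slot held */
}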
Example #2
static __inline
struct vnode *
union_lock_upper(struct union_node *un, struct thread *td)
{
	struct vnode *uppervp;

	if ((uppervp = un->un_uppervp) != NULL) {
		VREF(uppervp);
		vn_lock(uppervp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
	}
	KASSERT((uppervp == NULL || vrefcnt(uppervp) > 0), ("uppervp usecount is 0"));
	return(uppervp);
}
Example #3
/*
 * Lookup.  This is incredibly complicated in the
 * general case, but for most pseudo-filesystems
 * very little needs to be done.
 *
 * Locking isn't hard here, just poorly documented.
 *
 * If we're looking up ".", just vref the parent & return it.
 *
 * If we're looking up "..", unlock the parent, and lock "..". If everything
 * went ok, try to re-lock the parent. We do this to prevent lock races.
 *
 * For anything else, get the needed node.
 *
 * We try to exit with the parent locked in error cases.
 */
int
ptyfs_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode **vpp = ap->a_vpp;
	struct vnode *dvp = ap->a_dvp;
	const char *pname = cnp->cn_nameptr;
	struct ptyfsnode *ptyfs;
	int pty, error;

	*vpp = NULL;

	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)
		return EROFS;

	if (cnp->cn_namelen == 1 && *pname == '.') {
		*vpp = dvp;
		VREF(dvp);
		return 0;
	}

	ptyfs = VTOPTYFS(dvp);
	switch (ptyfs->ptyfs_type) {
	case PTYFSroot:
		/*
		 * Shouldn't get here with .. in the root node.
		 */
		if (cnp->cn_flags & ISDOTDOT)
			return EIO;

		pty = atoi(pname, cnp->cn_namelen);

		if (pty < 0 || pty >= npty || pty_isfree(pty, 1))
			break;

		error = ptyfs_allocvp(dvp->v_mount, vpp, PTYFSpts, pty,
		    curlwp);
		return error;

	default:
		return ENOTDIR;
	}

	return cnp->cn_nameiop == LOOKUP ? ENOENT : EROFS;
}
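ptyfs sidesteps the ".." case by rejecting it at the root, but the lock dance the comment describes is the usual one: unlock the parent, lock the child, then re-lock the parent so the caller's expectations still hold. A hedged sketch follows; getparentvp() is a hypothetical filesystem-specific helper, and the exact VOP_UNLOCK()/vn_lock() signatures vary between BSD releases:

static int
example_lookup_dotdot(struct vnode *dvp, struct vnode **vpp)
{
	int error;

	VOP_UNLOCK(dvp, 0);			/* avoid child-before-parent lock order */
	error = getparentvp(dvp, vpp);		/* hypothetical: returns *vpp locked */
	vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);	/* re-lock the parent for the caller */
	return error;
}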
Example #4
static __inline
struct vnode *
union_lock_other(struct union_node *un, struct thread *td)
{
	struct vnode *vp;

	if (un->un_uppervp != NULL) {
		vp = union_lock_upper(un, td);
	} else if ((vp = un->un_lowervp) != NULL) {
		VREF(vp);
		vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, td);
	}
	return(vp);
}
Example #5
static int
VMBlockVFSRoot(struct mount *mp,        // IN: vmblock file system
               int flags,               // IN: lockmgr(9) flags
               struct vnode **vpp,      // OUT: root vnode
               struct thread *td)       // IN: caller's thread context
{
   struct vnode *vp;

   /*
    * Return locked reference to root.
    */
   vp = MNTTOVMBLOCKMNT(mp)->rootVnode;
   VREF(vp);
   compat_vn_lock(vp, flags | LK_RETRY, compat_td);
   *vpp = vp;
   return 0;
}
Example #6
/*
 * Change the trace vnode in a correct way (to avoid races).
 */
void
ktrsettracevnode(struct proc *p, struct vnode *newvp)
{
	struct vnode *vp;

	if (p->p_tracep == newvp)	/* avoid work */
		return;

	if (newvp != NULL)
		VREF(newvp);

	vp = p->p_tracep;
	p->p_tracep = newvp;

	if (vp != NULL)
		vrele(vp);
}
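A hedged usage sketch for the routine above: the caller keeps its own reference to the trace vnode, because ktrsettracevnode() takes an extra reference for p_tracep and releases it again when tracing is redirected or turned off.

static void
example_toggle_trace(struct proc *p, struct vnode *tracevp)
{
	ktrsettracevnode(p, tracevp);	/* p_tracep now holds its own reference */
	/* ... trace for a while ... */
	ktrsettracevnode(p, NULL);	/* the old trace vnode is vrele'd here */
}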
Example #7
/*
 * Increasing refcount of lower vnode is needed at least for the case
 * when lower FS is NFS to do sillyrename if the file is in use.
 * Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
null_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp;

	if (vrefcnt(ap->a_vp) > 1) {
		lvp = NULLVPTOLOWERVP(ap->a_vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	retval = null_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}
Example #8
static int vboxvfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
{
    int rc = 0;
    struct sf_glob_info *pShFlGlobalInfo = VFSMP2SFGLOBINFO(mp);
    struct vnode *vp;

    printf("%s: Enter\n", __FUNCTION__);

    vp = pShFlGlobalInfo->vnode_root;
    VREF(vp);

    vn_lock(vp, flags | LK_RETRY, td);
    *vpp = vp;

    printf("%s: Leave\n", __FUNCTION__);

    return rc;
}
Example #9
/*
 * Increasing refcount of lower vnode is needed at least for the case
 * when lower FS is NFS to do sillyrename if the file is in use.
 * Unfortunately v_usecount is incremented in many places in
 * the kernel and, as such, there may be races that result in
 * the NFS client doing an extraneous silly rename, but that seems
 * preferable to not doing a silly rename when it is needed.
 */
static int
crypto_remove(struct vop_remove_args *ap)
{
	int retval, vreleit;
	struct vnode *lvp, *vp;

	vp = ap->a_vp;
	if (vrefcnt(vp) > 1) {
		lvp = NULLVPTOLOWERVP(vp);
		VREF(lvp);
		vreleit = 1;
	} else
		vreleit = 0;
	VTONULL(vp)->crypto_flags |= NULLV_DROP;
	retval = crypto_bypass(&ap->a_gen);
	if (vreleit != 0)
		vrele(lvp);
	return (retval);
}
Example #10
static int
xfs_dnlc_lock(struct vnode *dvp,
	      xfs_componentname *cnp,
	      struct vnode **res)
{
    int error = 0;

    /*
     * Try to handle the (complex) BSD locking protocol.
     */

    if (*res == dvp) {		/* "." */
	VREF(dvp);
    } else if (cnp->cn_flags & ISDOTDOT) { /* ".." */
	u_long vpid = dvp->v_id;

#ifdef HAVE_FREEBSD_THREAD
	xfs_vfs_unlock(dvp, xfs_cnp_to_thread(cnp));
	error = xfs_do_vget(*res, LK_EXCLUSIVE, xfs_cnp_to_thread(cnp));
	xfs_vfs_writelock(dvp, xfs_cnp_to_thread(cnp));
#else
	xfs_vfs_unlock(dvp, xfs_cnp_to_proc(cnp));
	error = xfs_do_vget(*res, LK_EXCLUSIVE, xfs_cnp_to_proc(cnp));
	xfs_vfs_writelock(dvp, xfs_cnp_to_proc(cnp));
#endif

	if (error == 0 && dvp->v_id != vpid) {
	    vput(*res);
	    return 0;
	}
    } else {
#ifdef HAVE_FREEBSD_THREAD
	error = xfs_do_vget(*res, LK_EXCLUSIVE, xfs_cnp_to_thread(cnp));
#else
	error = xfs_do_vget(*res, LK_EXCLUSIVE, xfs_cnp_to_proc(cnp));
#endif
    }

    if (error == 0)
	return -1;
    else
	return 0;
}
Example #11
/*
 * File table vnode ioctl routine.
 */
int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, p->p_ucred, p);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIBMAP)
			return VOP_IOCTL(vp, com, data, fp->f_flag,
					 p->p_ucred, p);
		if (com == FIONBIO || com == FIOASYNC)  /* XXX */
			return (0);			/* XXX */
		/* FALLTHROUGH */
	default:
		return (ENOTTY);
		
	case VFIFO:
	case VCHR:
	case VBLK:
		error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p);
		if (error == 0 && com == TIOCSCTTY) {
			if (p->p_session->s_ttyvp)
				vrele(p->p_session->s_ttyvp);
			p->p_session->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}
Example #12
/*
 * We have to carry on the locking protocol on the null layer vnodes
 * as we progress through the tree. We also have to enforce read-only
 * if this layer is mounted read-only.
 */
static int
null_lookup(struct vop_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	int flags = cnp->cn_flags;
	struct vnode *vp, *ldvp, *lvp;
	int error;

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	/*
	 * Although it is possible to call null_bypass(), we'll do
	 * a direct call to reduce overhead
	 */
	ldvp = NULLVPTOLOWERVP(dvp);
	vp = lvp = NULL;
	error = VOP_LOOKUP(ldvp, &lvp, cnp);
	if (error == EJUSTRETURN && (flags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
		error = EROFS;

	if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
		if (ldvp == lvp) {
			*ap->a_vpp = dvp;
			VREF(dvp);
			vrele(lvp);
		} else {
			error = null_nodeget(dvp->v_mount, lvp, &vp);
			if (error)
				vput(lvp);
			else
				*ap->a_vpp = vp;
		}
	}
	return (error);
}
Example #13
int
reiserfs_lookup(struct vop_cachedlookup_args *ap)
{
	int error, retval;
	struct vnode *vdp         = ap->a_dvp;
	struct vnode **vpp        = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;

	int flags         = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;
	struct cpu_key *saved_ino;

	struct vnode *vp;
	struct vnode *pdp;  /* Saved dp during symlink work */
	struct reiserfs_node *dp;
	struct reiserfs_dir_entry de;
	INITIALIZE_PATH(path_to_entry);

	char c = cnp->cn_nameptr[cnp->cn_namelen];
	cnp->cn_nameptr[cnp->cn_namelen] = '\0';
	reiserfs_log(LOG_DEBUG, "looking for `%s', %ld (%s)\n",
	    cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_pnbuf);
	cnp->cn_nameptr[cnp->cn_namelen] = c;

	vp = NULL;
	dp = VTOI(vdp);

	if (REISERFS_MAX_NAME(dp->i_reiserfs->s_blocksize) < cnp->cn_namelen)
		return (ENAMETOOLONG);

	reiserfs_log(LOG_DEBUG, "searching entry\n");
	de.de_gen_number_bit_string = 0;
	retval = reiserfs_find_entry(dp, cnp->cn_nameptr, cnp->cn_namelen,
	    &path_to_entry, &de);
	pathrelse(&path_to_entry);

	if (retval == NAME_FOUND) {
		reiserfs_log(LOG_DEBUG, "found\n");
	} else {
		reiserfs_log(LOG_DEBUG, "not found\n");
	}

	if (retval == NAME_FOUND) {
#if 0
		/* Hide the .reiserfs_priv directory */
		if (reiserfs_xattrs(dp->i_reiserfs) &&
		    !old_format_only(dp->i_reiserfs) &&
		    REISERFS_SB(dp->i_reiserfs)->priv_root &&
		    REISERFS_SB(dp->i_reiserfs)->priv_root->d_inode &&
		    de.de_objectid == le32toh(INODE_PKEY(REISERFS_SB(
		    dp->i_reiserfs)->priv_root->d_inode)->k_objectid)) {
			return (EACCES);
		}
#endif

		reiserfs_log(LOG_DEBUG, "reading vnode\n");
		pdp = vdp;
		if (flags & ISDOTDOT) {
			saved_ino = (struct cpu_key *)&(de.de_dir_id);
			VOP_UNLOCK(pdp, 0);
			error = reiserfs_iget(vdp->v_mount,
			    saved_ino, &vp, td);
			vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY);
			if (error != 0)
				return (error);
			*vpp = vp;
		} else if (de.de_objectid == dp->i_number &&
		    de.de_dir_id == dp->i_ino) {
			VREF(vdp); /* We want ourself, ie "." */
			*vpp = vdp;
		} else {
			if ((error = reiserfs_iget(vdp->v_mount,
			    (struct cpu_key *)&(de.de_dir_id), &vp, td)) != 0)
				return (error);
			*vpp = vp;
		}

		/*
		 * Propagate the priv_object flag so we know we're in the
		 * priv tree
		 */
		/*if (is_reiserfs_priv_object(dir))
			REISERFS_I(inode)->i_flags |= i_priv_object;*/
	} else {
		if (retval == IO_ERROR) {
			reiserfs_log(LOG_DEBUG, "IO error\n");
			return (EIO);
		}

		return (ENOENT);
	}

	/* Insert name into cache if appropriate. */
	if (cnp->cn_flags & MAKEENTRY)
		cache_enter(vdp, *vpp, cnp);

	reiserfs_log(LOG_DEBUG, "done\n");
	return (0);
}
Example #14
/* Note that we don't set CDirty here, this is OK because the unlink
 * RPC is called synchronously */
int
afs_remove(OSI_VC_DECL(adp), char *aname, afs_ucred_t *acred)
{
    struct vrequest treq;
    register struct dcache *tdc;
    struct VenusFid unlinkFid;
    register afs_int32 code;
    register struct vcache *tvc;
    afs_size_t offset, len;
    struct afs_fakestat_state fakestate;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_remove);
    afs_Trace2(afs_iclSetp, CM_TRACE_REMOVE, ICL_TYPE_POINTER, adp,
	       ICL_TYPE_STRING, aname);


    if ((code = afs_InitReq(&treq, acred))) {
	return code;
    }

    afs_InitFakeStat(&fakestate);
    AFS_DISCON_LOCK();
    code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (code)
	goto done;

    /* Check if this is dynroot */
    if (afs_IsDynroot(adp)) {
	code = afs_DynrootVOPRemove(adp, acred, aname);
	goto done;
    }
    if (afs_IsDynrootMount(adp)) {
	code = ENOENT;
	goto done;
    }

    if (strlen(aname) > AFSNAMEMAX) {
	code = ENAMETOOLONG;
	goto done;
    }
  tagain:
    code = afs_VerifyVCache(adp, &treq);
    tvc = NULL;
    if (code) {
	code = afs_CheckCode(code, &treq, 23);
	goto done;
    }

    /** If the volume is read-only, return error without making an RPC to the
      * fileserver
      */
    if (adp->f.states & CRO) {
	code = EROFS;
	goto done;
    }

    /* If we're running disconnected without logging, go no further... */
    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
        code = ENETDOWN;
	goto done;
    }
    
    tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);	/* test for error below */
    ObtainWriteLock(&adp->lock, 142);
    if (tdc)
	ObtainSharedLock(&tdc->lock, 638);

    /*
     * Make sure that the data in the cache is current. We may have
     * received a callback while we were waiting for the write lock.
     */
    if (!(adp->f.states & CStatd)
	|| (tdc && !hsame(adp->f.m.DataVersion, tdc->f.versionNo))) {
	ReleaseWriteLock(&adp->lock);
	if (tdc) {
	    ReleaseSharedLock(&tdc->lock);
	    afs_PutDCache(tdc);
	}
	goto tagain;
    }

    unlinkFid.Fid.Vnode = 0;
    if (!tvc) {
	tvc = osi_dnlc_lookup(adp, aname, WRITE_LOCK);
    }
    /* This should not be necessary since afs_lookup() has already
     * done the work.
     */
    if (!tvc)
	if (tdc) {
	    code = afs_dir_Lookup(tdc, aname, &unlinkFid.Fid);
	    if (code == 0) {
		afs_int32 cached = 0;

		unlinkFid.Cell = adp->f.fid.Cell;
		unlinkFid.Fid.Volume = adp->f.fid.Fid.Volume;
		if (unlinkFid.Fid.Unique == 0) {
		    tvc =
			afs_LookupVCache(&unlinkFid, &treq, &cached, adp,
					 aname);
		} else {
		    ObtainReadLock(&afs_xvcache);
		    tvc = afs_FindVCache(&unlinkFid, 0, DO_STATS);
		    ReleaseReadLock(&afs_xvcache);
		}
	    }
	}

    if (AFS_IS_DISCON_RW) {
	if (!adp->f.shadow.vnode && !(adp->f.ddirty_flags & VDisconCreate)) {
    	    /* Make shadow copy of parent dir. */
	    afs_MakeShadowDir(adp, tdc);
	}

	/* Can't hold a dcache lock whilst we're getting a vcache one */
	if (tdc)
	    ReleaseSharedLock(&tdc->lock);

        /* XXX - We're holding adp->lock still, and we've got no 
	 * guarantee about whether the ordering matches the lock hierarchy */
	ObtainWriteLock(&tvc->lock, 713);

	/* If we were locally created, then we don't need to do very
	 * much beyond ensuring that we don't exist anymore */	
    	if (tvc->f.ddirty_flags & VDisconCreate) {
	    afs_DisconRemoveDirty(tvc);
	} else {
	    /* Add removed file vcache to dirty list. */
	    afs_DisconAddDirty(tvc, VDisconRemove, 1);
        }
	adp->f.m.LinkCount--;
	ReleaseWriteLock(&tvc->lock);
	if (tdc)
	    ObtainSharedLock(&tdc->lock, 714);
     }

    if (tvc && osi_Active(tvc)) {
	/* about to delete whole file, prefetch it first */
	ReleaseWriteLock(&adp->lock);
	if (tdc)
	    ReleaseSharedLock(&tdc->lock);
	ObtainWriteLock(&tvc->lock, 143);
	FetchWholeEnchilada(tvc, &treq);
	ReleaseWriteLock(&tvc->lock);
	ObtainWriteLock(&adp->lock, 144);
	/* Technically I don't think we need this back, but let's hold it 
	   anyway; The "got" reference should actually be sufficient. */
	if (tdc) 
	    ObtainSharedLock(&tdc->lock, 640);
    }

    osi_dnlc_remove(adp, aname, tvc);

    Tadp1 = adp;
#ifndef AFS_DARWIN80_ENV
    Tadpr = VREFCOUNT(adp);
#endif
    Ttvc = tvc;
    Tnam = aname;
    Tnam1 = 0;
#ifndef AFS_DARWIN80_ENV
    if (tvc)
	Ttvcr = VREFCOUNT(tvc);
#endif
#ifdef	AFS_AIX_ENV
    if (tvc && VREFCOUNT_GT(tvc, 2) && tvc->opens > 0
	&& !(tvc->f.states & CUnlinked)) {
#else
    if (tvc && VREFCOUNT_GT(tvc, 1) && tvc->opens > 0
	&& !(tvc->f.states & CUnlinked)) {
#endif
	char *unlname = afs_newname();

	ReleaseWriteLock(&adp->lock);
	if (tdc)
	    ReleaseSharedLock(&tdc->lock);
	code = afsrename(adp, aname, adp, unlname, acred, &treq);
	Tnam1 = unlname;
	if (!code) {
	    struct VenusFid *oldmvid = NULL;
	    if (tvc->mvid) 
		oldmvid = tvc->mvid;
	    tvc->mvid = (struct VenusFid *)unlname;
	    if (oldmvid)
		osi_FreeSmallSpace(oldmvid);
	    crhold(acred);
	    if (tvc->uncred) {
		crfree(tvc->uncred);
	    }
	    tvc->uncred = acred;
	    tvc->f.states |= CUnlinked;
	    /* if rename succeeded, remove should not */
	    ObtainWriteLock(&tvc->lock, 715);
	    if (tvc->f.ddirty_flags & VDisconRemove) {
		tvc->f.ddirty_flags &= ~VDisconRemove;
	    }
	    ReleaseWriteLock(&tvc->lock);
	} else {
	    osi_FreeSmallSpace(unlname);
	}
	if (tdc)
	    afs_PutDCache(tdc);
	afs_PutVCache(tvc);
    } else {
	code = afsremove(adp, tdc, tvc, aname, acred, &treq);
    }
    done:
    afs_PutFakeStat(&fakestate);
#ifndef AFS_DARWIN80_ENV
    /* we can't track by thread, it's not exported in the KPI; only do
       this on !macos */
    osi_Assert(!WriteLocked(&adp->lock) || (adp->lock.pid_writer != MyPidxx));
#endif
    AFS_DISCON_UNLOCK();
    return code;
}


/* afs_remunlink -- This tries to delete the file at the server after it has
 *     been renamed when unlinked locally but now has been finally released.
 *
 * CAUTION -- may be called with avc unheld. */

int
afs_remunlink(register struct vcache *avc, register int doit)
{
    afs_ucred_t *cred;
    char *unlname;
    struct vcache *adp;
    struct vrequest treq;
    struct VenusFid dirFid;
    register struct dcache *tdc;
    afs_int32 code = 0;

    if (NBObtainWriteLock(&avc->lock, 423))
	return 0;
#if defined(AFS_DARWIN80_ENV)
    if (vnode_get(AFSTOV(avc))) {
	ReleaseWriteLock(&avc->lock);
	return 0;
    }
#endif

    if (avc->mvid && (doit || (avc->f.states & CUnlinkedDel))) {
	if ((code = afs_InitReq(&treq, avc->uncred))) {
	    ReleaseWriteLock(&avc->lock);
	} else {
	    /* Must bump the refCount because GetVCache may block.
	     * Also clear mvid so no other thread comes here if we block.
	     */
	    unlname = (char *)avc->mvid;
	    avc->mvid = NULL;
	    cred = avc->uncred;
	    avc->uncred = NULL;

#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
	    VREF(AFSTOV(avc));
#else
	    AFS_FAST_HOLD(avc);
#endif

	    /* We'll only try this once. If it fails, just release the vnode.
	     * Clear after doing hold so that NewVCache doesn't find us yet.
	     */
	    avc->f.states &= ~(CUnlinked | CUnlinkedDel);

	    ReleaseWriteLock(&avc->lock);

	    dirFid.Cell = avc->f.fid.Cell;
	    dirFid.Fid.Volume = avc->f.fid.Fid.Volume;
	    dirFid.Fid.Vnode = avc->f.parent.vnode;
	    dirFid.Fid.Unique = avc->f.parent.unique;
	    adp = afs_GetVCache(&dirFid, &treq, NULL, NULL);

	    if (adp) {
		tdc = afs_FindDCache(adp, (afs_size_t) 0);
		ObtainWriteLock(&adp->lock, 159);
		if (tdc)
		    ObtainSharedLock(&tdc->lock, 639);

		/* afsremove releases the adp & tdc locks, and does vn_rele(avc) */
		code = afsremove(adp, tdc, avc, unlname, cred, &treq);
		afs_PutVCache(adp);
	    } else {
		/* we failed - and won't be back to try again. */
		afs_PutVCache(avc);
	    }
	    osi_FreeSmallSpace(unlname);
	    crfree(cred);
	}
    } else {
#if defined(AFS_DARWIN80_ENV)
	vnode_put(AFSTOV(avc));
#endif
	ReleaseWriteLock(&avc->lock);
    }

    return code;
}
Example #15
/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct fs *fs;
	struct inode *ip;
	struct ufs1_dinode *dp1;
#ifdef FFS2
	struct ufs2_dinode *dp2;
#endif
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}
#ifdef VFSDEBUG
	vp->v_flag |= VLOCKSWORK;
#endif
	/* XXX - we use the same pool for ffs and mfs */
	ip = pool_get(&ffs_ino_pool, PR_WAITOK);
	bzero((caddr_t)ip, sizeof(struct inode));
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	ip->i_ump = ump;
	VREF(ip->i_devvp);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_vtbl = &ffs_vtbl;

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	error = ufs_ihashins(ip);
	
	if (error) {
		/*
		 * VOP_INACTIVE will treat this as a stale file
		 * and recycle it quickly
		 */
		vrele(vp);

		if (error == EEXIST)
			goto retry;

		return (error);
	}


	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
		      (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}

#ifdef FFS2
	if (ip->i_ump->um_fstype == UM_UFS2) {
		ip->i_din2 = pool_get(&ffs_dinode2_pool, PR_WAITOK);
		dp2 = (struct ufs2_dinode *) bp->b_data + ino_to_fsbo(fs, ino);
		*ip->i_din2 = *dp2;
	} else
#endif
	{
		ip->i_din1 = pool_get(&ffs_dinode1_pool, PR_WAITOK);
		dp1 = (struct ufs1_dinode *) bp->b_data + ino_to_fsbo(fs, ino);
		*ip->i_din1 = *dp1;
	}

	brelse(bp);

	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = DIP(ip, nlink);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (DIP(ip, gen) == 0) {
		DIP_ASSIGN(ip, gen, arc4random() & INT_MAX);
		if (DIP(ip, gen) == 0 || DIP(ip, gen) == -1)
			DIP_ASSIGN(ip, gen, 1);	/* Shouldn't happen */
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}

	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_inodefmt < FS_44INODEFMT) {
		ip->i_ffs1_uid = ip->i_din1->di_ouid;
		ip->i_ffs1_gid = ip->i_din1->di_ogid;
	}

	*vpp = vp;

	return (0);
}
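A sketch of how a caller typically reaches ffs_vget(), assuming the three-argument VFS_VGET() interface this example is written against; on success the vnode comes back locked and referenced, so it is released with vput():

static int
example_vnode_by_ino(struct mount *mp, ino_t ino)
{
	struct vnode *vp;
	int error;

	error = VFS_VGET(mp, ino, &vp);	/* dispatches to ffs_vget() for FFS */
	if (error)
		return (error);
	/* ... inspect or modify VTOI(vp) here ... */
	vput(vp);			/* unlock and drop the reference */
	return (0);
}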
Example #16
/*
 * Look up a file or directory
 */
static int
pfs_lookup(struct vop_cachedlookup_args *va)
{
	struct vnode *vn = va->a_dvp;
	struct vnode **vpp = va->a_vpp;
	struct componentname *cnp = va->a_cnp;
	struct pfs_vdata *pvd = vn->v_data;
	struct pfs_node *pd = pvd->pvd_pn;
	struct pfs_node *pn, *pdn = NULL;
	pid_t pid = pvd->pvd_pid;
	char *pname;
	int error, i, namelen, visible;

	PFS_TRACE(("%.*s", (int)cnp->cn_namelen, cnp->cn_nameptr));
	pfs_assert_not_owned(pd);

	if (vn->v_type != VDIR)
		PFS_RETURN (ENOTDIR);
	KASSERT_PN_IS_DIR(pd);

	error = VOP_ACCESS(vn, VEXEC, cnp->cn_cred, cnp->cn_thread);
	if (error)
		PFS_RETURN (error);

	/*
	 * Don't support DELETE or RENAME.  CREATE is supported so
	 * that O_CREAT will work, but the lookup will still fail if
	 * the file does not exist.
	 */
	if ((cnp->cn_flags & ISLASTCN) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		PFS_RETURN (EOPNOTSUPP);

	/* shortcut: check if the name is too long */
	if (cnp->cn_namelen >= PFS_NAMELEN)
		PFS_RETURN (ENOENT);

	/* check that parent directory is visible... */
	if (!pfs_visible(curthread, pd, pvd->pvd_pid, NULL))
		PFS_RETURN (ENOENT);

	/* self */
	namelen = cnp->cn_namelen;
	pname = cnp->cn_nameptr;
	if (namelen == 1 && pname[0] == '.') {
		pn = pd;
		*vpp = vn;
		VREF(vn);
		PFS_RETURN (0);
	}

	/* parent */
	if (cnp->cn_flags & ISDOTDOT) {
		if (pd->pn_type == pfstype_root)
			PFS_RETURN (EIO);
		VOP_UNLOCK(vn, 0, cnp->cn_thread);
		KASSERT(pd->pn_parent != NULL,
		    ("%s(): non-root directory has no parent", __func__));
		/*
		 * This one is tricky.  Descendants of procdir nodes
		 * inherit their parent's process affinity, but
		 * there's no easy reverse mapping.  For simplicity,
		 * we assume that if this node is a procdir, its
		 * parent isn't (which is correct as long as
		 * descendants of procdir nodes are never procdir
		 * nodes themselves)
		 */
		if (pd->pn_type == pfstype_procdir)
			pid = NO_PID;
		pfs_lock(pd);
		pn = pd->pn_parent;
		pfs_unlock(pd);
		goto got_pnode;
	}

	pfs_lock(pd);

	/* named node */
	for (pn = pd->pn_nodes; pn != NULL; pn = pn->pn_next)
		if (pn->pn_type == pfstype_procdir)
			pdn = pn;
		else if (pn->pn_name[namelen] == '\0' &&
		    bcmp(pname, pn->pn_name, namelen) == 0) {
			pfs_unlock(pd);
			goto got_pnode;
		}

	/* process dependent node */
	if ((pn = pdn) != NULL) {
		for (pid = 0, i = 0; i < namelen && isdigit(pname[i]); ++i)
			if ((pid = pid * 10 + pname[i] - '0') > PID_MAX)
				break;
		if (i == cnp->cn_namelen) {
			pfs_unlock(pd);
			goto got_pnode;
		}
	}

	pfs_unlock(pd);

	PFS_RETURN (ENOENT);

 got_pnode:
	pfs_assert_not_owned(pd);
	pfs_assert_not_owned(pn);
	visible = pfs_visible(curthread, pn, pid, NULL);
	if (!visible) {
		error = ENOENT;
		goto failed;
	}

	error = pfs_vncache_alloc(vn->v_mount, vpp, pn, pid);
	if (error)
		goto failed;

	if (cnp->cn_flags & ISDOTDOT)
		vn_lock(vn, LK_EXCLUSIVE|LK_RETRY, cnp->cn_thread);
	if (cnp->cn_flags & MAKEENTRY)
		cache_enter(vn, *vpp, cnp);
	PFS_RETURN (0);
 failed:
	if (cnp->cn_flags & ISDOTDOT)
		vn_lock(vn, LK_EXCLUSIVE|LK_RETRY, cnp->cn_thread);
	PFS_RETURN(error);
}
Example #17
static int
common_fhtovp(struct mount * mp,
	   struct fid * fhp,
	   struct vnode ** vpp)
{
#ifdef ARLA_KNFS
    struct netcred *np = NULL;
    struct xfs_node *xn;
    struct vnode *vp;
    xfs_handle handle;
    int error;

    NNPFSDEB(XDEBVFOPS, ("xfs_fhtovp\n"));

    if (fhp->fid_len != 16) {
	printf("xfs_fhtovp: *PANIC* got a invalid length of a fid\n");
	return EINVAL;
    }

    memcpy(&handle, fhp->fid_data, sizeof(handle));
    NNPFSDEB(XDEBVFOPS, ("xfs_fhtovp: fid: %d.%d.%d.%d\n", 
		       handle.a, handle.b, handle.c, handle.d));

    NNPFSDEB(XDEBVFOPS, ("xfs_fhtovp: xfs_vnode_find\n"));
    xn = xfs_node_find(&xfs[0].nodehead, &handle);

    if (xn == NULL) {
	struct xfs_message_getattr msg;

        error = xfs_getnewvnode(xfs[0].mp, &vp, &handle);
        if (error)
            return error;
	
	xfs_do_vget(vp, 0, curproc);

    } else {
	/* XXX access ? */
	vp = XNODE_TO_VNODE(xn);

	/* XXX wrong ? (we tell arla below) */
        if (vp->v_usecount <= 0) 
	    xfs_do_vget(vp, 0, curproc);
	else
	    VREF(vp);
	error = 0;
    }

    *vpp = vp;

    if (error == 0) {
	NNPFSDEB(XDEBVFOPS, ("xfs_fhtovp done\n"));

	/* 
	 * XXX tell arla that this node is held by nfsd.
	 * There needs to be code in xfs_write too.
	 */
    } else
	NNPFSDEB(XDEBVFOPS, ("xfs_fhtovp failed (%d)\n", error));

    return error;
#else /* !ARLA_KNFS */
    return EOPNOTSUPP;
#endif /* !ARLA_KNFS */
}
Example #18
static int
tmpfs_lookup(struct vop_cachedlookup_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct componentname *cnp = v->a_cnp;

	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_node *dnode;

	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULLVP;

	/* Check accessibility of requested node as a first step. */
	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread);
	if (error != 0)
		goto out;

	/* We cannot be requesting the parent directory of the root node. */
	MPASS(IMPLIES(dnode->tn_type == VDIR &&
	    dnode->tn_dir.tn_parent == dnode,
	    !(cnp->cn_flags & ISDOTDOT)));

	TMPFS_ASSERT_LOCKED(dnode);
	if (dnode->tn_dir.tn_parent == NULL) {
		error = ENOENT;
		goto out;
	}
	if (cnp->cn_flags & ISDOTDOT) {
		int ltype = 0;

		ltype = VOP_ISLOCKED(dvp);
		vhold(dvp);
		VOP_UNLOCK(dvp, 0);
		/* Allocate a new vnode on the matching entry. */
		error = tmpfs_alloc_vp(dvp->v_mount, dnode->tn_dir.tn_parent,
		    cnp->cn_lkflags, vpp);

		vn_lock(dvp, ltype | LK_RETRY);
		vdrop(dvp);
	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		VREF(dvp);
		*vpp = dvp;
		error = 0;
	} else {
		de = tmpfs_dir_lookup(dnode, NULL, cnp);
		if (de != NULL && de->td_node == NULL)
			cnp->cn_flags |= ISWHITEOUT;
		if (de == NULL || de->td_node == NULL) {
			/* The entry was not found in the directory.
			 * This is OK if we are creating or renaming an
			 * entry and are working on the last component of
			 * the path name. */
			if ((cnp->cn_flags & ISLASTCN) &&
			    (cnp->cn_nameiop == CREATE || \
			    cnp->cn_nameiop == RENAME ||
			    (cnp->cn_nameiop == DELETE &&
			    cnp->cn_flags & DOWHITEOUT &&
			    cnp->cn_flags & ISWHITEOUT))) {
				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
				    cnp->cn_thread);
				if (error != 0)
					goto out;

				/* Keep the component name in the buffer for
				 * future uses. */
				cnp->cn_flags |= SAVENAME;

				error = EJUSTRETURN;
			} else
				error = ENOENT;
		} else {
			struct tmpfs_node *tnode;

			/* The entry was found, so get its associated
			 * tmpfs_node. */
			tnode = de->td_node;

			/* If we are not at the last path component and
			 * found a non-directory or non-link entry (which
			 * may itself be pointing to a directory), raise
			 * an error. */
			if ((tnode->tn_type != VDIR &&
			    tnode->tn_type != VLNK) &&
			    !(cnp->cn_flags & ISLASTCN)) {
				error = ENOTDIR;
				goto out;
			}

			/* If we are deleting or renaming the entry, keep
			 * track of its tmpfs_dirent so that it can be
			 * easily deleted later. */
			if ((cnp->cn_flags & ISLASTCN) &&
			    (cnp->cn_nameiop == DELETE ||
			    cnp->cn_nameiop == RENAME)) {
				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
				    cnp->cn_thread);
				if (error != 0)
					goto out;

				/* Allocate a new vnode on the matching entry. */
				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
						cnp->cn_lkflags, vpp);
				if (error != 0)
					goto out;

				if ((dnode->tn_mode & S_ISTXT) &&
				  VOP_ACCESS(dvp, VADMIN, cnp->cn_cred, cnp->cn_thread) &&
				  VOP_ACCESS(*vpp, VADMIN, cnp->cn_cred, cnp->cn_thread)) {
					error = EPERM;
					vput(*vpp);
					*vpp = NULL;
					goto out;
				}
				cnp->cn_flags |= SAVENAME;
			} else {
				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
						cnp->cn_lkflags, vpp);
			}
		}
	}

	/* Store the result of this lookup in the cache.  Avoid this if the
	 * request was for creation, as it does not improve timings on
	 * empirical tests. */
	if ((cnp->cn_flags & MAKEENTRY) && cnp->cn_nameiop != CREATE)
		cache_enter(dvp, *vpp, cnp);

out:
	/* If there were no errors, *vpp cannot be null and it must be
	 * locked. */
	MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp)));

	return error;
}
Example #19
/*
 * Set up nameidata for a lookup() call and do it.
 *
 * If pubflag is set, this call is done for a lookup operation on the
 * public filehandle. In that case we allow crossing mountpoints and
 * absolute pathnames. However, the caller is expected to check that
 * the lookup result is within the public fs, and deny access if
 * it is not.
 *
 * nfs_namei() clears out fields that namei() might leave filled with garbage.
 * This is mainly ni_vp and ni_dvp when an error occurs, and ni_dvp when no
 * error occurs but the parent was not requested.
 *
 * dirp may be set whether an error is returned or not, and must be
 * released by the caller.
 */
int
nfs_namei(struct nameidata *ndp, struct nfsrv_descript *nfsd,
    fhandle_t *fhp, int len, struct nfssvc_sock *slp,
    struct sockaddr *nam, struct mbuf **mdp,
    caddr_t *dposp, struct vnode **retdirp, int v3, struct vattr *retdirattrp,
    int *retdirattr_retp, int pubflag)
{
	int i, rem;
	struct mbuf *md;
	char *fromcp, *tocp, *cp;
	struct iovec aiov;
	struct uio auio;
	struct vnode *dp;
	int error, rdonly, linklen;
	struct componentname *cnp = &ndp->ni_cnd;
	int lockleaf = (cnp->cn_flags & LOCKLEAF) != 0;

	*retdirp = NULL;
	cnp->cn_flags |= NOMACCHECK;
	cnp->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);

	/*
	 * Copy the name from the mbuf list to ndp->ni_pnbuf
	 * and set the various ndp fields appropriately.
	 */
	fromcp = *dposp;
	tocp = cnp->cn_pnbuf;
	md = *mdp;
	rem = mtod(md, caddr_t) + md->m_len - fromcp;
	for (i = 0; i < len; i++) {
		while (rem == 0) {
			md = md->m_next;
			if (md == NULL) {
				error = EBADRPC;
				goto out;
			}
			fromcp = mtod(md, caddr_t);
			rem = md->m_len;
		}
		if (*fromcp == '\0' || (!pubflag && *fromcp == '/')) {
			error = EACCES;
			goto out;
		}
		*tocp++ = *fromcp++;
		rem--;
	}
	*tocp = '\0';
	*mdp = md;
	*dposp = fromcp;
	len = nfsm_rndup(len)-len;
	if (len > 0) {
		if (rem >= len)
			*dposp += len;
		else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0)
			goto out;
	}

	if (!pubflag && nfs_ispublicfh(fhp))
		return (ESTALE);

	/*
	 * Extract and set starting directory.
	 */
	error = nfsrv_fhtovp(fhp, 0, &dp, nfsd, slp, nam, &rdonly);
	if (error)
		goto out;
	if (dp->v_type != VDIR) {
		vput(dp);
		error = ENOTDIR;
		goto out;
	}

	if (rdonly)
		cnp->cn_flags |= RDONLY;

	/*
	 * Set return directory.  Reference to dp is implicitly transferred
	 * to the returned pointer
	 */
	*retdirp = dp;
	if (v3) {
		*retdirattr_retp = VOP_GETATTR(dp, retdirattrp,
			ndp->ni_cnd.cn_cred);
	}

	VOP_UNLOCK(dp, 0);

	if (pubflag) {
		/*
		 * Oh joy. For WebNFS, handle those pesky '%' escapes,
		 * and the 'native path' indicator.
		 */
		cp = uma_zalloc(namei_zone, M_WAITOK);
		fromcp = cnp->cn_pnbuf;
		tocp = cp;
		if ((unsigned char)*fromcp >= WEBNFS_SPECCHAR_START) {
			switch ((unsigned char)*fromcp) {
			case WEBNFS_NATIVE_CHAR:
				/*
				 * 'Native' path for us is the same
				 * as a path according to the NFS spec,
				 * just skip the escape char.
				 */
				fromcp++;
				break;
			/*
			 * More may be added in the future, range 0x80-0xff
			 */
			default:
				error = EIO;
				uma_zfree(namei_zone, cp);
				goto out;
			}
		}
		/*
		 * Translate the '%' escapes, URL-style.
		 */
		while (*fromcp != '\0') {
			if (*fromcp == WEBNFS_ESC_CHAR) {
				if (fromcp[1] != '\0' && fromcp[2] != '\0') {
					fromcp++;
					*tocp++ = HEXSTRTOI(fromcp);
					fromcp += 2;
					continue;
				} else {
					error = ENOENT;
					uma_zfree(namei_zone, cp);
					goto out;
				}
			} else
				*tocp++ = *fromcp++;
		}
		*tocp = '\0';
		uma_zfree(namei_zone, cnp->cn_pnbuf);
		cnp->cn_pnbuf = cp;
	}

	ndp->ni_pathlen = (tocp - cnp->cn_pnbuf) + 1;
	ndp->ni_segflg = UIO_SYSSPACE;

	if (pubflag) {
		ndp->ni_rootdir = rootvnode;
		ndp->ni_loopcnt = 0;

		if (cnp->cn_pnbuf[0] == '/')
			dp = rootvnode;
	} else {
		cnp->cn_flags |= NOCROSSMOUNT;
	}

	/*
	 * Initialize for scan, set ni_startdir and bump ref on dp again
	 * because lookup() will dereference ni_startdir.
	 */

	cnp->cn_thread = curthread;
	VREF(dp);
	ndp->ni_startdir = dp;

	if (!lockleaf)
		cnp->cn_flags |= LOCKLEAF;
	for (;;) {
		cnp->cn_nameptr = cnp->cn_pnbuf;
		/*
		 * Call lookup() to do the real work.  If an error occurs,
		 * ndp->ni_vp and ni_dvp are left uninitialized or NULL and
		 * we do not have to dereference anything before returning.
		 * In either case ni_startdir will be dereferenced and NULLed
		 * out.
		 */
		error = lookup(ndp);
		if (error)
			break;

		/*
		 * Check for encountering a symbolic link.  Trivial
		 * termination occurs if no symlink encountered.
		 * Note: zfree is safe because error is 0, so we will
		 * not zfree it again when we break.
		 */
		if ((cnp->cn_flags & ISSYMLINK) == 0) {
			if (cnp->cn_flags & (SAVENAME | SAVESTART))
				cnp->cn_flags |= HASBUF;
			else
				uma_zfree(namei_zone, cnp->cn_pnbuf);
			if (ndp->ni_vp && !lockleaf)
				VOP_UNLOCK(ndp->ni_vp, 0);
			break;
		}

		/*
		 * Validate symlink
		 */
		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
			VOP_UNLOCK(ndp->ni_dvp, 0);
		if (!pubflag) {
			error = EINVAL;
			goto badlink2;
		}

		if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
			error = ELOOP;
			goto badlink2;
		}
		if (ndp->ni_pathlen > 1)
			cp = uma_zalloc(namei_zone, M_WAITOK);
		else
			cp = cnp->cn_pnbuf;
		aiov.iov_base = cp;
		aiov.iov_len = MAXPATHLEN;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td = NULL;
		auio.uio_resid = MAXPATHLEN;
		error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred);
		if (error) {
		badlink1:
			if (ndp->ni_pathlen > 1)
				uma_zfree(namei_zone, cp);
		badlink2:
			vput(ndp->ni_vp);
			vrele(ndp->ni_dvp);
			break;
		}
		linklen = MAXPATHLEN - auio.uio_resid;
		if (linklen == 0) {
			error = ENOENT;
			goto badlink1;
		}
		if (linklen + ndp->ni_pathlen >= MAXPATHLEN) {
			error = ENAMETOOLONG;
			goto badlink1;
		}

		/*
		 * Adjust or replace path
		 */
		if (ndp->ni_pathlen > 1) {
			bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
			uma_zfree(namei_zone, cnp->cn_pnbuf);
			cnp->cn_pnbuf = cp;
		} else
			cnp->cn_pnbuf[linklen] = '\0';
		ndp->ni_pathlen += linklen;

		/*
		 * Cleanup refs for next loop and check if root directory
		 * should replace current directory.  Normally ni_dvp
		 * becomes the new base directory and is cleaned up when
		 * we loop.  Explicitly null pointers after invalidation
		 * to clarify operation.
		 */
		vput(ndp->ni_vp);
		ndp->ni_vp = NULL;

		if (cnp->cn_pnbuf[0] == '/') {
			vrele(ndp->ni_dvp);
			ndp->ni_dvp = ndp->ni_rootdir;
			VREF(ndp->ni_dvp);
		}
		ndp->ni_startdir = ndp->ni_dvp;
		ndp->ni_dvp = NULL;
	}
	if (!lockleaf)
		cnp->cn_flags &= ~LOCKLEAF;

	/*
	 * nfs_namei() guarantees that fields will not contain garbage
	 * whether an error occurs or not.  This allows the caller to track
	 * cleanup state trivially.
	 */
out:
	if (error) {
		uma_zfree(namei_zone, cnp->cn_pnbuf);
		ndp->ni_vp = NULL;
		ndp->ni_dvp = NULL;
		ndp->ni_startdir = NULL;
		cnp->cn_flags &= ~HASBUF;
	} else if ((ndp->ni_cnd.cn_flags & (WANTPARENT|LOCKPARENT)) == 0) {
		ndp->ni_dvp = NULL;
	}
	return (error);
}
Example #20
/*
 * Rename system call.
 * 	rename("foo", "bar");
 * is essentially
 *	unlink("bar");
 *	link("foo", "bar");
 *	unlink("foo");
 * but ``atomically''.  Can't do full commit without saving state in the
 * inode on disk which isn't feasible at this time.  Best we can do is
 * always guarantee the target exists.
 *
 * Basic algorithm is:
 *
 * 1) Bump link count on source while we're linking it to the
 *    target.  This also ensures the inode won't be deleted out
 *    from underneath us while we work (it may be truncated by
 *    a concurrent `trunc' or `open' for creation).
 * 2) Link source to destination.  If destination already exists,
 *    delete it first.
 * 3) Unlink source reference to inode if still around. If a
 *    directory was moved and the parent of the destination
 *    is different from the source, patch the ".." entry in the
 *    directory.
 */
static int
ext2_rename(struct vop_rename_args *ap)
{
	struct vnode *tvp = ap->a_tvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	struct inode *ip, *xp, *dp;
	struct dirtemplate dirbuf;
	int doingdirectory = 0, oldparent = 0, newparent = 0;
	int error = 0;
	u_char namlen;

#ifdef DIAGNOSTIC
	if ((tcnp->cn_flags & HASBUF) == 0 ||
	    (fcnp->cn_flags & HASBUF) == 0)
		panic("ext2_rename: no name");
#endif
	/*
	 * Check for cross-device rename.
	 */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
abortit:
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fdvp);
		vrele(fvp);
		return (error);
	}

	if (tvp && ((VTOI(tvp)->i_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
	    (VTOI(tdvp)->i_flags & APPEND))) {
		error = EPERM;
		goto abortit;
	}

	/*
	 * Renaming a file to itself has no effect.  The upper layers should
	 * not call us in that case.  Temporarily just warn if they do.
	 */
	if (fvp == tvp) {
		printf("ext2_rename: fvp == tvp (can't happen)\n");
		error = 0;
		goto abortit;
	}

	if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
		goto abortit;
	dp = VTOI(fdvp);
	ip = VTOI(fvp);
	if (ip->i_nlink >= EXT2_LINK_MAX) {
 		VOP_UNLOCK(fvp, 0);
 		error = EMLINK;
 		goto abortit;
 	}
	if ((ip->i_flags & (NOUNLINK | IMMUTABLE | APPEND))
	    || (dp->i_flags & APPEND)) {
		VOP_UNLOCK(fvp, 0);
		error = EPERM;
		goto abortit;
	}
	if ((ip->i_mode & IFMT) == IFDIR) {
		/*
		 * Avoid ".", "..", and aliases of "." for obvious reasons.
		 */
		if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
		    dp == ip || (fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT ||
		    (ip->i_flag & IN_RENAME)) {
			VOP_UNLOCK(fvp, 0);
			error = EINVAL;
			goto abortit;
		}
		ip->i_flag |= IN_RENAME;
		oldparent = dp->i_number;
		doingdirectory++;
	}
	vrele(fdvp);

	/*
	 * When the target exists, both the directory
	 * and target vnodes are returned locked.
	 */
	dp = VTOI(tdvp);
	xp = NULL;
	if (tvp)
		xp = VTOI(tvp);

	/*
	 * 1) Bump link count while we're moving stuff
	 *    around.  If we crash somewhere before
	 *    completing our work, the link count
	 *    may be wrong, but correctable.
	 */
	ip->i_nlink++;
	ip->i_flag |= IN_CHANGE;
	if ((error = ext2_update(fvp, !DOINGASYNC(fvp))) != 0) {
		VOP_UNLOCK(fvp, 0);
		goto bad;
	}

	/*
	 * If ".." must be changed (ie the directory gets a new
	 * parent) then the source directory must not be in the
	 * directory hierarchy above the target, as this would
	 * orphan everything below the source directory. Also
	 * the user must have write permission in the source so
	 * as to be able to change "..". We must repeat the call
	 * to namei, as the parent directory is unlocked by the
	 * call to checkpath().
	 */
	error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_thread);
	VOP_UNLOCK(fvp, 0);
	if (oldparent != dp->i_number)
		newparent = dp->i_number;
	if (doingdirectory && newparent) {
		if (error)	/* write access check above */
			goto bad;
		if (xp != NULL)
			vput(tvp);
		error = ext2_checkpath(ip, dp, tcnp->cn_cred);
		if (error)
			goto out;
		VREF(tdvp);
		error = relookup(tdvp, &tvp, tcnp);
		if (error)
			goto out;
		vrele(tdvp);
		dp = VTOI(tdvp);
		xp = NULL;
		if (tvp)
			xp = VTOI(tvp);
	}
	/*
	 * 2) If target doesn't exist, link the target
	 *    to the source and unlink the source.
	 *    Otherwise, rewrite the target directory
	 *    entry to reference the source inode and
	 *    expunge the original entry's existence.
	 */
	if (xp == NULL) {
		if (dp->i_devvp != ip->i_devvp)
			panic("ext2_rename: EXDEV");
		/*
		 * Account for ".." in new directory.
		 * When source and destination have the same
		 * parent we don't fool with the link count.
		 */
		if (doingdirectory && newparent) {
			if ((nlink_t)dp->i_nlink >= EXT2_LINK_MAX) {
				error = EMLINK;
				goto bad;
			}
			dp->i_nlink++;
			dp->i_flag |= IN_CHANGE;
			error = ext2_update(tdvp, !DOINGASYNC(tdvp));
			if (error)
				goto bad;
		}
		error = ext2_direnter(ip, tdvp, tcnp);
		if (error) {
			if (doingdirectory && newparent) {
				dp->i_nlink--;
				dp->i_flag |= IN_CHANGE;
				(void)ext2_update(tdvp, 1);
			}
			goto bad;
		}
		vput(tdvp);
	} else {
		if (xp->i_devvp != dp->i_devvp || xp->i_devvp != ip->i_devvp)
		       panic("ext2_rename: EXDEV");
		/*
		 * Short circuit rename(foo, foo).
		 */
		if (xp->i_number == ip->i_number)
			panic("ext2_rename: same file");
		/*
		 * If the parent directory is "sticky", then the user must
		 * own the parent directory, or the destination of the rename,
		 * otherwise the destination may not be changed (except by
		 * root). This implements append-only directories.
		 */
		if ((dp->i_mode & S_ISTXT) && tcnp->cn_cred->cr_uid != 0 &&
		    tcnp->cn_cred->cr_uid != dp->i_uid &&
		    xp->i_uid != tcnp->cn_cred->cr_uid) {
			error = EPERM;
			goto bad;
		}
		/*
		 * Target must be empty if a directory and have no links
		 * to it. Also, ensure source and target are compatible
		 * (both directories, or both not directories).
		 */
		if ((xp->i_mode&IFMT) == IFDIR) {
			if (! ext2_dirempty(xp, dp->i_number, tcnp->cn_cred) || 
			    xp->i_nlink > 2) {
				error = ENOTEMPTY;
				goto bad;
			}
			if (!doingdirectory) {
				error = ENOTDIR;
				goto bad;
			}
			cache_purge(tdvp);
		} else if (doingdirectory) {
			error = EISDIR;
			goto bad;
		}
		error = ext2_dirrewrite(dp, ip, tcnp);
		if (error)
			goto bad;
		/*
		 * If the target directory is in the same
		 * directory as the source directory,
		 * decrement the link count on the parent
		 * of the target directory.
		 */
		if (doingdirectory && !newparent) {
		       dp->i_nlink--;
		       dp->i_flag |= IN_CHANGE;
		}
		vput(tdvp);
		/*
		 * Adjust the link count of the target to
		 * reflect the dirrewrite above.  If this is
		 * a directory it is empty and there are
		 * no links to it, so we can squash the inode and
		 * any space associated with it.  We disallowed
		 * renaming over top of a directory with links to
		 * it above, as the remaining link would point to
		 * a directory without "." or ".." entries.
		 */
		xp->i_nlink--;
		if (doingdirectory) {
			if (--xp->i_nlink != 0)
				panic("ext2_rename: linked directory");
			error = ext2_truncate(tvp, (off_t)0, IO_SYNC,
			    tcnp->cn_cred, tcnp->cn_thread);
		}
		xp->i_flag |= IN_CHANGE;
		vput(tvp);
		xp = NULL;
	}

	/*
	 * 3) Unlink the source.
	 */
	fcnp->cn_flags &= ~MODMASK;
	fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
	VREF(fdvp);
	error = relookup(fdvp, &fvp, fcnp);
	if (error == 0)
		vrele(fdvp);
	if (fvp != NULL) {
		xp = VTOI(fvp);
		dp = VTOI(fdvp);
	} else {
		/*
		 * From name has disappeared.
		 */
		if (doingdirectory)
			panic("ext2_rename: lost dir entry");
		vrele(ap->a_fvp);
		return (0);
	}
	/*
	 * Ensure that the directory entry still exists and has not
	 * changed while the new name has been entered. If the source is
	 * a file then the entry may have been unlinked or renamed. In
	 * either case there is no further work to be done. If the source
	 * is a directory then it cannot have been rmdir'ed; its link
	 * count of three would cause a rmdir to fail with ENOTEMPTY.
	 * The IN_RENAME flag ensures that it cannot be moved by another
	 * rename.
	 */
	if (xp != ip) {
		if (doingdirectory)
			panic("ext2_rename: lost dir entry");
	} else {
		/*
		 * If the source is a directory with a
		 * new parent, the link count of the old
		 * parent directory must be decremented
		 * and ".." set to point to the new parent.
		 */
		if (doingdirectory && newparent) {
			dp->i_nlink--;
			dp->i_flag |= IN_CHANGE;
			error = vn_rdwr(UIO_READ, fvp, (caddr_t)&dirbuf,
				sizeof(struct dirtemplate), (off_t)0,
				UIO_SYSSPACE, IO_NODELOCKED | IO_NOMACCHECK,
				tcnp->cn_cred, NOCRED, NULL, NULL);
			if (error == 0) {
				/* Like ufs little-endian: */
				namlen = dirbuf.dotdot_type;
				if (namlen != 2 ||
				    dirbuf.dotdot_name[0] != '.' ||
				    dirbuf.dotdot_name[1] != '.') {
					ext2_dirbad(xp, (doff_t)12,
					    "rename: mangled dir");
				} else {
					dirbuf.dotdot_ino = newparent;
					(void) vn_rdwr(UIO_WRITE, fvp,
					    (caddr_t)&dirbuf,
					    sizeof(struct dirtemplate),
					    (off_t)0, UIO_SYSSPACE,
					    IO_NODELOCKED | IO_SYNC |
					    IO_NOMACCHECK, tcnp->cn_cred,
					    NOCRED, NULL, NULL);
					cache_purge(fdvp);
				}
			}
		}
		error = ext2_dirremove(fdvp, fcnp);
		if (!error) {
			xp->i_nlink--;
			xp->i_flag |= IN_CHANGE;
		}
		xp->i_flag &= ~IN_RENAME;
	}
	if (dp)
		vput(fdvp);
	if (xp)
		vput(fvp);
	vrele(ap->a_fvp);
	return (error);

bad:
	if (xp)
		vput(ITOV(xp));
	vput(ITOV(dp));
out:
	if (doingdirectory)
		ip->i_flag &= ~IN_RENAME;
	if (vn_lock(fvp, LK_EXCLUSIVE) == 0) {
		ip->i_nlink--;
		ip->i_flag |= IN_CHANGE;
		ip->i_flag &= ~IN_RENAME;
		vput(fvp);
	} else
		vrele(fvp);
	return (error);
}
Example #21
/*
 * Convert a pathname into a pointer to a vnode.
 *
 * The FOLLOW flag is set when symbolic links are to be followed
 * when they occur at the end of the name translation process.
 * Symbolic links are always followed for all other pathname
 * components other than the last.
 *
 * If the LOCKLEAF flag is set, a locked vnode is returned.
 *
 * The segflg defines whether the name is to be copied from user
 * space or kernel space.
 *
 * Overall outline of namei:
 *
 *	copy in name
 *	get starting directory
 *	while (!done && !error) {
 *		call lookup to search path.
 *		if symbolic link, massage name in buffer and continue
 *	}
 */
int
namei(struct nameidata *ndp)
{
	struct filedesc *fdp;		/* pointer to file descriptor state */
	char *cp;			/* pointer into pathname argument */
	struct vnode *dp;		/* the directory we are searching */
	struct iovec aiov;		/* uio for reading symbolic links */
	struct uio auio;
	int error, linklen;
	struct componentname *cnp = &ndp->ni_cnd;
	struct proc *p = cnp->cn_proc;

	ndp->ni_cnd.cn_cred = ndp->ni_cnd.cn_proc->p_ucred;
#ifdef DIAGNOSTIC
	if (!cnp->cn_cred || !cnp->cn_proc)
		panic ("namei: bad cred/proc");
	if (cnp->cn_nameiop & (~OPMASK))
		panic ("namei: nameiop contaminated with flags");
	if (cnp->cn_flags & OPMASK)
		panic ("namei: flags contaminated with nameiops");
#endif
	fdp = cnp->cn_proc->p_fd;

	/*
	 * Get a buffer for the name to be translated, and copy the
	 * name into the buffer.
	 */
	if ((cnp->cn_flags & HASBUF) == 0)
		cnp->cn_pnbuf = pool_get(&namei_pool, PR_WAITOK);
	if (ndp->ni_segflg == UIO_SYSSPACE)
		error = copystr(ndp->ni_dirp, cnp->cn_pnbuf,
			    MAXPATHLEN, &ndp->ni_pathlen);
	else
		error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf,
			    MAXPATHLEN, &ndp->ni_pathlen);

	/*
	 * Fail on null pathnames
	 */
	if (error == 0 && ndp->ni_pathlen == 1)
		error = ENOENT;

	if (error) {
		pool_put(&namei_pool, cnp->cn_pnbuf);
		ndp->ni_vp = NULL;
		return (error);
	}

#ifdef KTRACE
	if (KTRPOINT(cnp->cn_proc, KTR_NAMEI))
		ktrnamei(cnp->cn_proc, cnp->cn_pnbuf);
#endif
#if NSYSTRACE > 0
	if (ISSET(cnp->cn_proc->p_flag, P_SYSTRACE))
		systrace_namei(ndp);
#endif

	/*
	 *  Strip trailing slashes, as requested
	 */
	if (cnp->cn_flags & STRIPSLASHES) {
		char *end = cnp->cn_pnbuf + ndp->ni_pathlen - 2;

		cp = end;
		while (cp >= cnp->cn_pnbuf && (*cp == '/'))
			cp--;

		/* Still some remaining characters in the buffer */
		if (cp >= cnp->cn_pnbuf) {
			ndp->ni_pathlen -= (end - cp);
			*(cp + 1) = '\0';
		}
	}

	ndp->ni_loopcnt = 0;

	/*
	 * Get starting point for the translation.
	 */
	if ((ndp->ni_rootdir = fdp->fd_rdir) == NULL)
		ndp->ni_rootdir = rootvnode;
	/*
	 * Check if starting from root directory or current directory.
	 */
	if (cnp->cn_pnbuf[0] == '/') {
		dp = ndp->ni_rootdir;
		VREF(dp);
	} else {
		dp = fdp->fd_cdir;
		VREF(dp);
	}
	for (;;) {
		if (!dp->v_mount) {
			/* Give up if the directory is no longer mounted */
			pool_put(&namei_pool, cnp->cn_pnbuf);
			return (ENOENT);
		}
		cnp->cn_nameptr = cnp->cn_pnbuf;
		ndp->ni_startdir = dp;
		if ((error = lookup(ndp)) != 0) {
			pool_put(&namei_pool, cnp->cn_pnbuf);
			return (error);
		}
		/*
		 * Check for symbolic link
		 */
		if ((cnp->cn_flags & ISSYMLINK) == 0) {
			if ((cnp->cn_flags & (SAVENAME | SAVESTART)) == 0)
				pool_put(&namei_pool, cnp->cn_pnbuf);
			else
				cnp->cn_flags |= HASBUF;
			return (0);
		}
		if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN))
			VOP_UNLOCK(ndp->ni_dvp, 0, p);
		if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
			error = ELOOP;
			break;
		}
		if (ndp->ni_pathlen > 1)
			cp = pool_get(&namei_pool, PR_WAITOK);
		else
			cp = cnp->cn_pnbuf;
		aiov.iov_base = cp;
		aiov.iov_len = MAXPATHLEN;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_procp = cnp->cn_proc;
		auio.uio_resid = MAXPATHLEN;
		error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred);
		if (error) {
badlink:
			if (ndp->ni_pathlen > 1)
				pool_put(&namei_pool, cp);
			break;
		}
		linklen = MAXPATHLEN - auio.uio_resid;
		if (linklen + ndp->ni_pathlen >= MAXPATHLEN) {
			error = ENAMETOOLONG;
			goto badlink;
		}
		if (ndp->ni_pathlen > 1) {
			bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
			pool_put(&namei_pool, cnp->cn_pnbuf);
			cnp->cn_pnbuf = cp;
		} else
			cnp->cn_pnbuf[linklen] = '\0';
		ndp->ni_pathlen += linklen;
		vput(ndp->ni_vp);
		dp = ndp->ni_dvp;
		/*
		 * Check if root directory should replace current directory.
		 */
		if (cnp->cn_pnbuf[0] == '/') {
			vrele(dp);
			dp = ndp->ni_rootdir;
			VREF(dp);
		}
	}
	pool_put(&namei_pool, cnp->cn_pnbuf);
	vrele(ndp->ni_dvp);
	vput(ndp->ni_vp);
	ndp->ni_vp = NULL;
	return (error);
}
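A hedged sketch of a typical namei() caller in the interface used above (exact NDINIT() arguments differ between BSD releases): translate a kernel-space path into a locked, referenced vnode and release it when done.

static int
example_path_to_vnode(char *path, struct proc *p)
{
	struct nameidata nd;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);
	error = namei(&nd);
	if (error)
		return (error);
	/* ... use nd.ni_vp, which is locked and referenced ... */
	vput(nd.ni_vp);
	return (0);
}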
Example #22
/*
 * vp is the current namei directory
 * ndp is the name to locate in that directory...
 */
static int
fdesc_lookup(struct vop_lookup_args *ap)
{
	struct vnode **vpp = ap->a_vpp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	char *pname = cnp->cn_nameptr;
	struct thread *td = cnp->cn_thread;
	struct file *fp;
	struct fdesc_get_ino_args arg;
	cap_rights_t rights;
	int nlen = cnp->cn_namelen;
	u_int fd, fd1;
	int error;
	struct vnode *fvp;

	if ((cnp->cn_flags & ISLASTCN) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		error = EROFS;
		goto bad;
	}

	if (cnp->cn_namelen == 1 && *pname == '.') {
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	if (VTOFDESC(dvp)->fd_type != Froot) {
		error = ENOTDIR;
		goto bad;
	}

	fd = 0;
	/* the only time a leading 0 is acceptable is if it's "0" */
	if (*pname == '0' && nlen != 1) {
		error = ENOENT;
		goto bad;
	}
	while (nlen--) {
		if (*pname < '0' || *pname > '9') {
			error = ENOENT;
			goto bad;
		}
		fd1 = 10 * fd + *pname++ - '0';
		if (fd1 < fd) {
			error = ENOENT;
			goto bad;
		}
		fd = fd1;
	}

	/*
	 * No rights to check since 'fp' isn't actually used.
	 */
	if ((error = fget(td, fd, cap_rights_init(&rights), &fp)) != 0)
		goto bad;

	/* Check if we're looking up ourselves. */
	if (VTOFDESC(dvp)->fd_ix == FD_DESC + fd) {
		/*
		 * In case we're holding the last reference to the file, the dvp
		 * will be re-acquired.
		 */
		vhold(dvp);
		VOP_UNLOCK(dvp, 0);
		fdrop(fp, td);

		/* Re-acquire the lock afterwards. */
		vn_lock(dvp, LK_RETRY | LK_EXCLUSIVE);
		vdrop(dvp);
		fvp = dvp;
		if ((dvp->v_iflag & VI_DOOMED) != 0)
			error = ENOENT;
	} else {
		/*
		 * Unlock our root node (dvp) when doing this, since we might
		 * otherwise deadlock: the vnode might be locked by another thread
		 * and the root vnode lock will be obtained afterwards (in case
		 * we're looking up the fd of the root vnode), which will be the
		 * opposite lock order. Vhold the root vnode first so we don't
		 * lose it.
		 */
		arg.ftype = Fdesc;
		arg.fd_fd = fd;
		arg.ix = FD_DESC + fd;
		arg.fp = fp;
		arg.td = td;
		error = vn_vget_ino_gen(dvp, fdesc_get_ino_alloc, &arg,
		    LK_EXCLUSIVE, &fvp);
	}
	
	if (error)
		goto bad;
	*vpp = fvp;
	return (0);

bad:
	*vpp = NULL;
	return (error);
}
Example #23
/*
 * This is the 10-Apr-92 bypass routine.
 *    This version has been optimized for speed, throwing away some
 * safety checks.  It should still always work, but it's not as
 * robust to programmer errors.
 *
 * In general, we map all vnodes going down and unmap them on the way back.
 * As an exception to this, vnodes can be marked "unmapped" by setting
 * the Nth bit in the operation's vdesc_flags.
 *
 * Also, some BSD vnode operations have the side effect of vrele'ing
 * their arguments.  With stacking, the reference counts are held
 * by the upper node, not the lower one, so we must handle these
 * side-effects here.  This is not of concern in Sun-derived systems
 * since there are no such side-effects.
 *
 * This makes the following assumptions:
 * - only one returned vpp
 * - no INOUT vpp's (Sun's vop_open has one of these)
 * - the vnode operation vector of the first vnode should be used
 *   to determine what implementation of the op should be invoked
 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
 *   problems on rmdir'ing mount points and renaming?)
 */
int
null_bypass(struct vop_generic_args *ap)
{
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;

	if (null_bug_bypass)
		printf ("null_bypass: %s\n", descp->vdesc_name);

#ifdef DIAGNOSTIC
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("null_bypass: no vp's in map");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap);
		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (We must always map first vp or vclean fails.)
		 */
		if (i && (*this_vp_p == NULLVP ||
		    (*this_vp_p)->v_op != &null_vnodeops)) {
			old_vps[i] = NULLVP;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p);
			/*
			 * XXX - Several operations have the side effect
			 * of vrele'ing their vp's.  We must account for
			 * that.  (This should go away in the future.)
			 */
			if (reles & VDESC_VP0_WILLRELE)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	if (vps_p[0] && *vps_p[0])
		error = VCALL(ap);
	else {
		printf("null_bypass: no map for %s\n", descp->vdesc_name);
		error = EINVAL;
	}

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
#if 0
			if (reles & VDESC_VP0_WILLUNLOCK)
				VOP_UNLOCK(*(vps_p[i]), 0);
#endif
			if (reles & VDESC_VP0_WILLRELE)
				vrele(*(vps_p[i]));
		}
	}

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		/*
		 * XXX - even though some ops have vpp returned vp's,
		 * several ops actually vrele this before returning.
		 * We must avoid these ops.
		 * (This should go away when these ops are regularized.)
		 */
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
				 descp->vdesc_vpp_offset,ap);
		if (*vppp)
			error = null_nodeget(old_vps[0]->v_mount, **vppp, *vppp);
	}

 out:
	return (error);
}
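
/*
 * A small sketch of the offset-table trick behind VOPARG_OFFSETTO():
 * each operation descriptor records where the vnode pointers live inside
 * that operation's argument structure, so one generic bypass routine can
 * find and rewrite them without knowing the structure's type.  The
 * structure, table, and helper below are hypothetical, not the real
 * vnodeop descriptors.
 */
#include <stddef.h>

struct vnode;				/* opaque in this sketch */

struct sketch_open_args {
	struct vnode	*a_vp;
	int		 a_mode;
};

/* One entry per vnode pointer in the argument structure; -1 ends the list. */
static const ptrdiff_t sketch_open_vp_offsets[] = {
	offsetof(struct sketch_open_args, a_vp),
	-1
};

/* Return the address of the i-th vnode pointer slot inside an argument struct. */
static struct vnode **
sketch_arg_vp(void *ap, const ptrdiff_t *offsets, int i)
{
	if (offsets[i] == -1)
		return (NULL);
	return ((struct vnode **)((char *)ap + offsets[i]));
}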
Exemplo n.º 24
0
/*
 * Reacquire a path name component.
 */
int
relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct proc *p = cnp->cn_proc;
	struct vnode *dp = 0;		/* the directory we are searching */
	int wantparent;			/* 1 => wantparent or lockparent flag */
	int rdonly;			/* lookup read-only flag bit */
	int error = 0;
#ifdef NAMEI_DIAGNOSTIC
	int newhash;			/* DEBUG: check name hash */
	char *cp;			/* DEBUG: check name ptr/len */
#endif

	/*
	 * Setup: break out flag bits into variables.
	 */
	wantparent = cnp->cn_flags & (LOCKPARENT|WANTPARENT);
	rdonly = cnp->cn_flags & RDONLY;
	cnp->cn_flags &= ~ISSYMLINK;
	dp = dvp;
	vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);

/* dirloop: */
	/*
	 * Search a new directory.
	 *
	 * The cn_hash value is for use by vfs_cache.
	 * The last component of the filename is left accessible via
	 * cnp->cn_nameptr for callers that need the name. Callers needing
	 * the name set the SAVENAME flag. When done, they assume
	 * responsibility for freeing the pathname buffer.
	 */
#ifdef NAMEI_DIAGNOSTIC
	cp = NULL;
	newhash = hash32_stre(cnp->cn_nameptr, '/', &cp, HASHINIT);
	if (newhash != cnp->cn_hash)
		panic("relookup: bad hash");
	if (cnp->cn_namelen != cp - cnp->cn_nameptr)
		panic ("relookup: bad len");
	if (*cp != 0)
		panic("relookup: not last component");
	printf("{%s}: ", cnp->cn_nameptr);
#endif

	/*
	 * Check for degenerate name (e.g. / or "")
	 * which is a way of talking about a directory,
	 * e.g. like "/." or ".".
	 */
	if (cnp->cn_nameptr[0] == '\0')
		panic("relookup: null name");

	if (cnp->cn_flags & ISDOTDOT)
		panic ("relookup: lookup on dot-dot");

	/*
	 * We now have a segment name to search for, and a directory to search.
	 */
	if ((error = VOP_LOOKUP(dp, vpp, cnp)) != 0) {
#ifdef DIAGNOSTIC
		if (*vpp != NULL)
			panic("leaf should be empty");
#endif
		if (error != EJUSTRETURN)
			goto bad;
		/*
		 * If creating and at end of pathname, then can consider
		 * allowing file to be created.
		 */
		if (rdonly || (dvp->v_mount->mnt_flag & MNT_RDONLY)) {
			error = EROFS;
			goto bad;
		}
		/* ASSERT(dvp == ndp->ni_startdir) */
		if (cnp->cn_flags & SAVESTART)
			VREF(dvp);
		/*
		 * We return with ni_vp NULL to indicate that the entry
		 * doesn't currently exist, leaving a pointer to the
		 * (possibly locked) directory inode in ndp->ni_dvp.
		 */
		return (0);
	}
	dp = *vpp;

#ifdef DIAGNOSTIC
	/*
	 * Check for symbolic link
	 */
	if (dp->v_type == VLNK && (cnp->cn_flags & FOLLOW))
		panic ("relookup: symlink found.");
#endif

	/*
	 * Check for read-only file systems.
	 */
	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME) {
		/*
		 * Disallow directory write attempts on read-only
		 * file systems.
		 */
		if (rdonly || (dp->v_mount->mnt_flag & MNT_RDONLY) ||
		    (wantparent &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY))) {
			error = EROFS;
			goto bad2;
		}
	}
	/* ASSERT(dvp == ndp->ni_startdir) */
	if (cnp->cn_flags & SAVESTART)
		VREF(dvp);
	if (!wantparent)
		vrele(dvp);
	if ((cnp->cn_flags & LOCKLEAF) == 0)
		VOP_UNLOCK(dp, 0, p);
	return (0);

bad2:
	if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN))
		VOP_UNLOCK(dvp, 0, p);
	vrele(dvp);
bad:
	vput(dp);
	*vpp = NULL;
	return (error);
}
Exemplo n.º 25
0
/* Common routine shared by sys___getcwd() and vn_isunder() */
int
vfs_getcwd_common(struct vnode *lvp, struct vnode *rvp, char **bpp, char *bufp,
    int limit, int flags, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct vnode *uvp = NULL;
	char *bp = NULL;
	int error, perms = VEXEC;

	if (rvp == NULL) {
		rvp = fdp->fd_rdir;
		if (rvp == NULL)
			rvp = rootvnode;
	}

	VREF(rvp);
	VREF(lvp);

	error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, p);
	if (error) {
		vrele(lvp);
		lvp = NULL;
		goto out;
	}

	if (bufp)
		bp = *bpp;

	if (lvp == rvp) {
		if (bp)
			*(--bp) = '/';
		goto out;
	}

	/*
	 * This loop will terminate when we hit the root, VOP_READDIR() or
	 * VOP_LOOKUP() fails, or we run out of space in the user buffer.
	 */
	do {
		if (lvp->v_type != VDIR) {
			error = ENOTDIR;
			goto out;
		}

		/* Check for access if caller cares */
		if (flags & GETCWD_CHECK_ACCESS) {
			error = VOP_ACCESS(lvp, perms, p->p_ucred, p);
			if (error)
				goto out;
			perms = VEXEC|VREAD;
		}

		/* Step up if we're a covered vnode */
		while (lvp->v_flag & VROOT) {
			struct vnode *tvp;

			if (lvp == rvp)
				goto out;
			
			tvp = lvp;
			lvp = lvp->v_mount->mnt_vnodecovered;

			vput(tvp);

			if (lvp == NULL) {
				error = ENOENT;
				goto out;
			}

			VREF(lvp);

			error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, p);
			if (error) {
				vrele(lvp);
				lvp = NULL;
				goto out;
			}
		}

		/* Look in the name cache */
		error = vfs_getcwd_getcache(&lvp, &uvp, &bp, bufp);

		if (error == -1) {
			/* If that fails, look in the directory */
			error = vfs_getcwd_scandir(&lvp, &uvp, &bp, bufp, p);
		}

		if (error)
			goto out;

#ifdef DIAGNOSTIC
		if (lvp != NULL)
			panic("getcwd: oops, forgot to null lvp");
		if (bufp && (bp <= bufp)) {
			panic("getcwd: oops, went back too far");
		}
#endif

		if (bp)
			*(--bp) = '/';

		lvp = uvp;
		uvp = NULL;
		limit--;

	} while ((lvp != rvp) && (limit > 0)); 

out:

	if (bpp)
		*bpp = bp;

	if (uvp)
		vput(uvp);

	if (lvp)
		vput(lvp);

	vrele(rvp);

	return (error);
}
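
/*
 * The "*(--bp) = '/'" pattern above builds the path backwards, filling
 * the caller's buffer from the end toward the front as each parent
 * directory is resolved.  A user-space sketch of the same idea;
 * prepend_component() is a hypothetical helper, not part of this code.
 * The caller starts with *bpp pointing just past its last written byte
 * (initially the terminating NUL at the end of bufp) and, when done,
 * reads the finished path starting at *bpp.
 */
#include <string.h>

static int
prepend_component(char **bpp, char *bufp, const char *name)
{
	size_t len = strlen(name);
	char *bp = *bpp;

	if ((size_t)(bp - bufp) < len + 1)
		return (-1);		/* out of buffer space */
	bp -= len;
	memcpy(bp, name, len);
	*(--bp) = '/';
	*bpp = bp;
	return (0);
}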
Exemplo n.º 26
0
/*
 * When we search a directory the blocks containing directory entries are
 * read and examined.  The directory entries contain information that would
 * normally be in the inode of a unix filesystem.  This means that some of
 * a directory's contents may also be in memory resident denodes (sort of
 * an inode).  This can cause problems if we are searching while some other
 * process is modifying a directory.  To prevent one process from accessing
 * incompletely modified directory information we depend upon being the
 * sole owner of a directory block.  bread/brelse provide this service.
 * This being the case, when a process modifies a directory it must first
 * acquire the disk block that contains the directory entry to be modified.
 * Then update the disk block and the denode, and then write the disk block
 * out to disk.  This way disk blocks containing directory entries and
 * in-memory denodes will be in sync.
 */
static int
msdosfs_lookup_(struct vnode *vdp, struct vnode **vpp,
    struct componentname *cnp, u_int64_t *dd_inum)
{
	struct mbnambuf nb;
	daddr_t bn;
	int error;
	int slotcount;
	int slotoffset = 0;
	int frcn;
	u_long cluster;
	int blkoff;
	int diroff;
	int blsize;
	int isadir;		/* ~0 if found direntry is a directory	 */
	u_long scn;		/* starting cluster number		 */
	struct vnode *pdp;
	struct denode *dp;
	struct denode *tdp;
	struct msdosfsmount *pmp;
	struct buf *bp = 0;
	struct direntry *dep = NULL;
	u_char dosfilename[12];
	int flags = cnp->cn_flags;
	int nameiop = cnp->cn_nameiop;
	int unlen;
	u_int64_t inode1;

	int wincnt = 1;
	int chksum = -1, chksum_ok;
	int olddos = 1;

#ifdef MSDOSFS_DEBUG
	printf("msdosfs_lookup(): looking for %s\n", cnp->cn_nameptr);
#endif
	dp = VTODE(vdp);
	pmp = dp->de_pmp;
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_lookup(): vdp %p, dp %p, Attr %02x\n",
	    vdp, dp, dp->de_Attributes);
#endif

 restart:
	if (vpp != NULL)
		*vpp = NULL;
	/*
	 * If they are going after the . or .. entry in the root directory,
	 * they won't find it.  DOS filesystems don't have them in the root
	 * directory.  So, we fake it. deget() is in on this scam too.
	 */
	if ((vdp->v_vflag & VV_ROOT) && cnp->cn_nameptr[0] == '.' &&
	    (cnp->cn_namelen == 1 ||
		(cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.'))) {
		isadir = ATTR_DIRECTORY;
		scn = MSDOSFSROOT;
#ifdef MSDOSFS_DEBUG
		printf("msdosfs_lookup(): looking for . or .. in root directory\n");
#endif
		cluster = MSDOSFSROOT;
		blkoff = MSDOSFSROOT_OFS;
		goto foundroot;
	}

	switch (unix2dosfn((const u_char *)cnp->cn_nameptr, dosfilename,
	    cnp->cn_namelen, 0, pmp)) {
	case 0:
		return (EINVAL);
	case 1:
		break;
	case 2:
		wincnt = winSlotCnt((const u_char *)cnp->cn_nameptr,
		    cnp->cn_namelen, pmp) + 1;
		break;
	case 3:
		olddos = 0;
		wincnt = winSlotCnt((const u_char *)cnp->cn_nameptr,
		    cnp->cn_namelen, pmp) + 1;
		break;
	}
	if (pmp->pm_flags & MSDOSFSMNT_SHORTNAME) {
		wincnt = 1;
		olddos = 1;
	}
	unlen = winLenFixup(cnp->cn_nameptr, cnp->cn_namelen);

	/*
	 * Suppress search for slots unless creating
	 * file and at end of pathname, in which case
	 * we watch for a place to put the new file in
	 * case it doesn't already exist.
	 */
	slotcount = wincnt;
	if ((nameiop == CREATE || nameiop == RENAME) &&
	    (flags & ISLASTCN))
		slotcount = 0;

#ifdef MSDOSFS_DEBUG
	printf("msdosfs_lookup(): dos version of filename %s, length %ld\n",
	    dosfilename, cnp->cn_namelen);
#endif
	/*
	 * Search the directory pointed at by vdp for the name pointed at
	 * by cnp->cn_nameptr.
	 */
	tdp = NULL;
	mbnambuf_init(&nb);
	/*
	 * The outer loop ranges over the clusters that make up the
	 * directory.  Note that the root directory is different from all
	 * other directories.  It has a fixed number of blocks that are not
	 * part of the pool of allocatable clusters.  So, we treat it a
	 * little differently. The root directory starts at "cluster" 0.
	 */
	diroff = 0;
	for (frcn = 0;; frcn++) {
		error = pcbmap(dp, frcn, &bn, &cluster, &blsize);
		if (error) {
			if (error == E2BIG)
				break;
			return (error);
		}
		error = bread(pmp->pm_devvp, bn, blsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
		for (blkoff = 0; blkoff < blsize;
		     blkoff += sizeof(struct direntry),
		     diroff += sizeof(struct direntry)) {
			dep = (struct direntry *)(bp->b_data + blkoff);
			/*
			 * If the slot is empty and we are still looking
			 * for an empty then remember this one.  If the
			 * slot is not empty then check to see if it
			 * matches what we are looking for.  If the slot
			 * has never been filled with anything, then the
			 * remainder of the directory has never been used,
			 * so there is no point in searching it.
			 */
			if (dep->deName[0] == SLOT_EMPTY ||
			    dep->deName[0] == SLOT_DELETED) {
				/*
				 * Drop memory of previous long matches
				 */
				chksum = -1;
				mbnambuf_init(&nb);

				if (slotcount < wincnt) {
					slotcount++;
					slotoffset = diroff;
				}
				if (dep->deName[0] == SLOT_EMPTY) {
					brelse(bp);
					goto notfound;
				}
			} else {
				/*
				 * If there wasn't enough space for our winentries,
				 * forget about the empty space
				 */
				if (slotcount < wincnt)
					slotcount = 0;

				/*
				 * Check for Win95 long filename entry
				 */
				if (dep->deAttributes == ATTR_WIN95) {
					if (pmp->pm_flags & MSDOSFSMNT_SHORTNAME)
						continue;

					chksum = win2unixfn(&nb,
					    (struct winentry *)dep, chksum,
					    pmp);
					continue;
				}

				chksum = winChkName(&nb,
				    (const u_char *)cnp->cn_nameptr, unlen,
				    chksum, pmp);
				if (chksum == -2) {
					chksum = -1;
					continue;
				}

				/*
				 * Ignore volume labels (anywhere, not just
				 * the root directory).
				 */
				if (dep->deAttributes & ATTR_VOLUME) {
					chksum = -1;
					continue;
				}

				/*
				 * Check for a checksum or name match
				 */
				chksum_ok = (chksum == winChksum(dep->deName));
				if (!chksum_ok
				    && (!olddos || bcmp(dosfilename, dep->deName, 11))) {
					chksum = -1;
					continue;
				}
#ifdef MSDOSFS_DEBUG
				printf("msdosfs_lookup(): match blkoff %d, diroff %d\n",
				    blkoff, diroff);
#endif
				/*
				 * Remember where this directory
				 * entry came from for whoever did
				 * this lookup.
				 */
				dp->de_fndoffset = diroff;
				if (chksum_ok && nameiop == RENAME) {
					/*
					 * Target had correct long name
					 * directory entries, reuse them
					 * as needed.
					 */
					dp->de_fndcnt = wincnt - 1;
				} else {
					/*
					 * Long name directory entries
					 * not present or corrupt, can only
					 * reuse dos directory entry.
					 */
					dp->de_fndcnt = 0;
				}

				goto found;
			}
		}	/* for (blkoff = 0; .... */
		/*
		 * Release the buffer holding the directory cluster just
		 * searched.
		 */
		brelse(bp);
	}	/* for (frcn = 0; ; frcn++) */

notfound:
	/*
	 * We hold no disk buffers at this point.
	 */

	/*
	 * Fixup the slot description to point to the place where
	 * we might put the new DOS direntry (putting the Win95
	 * long name entries before that)
	 */
	if (!slotcount) {
		slotcount = 1;
		slotoffset = diroff;
	}
	if (wincnt > slotcount)
		slotoffset += sizeof(struct direntry) * (wincnt - slotcount);

	/*
	 * If we get here we didn't find the entry we were looking for. But
	 * that's ok if we are creating or renaming and are at the end of
	 * the pathname and the directory hasn't been removed.
	 */
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_lookup(): op %d, refcnt %ld\n",
	    nameiop, dp->de_refcnt);
	printf("               slotcount %d, slotoffset %d\n",
	       slotcount, slotoffset);
#endif
	if ((nameiop == CREATE || nameiop == RENAME) &&
	    (flags & ISLASTCN) && dp->de_refcnt != 0) {
		/*
		 * Access for write is interpreted as allowing
		 * creation of files in the directory.
		 */
		error = VOP_ACCESS(vdp, VWRITE, cnp->cn_cred, cnp->cn_thread);
		if (error)
			return (error);
		/*
		 * Return an indication of where the new directory
		 * entry should be put.
		 */
		dp->de_fndoffset = slotoffset;
		dp->de_fndcnt = wincnt - 1;

		/*
		 * We return with the directory locked, so that
		 * the parameters we set up above will still be
		 * valid if we actually decide to do a direnter().
		 * We return ni_vp == NULL to indicate that the entry
		 * does not currently exist; we leave a pointer to
		 * the (locked) directory inode in ndp->ni_dvp.
		 * The pathname buffer is saved so that the name
		 * can be obtained later.
		 *
		 * NB - if the directory is unlocked, then this
		 * information cannot be used.
		 */
		cnp->cn_flags |= SAVENAME;
		return (EJUSTRETURN);
	}
#if 0
	/*
	 * Insert name into cache (as non-existent) if appropriate.
	 *
	 * XXX Negative caching is broken for msdosfs because the name
	 * cache doesn't understand peculiarities such as case insensitivity
	 * and 8.3 filenames.  Hence, it may not invalidate all negative
	 * entries if a file with this name is later created.
	 */
	if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE)
		cache_enter(vdp, *vpp, cnp);
#endif
	return (ENOENT);

found:
	/*
	 * NOTE:  We still have the buffer with matched directory entry at
	 * this point.
	 */
	isadir = dep->deAttributes & ATTR_DIRECTORY;
	scn = getushort(dep->deStartCluster);
	if (FAT32(pmp)) {
		scn |= getushort(dep->deHighClust) << 16;
		if (scn == pmp->pm_rootdirblk) {
			/*
			 * There should actually be 0 here.
			 * Just ignore the error.
			 */
			scn = MSDOSFSROOT;
		}
	}

	if (isadir) {
		cluster = scn;
		if (cluster == MSDOSFSROOT)
			blkoff = MSDOSFSROOT_OFS;
		else
			blkoff = 0;
	} else if (cluster == MSDOSFSROOT)
		blkoff = diroff;

	/*
	 * Now release buf to allow deget to read the entry again.
	 * Reserving it here and giving it to deget could result
	 * in a deadlock.
	 */
	brelse(bp);
	bp = 0;
	
foundroot:
	/*
	 * If we entered at foundroot, then we are looking for the . or ..
	 * entry of the filesystem's root directory.  isadir and scn were
	 * set up before jumping here.  And, bp is already null.
	 */
	if (FAT32(pmp) && scn == MSDOSFSROOT)
		scn = pmp->pm_rootdirblk;

	if (dd_inum != NULL) {
		*dd_inum = (uint64_t)pmp->pm_bpcluster * scn + blkoff;
		return (0);
	}

	/*
	 * If deleting, and at end of pathname, return
	 * parameters which can be used to remove file.
	 */
	if (nameiop == DELETE && (flags & ISLASTCN)) {
		/*
		 * Don't allow deleting the root.
		 */
		if (blkoff == MSDOSFSROOT_OFS)
			return (EBUSY);

		/*
		 * Write access to directory required to delete files.
		 */
		error = VOP_ACCESS(vdp, VWRITE, cnp->cn_cred, cnp->cn_thread);
		if (error)
			return (error);

		/*
		 * Return pointer to current entry in dp->i_offset.
		 * Save directory inode pointer in ndp->ni_dvp for dirremove().
		 */
		if (dp->de_StartCluster == scn && isadir) {	/* "." */
			VREF(vdp);
			*vpp = vdp;
			return (0);
		}
		error = deget(pmp, cluster, blkoff, &tdp);
		if (error)
			return (error);
		*vpp = DETOV(tdp);
		return (0);
	}

	/*
	 * If rewriting (RENAME), return the inode and the
	 * information required to rewrite the present directory
	 * Must get inode of directory entry to verify it's a
	 * regular file, or empty directory.
	 */
	if (nameiop == RENAME && (flags & ISLASTCN)) {
		if (blkoff == MSDOSFSROOT_OFS)
			return (EBUSY);

		error = VOP_ACCESS(vdp, VWRITE, cnp->cn_cred, cnp->cn_thread);
		if (error)
			return (error);

		/*
		 * Careful about locking second inode.
		 * This can only occur if the target is ".".
		 */
		if (dp->de_StartCluster == scn && isadir)
			return (EISDIR);

		if ((error = deget(pmp, cluster, blkoff, &tdp)) != 0)
			return (error);
		*vpp = DETOV(tdp);
		cnp->cn_flags |= SAVENAME;
		return (0);
	}

	/*
	 * Step through the translation in the name.  We do not `vput' the
	 * directory because we may need it again if a symbolic link
	 * is relative to the current directory.  Instead we save it
	 * unlocked as "pdp".  We must get the target inode before unlocking
	 * the directory to insure that the inode will not be removed
	 * before we get it.  We prevent deadlock by always fetching
	 * inodes from the root, moving down the directory tree. Thus
	 * when following backward pointers ".." we must unlock the
	 * parent directory before getting the requested directory.
	 */
	pdp = vdp;
	if (flags & ISDOTDOT) {
		error = msdosfs_deget_dotdot(pdp, cluster, blkoff, vpp);
		if (error) {
			*vpp = NULL;
			return (error);
		}
		/*
		 * Recheck that ".." still points to the inode we
		 * looked up before pdp lock was dropped.
		 */
		error = msdosfs_lookup_(pdp, NULL, cnp, &inode1);
		if (error) {
			vput(*vpp);
			*vpp = NULL;
			return (error);
		}
		if (VTODE(*vpp)->de_inode != inode1) {
			vput(*vpp);
			goto restart;
		}
	} else if (dp->de_StartCluster == scn && isadir) {
		VREF(vdp);	/* we want ourself, ie "." */
		*vpp = vdp;
	} else {
		if ((error = deget(pmp, cluster, blkoff, &tdp)) != 0)
			return (error);
		*vpp = DETOV(tdp);
	}

	/*
	 * Insert name into cache if appropriate.
	 */
	if (cnp->cn_flags & MAKEENTRY)
		cache_enter(vdp, *vpp, cnp);
	return (0);
}
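
/*
 * The chksum compared above is the FAT long-filename checksum: a one-byte
 * rotate-right-and-add over the 11 bytes of the matching 8.3 short name,
 * as documented in the FAT specification.  A standalone sketch of that
 * computation (the real winChksum() takes the directory entry rather
 * than a bare name array):
 */
#include <stdint.h>

static uint8_t
lfn_checksum(const uint8_t shortname[11])
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 11; i++)
		sum = ((sum & 1) ? 0x80 : 0) + (sum >> 1) + shortname[i];
	return (sum);
}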
Exemplo n.º 27
0
/*
 * internal version with extra arguments to allow accessing resource fork
 */
int
hfs_vget_internal(struct mount *mp, ino_t ino, uint8_t fork,
    struct vnode **vpp)
{
	struct hfsmount *hmp;
	struct hfsnode *hnode;
	struct vnode *vp;
	hfs_callback_args cbargs;
	hfs_cnid_t cnid;
	hfs_catalog_keyed_record_t rec;
	hfs_catalog_key_t key; /* the search key used to find this file on disk */
	dev_t dev;
	int error;

#ifdef HFS_DEBUG	
	printf("vfsop = hfs_vget()\n");
#endif /* HFS_DEBUG */

	hnode = NULL;
	vp = NULL;
	hmp = VFSTOHFS(mp);
	dev = hmp->hm_dev;
	cnid = (hfs_cnid_t)ino;

	if (fork != HFS_RSRCFORK)
	    fork = HFS_DATAFORK;

 retry:
	/* Check if this vnode has already been allocated. If so, just return it. */
	if ((*vpp = hfs_nhashget(dev, cnid, fork, LK_EXCLUSIVE)) != NULL)
		return 0;

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_HFS, mp, hfs_vnodeop_p, &vp)) != 0)
		goto error;
	MALLOC(hnode, struct hfsnode *, sizeof(struct hfsnode), M_TEMP,
		M_WAITOK | M_ZERO);

	/*
	 * If someone beat us to it while sleeping in getnewvnode(),
	 * push back the freshly allocated vnode we don't need, and return.
	 */
	mutex_enter(&hfs_hashlock);
	if (hfs_nhashget(dev, cnid, fork, 0) != NULL) {
		mutex_exit(&hfs_hashlock);
		ungetnewvnode(vp);
		FREE(hnode, M_TEMP);
		goto retry;
	}

	vp->v_vflag |= VV_LOCKSWORK;	
	vp->v_data = hnode;
	genfs_node_init(vp, &hfs_genfsops);
	
	hnode->h_vnode = vp;
	hnode->h_hmp = hmp;
	hnode->dummy = 0x1337BABE;
	
	/*
	 * We need to put this vnode into the hash chain and lock it so that other
	 * requests for this inode will block if they arrive while we are sleeping
	 * waiting for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read. The hash chain requires the node's
	 * device and cnid to be known. Since this information was passed in the
	 * arguments, fill in the appropriate hfsnode fields without having to
	 * read the disk.
	 */
	hnode->h_dev = dev;
	hnode->h_rec.u.cnid = cnid;
	hnode->h_fork = fork;

	hfs_nhashinsert(hnode);
	mutex_exit(&hfs_hashlock);


	/*
	 * Read catalog record from disk.
	 */
	hfslib_init_cbargs(&cbargs);
	
	if (hfslib_find_catalog_record_with_cnid(&hmp->hm_vol, cnid,
		&rec, &key, &cbargs) != 0) {
		vput(vp);
		error = EBADF;
		goto error;
	}
		
	memcpy(&hnode->h_rec, &rec, sizeof(hnode->h_rec));
	hnode->h_parent = key.parent_cnid;

	/* XXX Eventually need to add an "ignore permissions" mount option */

	/*
	 * Now convert some of the catalog record's fields into values that make
	 * sense on this system.
	 */
	/* DATE AND TIME */

	/*
	 * Initialize the vnode from the hfsnode, check for aliases.
	 * Note that the underlying vnode may change.
	 */
	hfs_vinit(mp, hfs_specop_p, hfs_fifoop_p, &vp);

	hnode->h_devvp = hmp->hm_devvp;	
	VREF(hnode->h_devvp);  /* Increment the ref count to the volume's device. */

	/* Make sure UVM has allocated enough memory. (?) */
	if (hnode->h_rec.u.rec_type == HFS_REC_FILE) {
		if (hnode->h_fork == HFS_DATAFORK)
			uvm_vnp_setsize(vp,
			    hnode->h_rec.file.data_fork.logical_size);
		else
			uvm_vnp_setsize(vp,
			    hnode->h_rec.file.rsrc_fork.logical_size);
	}
	else
		uvm_vnp_setsize(vp, 0); /* no reading directories directly */
		
	*vpp = vp;
	
	return 0;

error:
	*vpp = NULL;
	return error;
}
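
/*
 * hfs_vget_internal() follows the classic "look up, allocate, then
 * re-check under the hash lock" pattern, so two threads racing to
 * instantiate the same inode end up sharing a single vnode: the loser
 * throws its freshly allocated node away and retries.  A generic sketch
 * of that shape; every name below is hypothetical.
 */
struct obj;

struct obj *cache_lookup(unsigned key);	/* returns referenced obj or NULL */
struct obj *obj_alloc(void);		/* may sleep */
void	    obj_free(struct obj *);
void	    cache_insert(unsigned key, struct obj *);
void	    cache_lock(void);
void	    cache_unlock(void);

static struct obj *
obj_get(unsigned key)
{
	struct obj *o, *existing;

	for (;;) {
		/* Fast path: already cached. */
		if ((existing = cache_lookup(key)) != NULL)
			return (existing);

		o = obj_alloc();

		cache_lock();
		if (cache_lookup(key) != NULL) {
			/* Lost the race while sleeping: undo and retry. */
			cache_unlock();
			obj_free(o);
			continue;
		}
		cache_insert(key, o);
		cache_unlock();
		return (o);
	}
}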
Exemplo n.º 28
0
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
    char 	*filename,
    int	flags,
    long	size,
    long	priority)
{
    struct vnode		*vp = 0;
    struct nameidata 	nd, *ndp;
    struct proc		*p =  current_proc();
    pager_file_t		pf;
    register int		error;
    kern_return_t		kr;
    mach_port_t		backing_store;
    memory_object_default_t	default_pager;
    int			i;
    boolean_t		funnel_state;

    struct vattr	vattr;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
    AUDIT_ARG(value, priority);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    ndp = &nd;

    if ((error = suser(p->p_ucred, &p->p_acflag)))
        goto swapon_bailout;

    if(default_pager_init_flag == 0) {
        start_def_pager(NULL);
        default_pager_init_flag = 1;
    }

    /*
     * Get a vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE,
           filename, p);

    if ((error = namei(ndp)))
        goto swapon_bailout;
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }
    UBCINFOCHECK("macx_swapon", vp);

    if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) != 0) {
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    if (vattr.va_size < (u_quad_t)size) {
        vattr_null(&vattr);
        vattr.va_size = (u_quad_t)size;
        error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
        if (error) {
            VOP_UNLOCK(vp, 0, p);
            goto swapon_bailout;
        }
    }

    /* add new backing store to list */
    /* find the first free slot, if any */
    i = 0;
    while(i < MAX_BACKING_STORE && bs_port_table[i].vp != 0)
        i++;
    if(i == MAX_BACKING_STORE) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    /* remember the vnode. This vnode has namei() reference */
    bs_port_table[i].vp = vp;

    /*
     * Look to see if we are already paging to this file.
     */
    /* make certain the copy send of kernel call will work */
    default_pager = MEMORY_OBJECT_DEFAULT_NULL;
    kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
    if(kr != KERN_SUCCESS) {
        error = EAGAIN;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    kr = default_pager_backing_store_create(default_pager,
                                            -1, /* default priority */
                                            0, /* default cluster size */
                                            &backing_store);
    memory_object_default_deallocate(default_pager);

    if(kr != KERN_SUCCESS) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    /*
     * NOTE: we are able to supply PAGE_SIZE here instead of
     *	an actual record size or block number because:
     *	a: we do not support offsets from the beginning of the
     *		file (allowing for non page size/record modulo offsets).
     *	b: because paging will be done modulo page size
     */

    VOP_UNLOCK(vp, 0, p);
    kr = default_pager_add_file(backing_store, vp, PAGE_SIZE,
                                ((int)vattr.va_size)/PAGE_SIZE);
    if(kr != KERN_SUCCESS) {
        bs_port_table[i].vp = 0;
        if(kr == KERN_INVALID_ARGUMENT)
            error = EINVAL;
        else
            error = ENOMEM;
        goto swapon_bailout;
    }
    bs_port_table[i].bs = (void *)backing_store;
    error = 0;
    if (!ubc_hold(vp))
        panic("macx_swapon: hold");

    /* Mark this vnode as being used for swapfile */
    SET(vp->v_flag, VSWAP);

    ubc_setcred(vp, p);

    /*
     * take an extra reference on the vnode to keep
     * vnreclaim() away from this vnode.
     */
    VREF(vp);

    /* Hold on to the namei  reference to the paging file vnode */
    vp = 0;

swapon_bailout:
    if (vp) {
        vrele(vp);
    }
    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
Exemplo n.º 29
0
void
doit_in_col_major (const char * description,
		   const int N, const int NRHS,
		   double A[N][N], double X[NRHS][N], double B[NRHS][N],
		   double expected_X[NRHS][N])
{
  lapack_int	Anrows	= N;
  lapack_int	Ancols	= N;
  lapack_int	ldA	= Anrows;	/* leading dimension of A */
  lapack_int	ldB	= N;		/* leading dimension of B */

  /* Result of computation: permuted matrix A decomposed in LU. */
  double	packedLU[Ancols][Anrows];


  /* Result of computation: tuple  of partial pivot indexes representing
     the permutation matrix. */
  lapack_int	ipiv_dim = MIN(Anrows, Ancols);
  lapack_int	ipiv[ipiv_dim];

  /* Result of computation: error code, zero if success. */
  lapack_int	info;

  /* Data  needed  to  reconstruct   A  from  the  results:  permutation
     vector. */
  int		perms[Anrows];

  /* Data needed to reconstruct A  from the results: permutation matrix,
     such that A = PLU. */
  int		Pnrows = Anrows;
  int		Pncols = Anrows;
  int		P[Pncols][Pnrows];

  /* Lower-triangular factor L. */
  lapack_int	Lnrows		= Anrows;
  lapack_int	Lncols		= MIN(Anrows,Ancols);
  lapack_int	ldL		= Lncols;
  double	L[Lncols][Lnrows];

  /* Upper-triangular factor U. */
  lapack_int	Unrows		= MIN(Anrows,Ancols);
  lapack_int	Uncols		= Ancols;
  lapack_int	ldU		= Uncols;
  double	U[Uncols][Unrows];

  /* Data needed  to reconstruct A  from the  results: product A1  = LU,
     such that A = P A1. */
  double	A1[Ancols][Anrows];

  /* Data needed to reconstruct A from the results:
   *
   *     reconstructed_A_ipiv = P A1 = PLU
   *
   * reconstructed by applying IPIV to A1 backwards.
   */
  double	reconstructed_A_ipiv[Ancols][Anrows];

  /* Data needed to reconstruct A from the results:
   *
   *     reconstructed_A_P = P A1 = PLU
   *
   * reconstructed by left-multiplying A1 by the permutations matrix P.
   */
  double	reconstructed_A_P[Ancols][Anrows];

  /* Load the original  coefficients matrix from A to  packedLU.  The LU
     factorisation  result  of  dgesv()  will  be  stored  in  packedLU,
     overwriting it. */
  memcpy(packedLU, A, sizeof(double) * Anrows * Ancols);

  /* Load  the right-hand  side from  B to  X.  The  unknowns result  of
     dgesv() will be stored in X, overwriting it. */
  memcpy(X, B, sizeof(double) * N * NRHS);

  /* Do it. */
  info	= LAPACKE_dgesv(LAPACK_COL_MAJOR, N, NRHS,
			MREF(packedLU), ldA, VREF(ipiv), MREF(X), ldB);

  /* If something went wrong in the function call INFO is non-zero: exit
     with failure. */
  if (0 != info) {
    printf("Error computing solution with row-major operands: INFO=%d.\n", info);
    exit(EXIT_FAILURE);
  }

  /* Reconstructing A from the results. */
  {
    col_major_PLU_permutation_matrix_from_ipiv (Anrows, Ancols, ipiv, perms, P);
    real_col_major_split_LU(Anrows, Ancols, MIN(Anrows, Ancols), packedLU, L, U);

    /* Multiply L and U to verify that  the result is indeed PA; we need
     * CBLAS for this.  In general DGEMM does:
     *
     *   \alpha A B + \beta C
     *
     * where  A, B  and C  are matrices.   We need  to inspect  both the
     * header  file "cblas.h"  and  the source  file  "dgemm.f" for  the
     * documentation of the parameters; the prototype of "cblas_dgemm()"
     * is:
     *
     *    void cblas_dgemm(const enum CBLAS_ORDER Order,
     *                     const enum CBLAS_TRANSPOSE TransA,
     *                     const enum CBLAS_TRANSPOSE TransB,
     *                     const int M, const int N, const int K,
     *                     const double alpha,
     *                     const double *A, const int lda,
     *                     const double *B, const int ldb,
     *                     const double beta,
     *                     double *C, const int ldc);
     *
     * In our  case all the matrices  are in col-major order  and the
     * representations in the  arrays A and B are not  transposed, so: M
     * is the number of rows of A and C; N is the number of columns of B
     * and of columns of C; K is the  number of columns of A and rows of
     * B.  In other words:
     *
     *    A has dimensions M x K
     *    B has dimensions K x N
     *    C has dimensions M x N
     *
     * obviously the product AB has dimensions M x N.
     *
     * Here we want to do:
     *
     *   A1 = 1.0 L U + 0 A1
     *
     * where A1 is  a matrix whose contents at input  are not important,
     * and whose contents at output are the result of the operation.
     */
    {
      double	alpha = 1.0;
      double	beta  = 0.0;
      cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans,
		  Anrows, Ancols, Lncols,
		  alpha, MREF(L), ldL, MREF(U), ldU, beta, MREF(A1), ldA);
      real_col_major_apply_ipiv (Anrows, Ancols, ipiv, BACKWARD_IPIV_APPLICATION,
				 reconstructed_A_ipiv, A1);
      real_col_major_apply_permutation_matrix (Anrows, Ancols, reconstructed_A_P, P, A1);
    }
  }

  printf("Column-major dgesv results, %s:\n", description);

  /* Result verification. */
  {
    compare_real_col_major_result_and_expected_result("computed unknowns",
						      N, NRHS, X, expected_X);
    compare_real_col_major_result_and_expected_result("reconstructed A with IPIV application",
						      Anrows, Ancols, reconstructed_A_ipiv, A);
    compare_real_col_major_result_and_expected_result("reconstructed A with P application",
						      Anrows, Ancols, reconstructed_A_P, A);
  }

  /* Results logging. */
  {
    print_real_col_major_matrix("X, resulting unknowns", N, NRHS, X);
    print_real_col_major_matrix("A, original coefficient matrix", Anrows, Ancols, A);
    print_col_major_PLU_partial_pivoting_vectors_and_matrix (Anrows, Ancols, ipiv, perms, P);
    print_real_col_major_matrix("packedLU representing L and U packed in single matrix",
				Anrows, Ancols, packedLU);
    print_real_col_major_matrix("L, elements of packedLU", Lnrows, Lncols, L);
    print_real_col_major_matrix("U, elements of packedLU", Unrows, Uncols, U);
    print_real_col_major_matrix("A1 = LU, it must be such that A = PR", Anrows, Ancols, A1);
    print_real_col_major_matrix("reconstructed_A_ipiv = PA1 = PLU, it must be such that A = reconstructed_A",
				Anrows, Ancols, reconstructed_A_ipiv);
    print_real_col_major_matrix("reconstructed_A_P = PA1 = PLU, it must be such that A = reconstructed_A",
				Anrows, Ancols, reconstructed_A_P);
  }
}
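
/*
 * A minimal, self-contained LAPACKE_dgesv call with the verification
 * scaffolding stripped away: solve A x = b for a single 2x2 system in
 * column-major order.  This is only a sketch assuming the LAPACKE
 * interface is available as <lapacke.h>; the matrix values are made up
 * for illustration.
 */
#include <stdio.h>
#include <lapacke.h>

int
main (void)
{
  lapack_int	N    = 2;
  lapack_int	NRHS = 1;
  /* A stored column by column: first column {1,3}, second column {2,4}. */
  double	A[4]  = { 1.0, 3.0, 2.0, 4.0 };
  double	b[2]  = { 5.0, 11.0 };	/* overwritten with the solution */
  lapack_int	ipiv[2];
  lapack_int	info;

  info = LAPACKE_dgesv(LAPACK_COL_MAJOR, N, NRHS, A, N, ipiv, b, N);
  if (0 != info) {
    printf("dgesv failed: INFO=%d\n", (int)info);
    return 1;
  }
  printf("x = [%f, %f]\n", b[0], b[1]);	/* expected [1, 2] */
  return 0;
}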
Exemplo n.º 30
0
/*
 * lookup.  this is incredibly complicated in the
 * general case, however for most pseudo-filesystems
 * very little needs to be done.
 *
 * unless you want to get a migraine, just make sure your
 * filesystem doesn't do any locking of its own.  otherwise
 * read and inwardly digest ufs_lookup().
 */
int
procfs_lookup(void *v)
{
	struct vop_lookup_args *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode **vpp = ap->a_vpp;
	struct vnode *dvp = ap->a_dvp;
	char *pname = cnp->cn_nameptr;
	struct proc *curp = curproc;
	const struct proc_target *pt;
	struct vnode *fvp;
	pid_t pid;
	struct pfsnode *pfs;
	struct proc *p = NULL;
	int i, error, wantpunlock, iscurproc = 0, isself = 0;

	*vpp = NULL;
	cnp->cn_flags &= ~PDIRUNLOCK;

	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)
		return (EROFS);

	if (cnp->cn_namelen == 1 && *pname == '.') {
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	wantpunlock = (~cnp->cn_flags & (LOCKPARENT | ISLASTCN));
	pfs = VTOPFS(dvp);
	switch (pfs->pfs_type) {
	case Proot:
		if (cnp->cn_flags & ISDOTDOT)
			return (EIO);

		iscurproc = CNEQ(cnp, "curproc", 7);
		isself = CNEQ(cnp, "self", 4);

		if (iscurproc || isself) {
			error = procfs_allocvp(dvp->v_mount, vpp, 0,
			    iscurproc ? Pcurproc : Pself);
			if ((error == 0) && (wantpunlock)) {
				VOP_UNLOCK(dvp, 0, curp);
				cnp->cn_flags |= PDIRUNLOCK;
			}
			return (error);
		}

		for (i = 0; i < nproc_root_targets; i++) {
			pt = &proc_root_targets[i];
			if (cnp->cn_namelen == pt->pt_namlen &&
			    memcmp(pt->pt_name, pname, cnp->cn_namelen) == 0 &&
			    (pt->pt_valid == NULL ||
			     (*pt->pt_valid)(p, dvp->v_mount)))
				break;
		}

		if (i != nproc_root_targets) {
			error = procfs_allocvp(dvp->v_mount, vpp, 0,
			    pt->pt_pfstype);
			if ((error == 0) && (wantpunlock)) {
				VOP_UNLOCK(dvp, 0, curp);
				cnp->cn_flags |= PDIRUNLOCK;
			}
			return (error);
		}

		pid = atopid(pname, cnp->cn_namelen);
		if (pid == NO_PID)
			break;

		p = pfind(pid);
		if (p == 0)
			break;

		error = procfs_allocvp(dvp->v_mount, vpp, pid, Pproc);
		if ((error == 0) && wantpunlock) {
			VOP_UNLOCK(dvp, 0, curp);
			cnp->cn_flags |= PDIRUNLOCK;
		}
		return (error);

	case Pproc:
		/*
		 * do the .. dance. We unlock the directory, and then
		 * get the root dir. That will automatically return ..
		 * locked. Then if the caller wanted dvp locked, we
		 * re-lock.
		 */
		if (cnp->cn_flags & ISDOTDOT) {
			VOP_UNLOCK(dvp, 0, curp);
			cnp->cn_flags |= PDIRUNLOCK;
			error = procfs_root(dvp->v_mount, vpp);
			if ((error == 0) && (wantpunlock == 0) &&
			    ((error = vn_lock(dvp, LK_EXCLUSIVE, curp)) == 0))
				cnp->cn_flags &= ~PDIRUNLOCK;
			return (error);
		}

		p = pfind(pfs->pfs_pid);
		if (p == 0)
			break;

		for (pt = proc_targets, i = 0; i < nproc_targets; pt++, i++) {
			if (cnp->cn_namelen == pt->pt_namlen &&
			    bcmp(pt->pt_name, pname, cnp->cn_namelen) == 0 &&
			    (pt->pt_valid == NULL ||
			     (*pt->pt_valid)(p, dvp->v_mount)))
				goto found;
		}
		break;

	found:
		if (pt->pt_pfstype == Pfile) {
			fvp = p->p_textvp;
			/* We already checked that it exists. */
			VREF(fvp);
			vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, curp);
			if (wantpunlock) {
				VOP_UNLOCK(dvp, 0, curp);
				cnp->cn_flags |= PDIRUNLOCK;
			}
			*vpp = fvp;
			return (0);
		}

		error =  procfs_allocvp(dvp->v_mount, vpp, pfs->pfs_pid,
		    pt->pt_pfstype);
		if ((error == 0) && (wantpunlock)) {
			VOP_UNLOCK(dvp, 0, curp);
			cnp->cn_flags |= PDIRUNLOCK;
		}
		return (error);

	default:
		return (ENOTDIR);
	}

	return (cnp->cn_nameiop == LOOKUP ? ENOENT : EROFS);
}
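
/*
 * The proc_root_targets/proc_targets scans above are plain
 * name-to-node-type table lookups keyed on the component name and its
 * length.  A stripped-down sketch of the same idea; the table contents
 * and names here are hypothetical.
 */
#include <string.h>

struct name_target {
	const char	*nt_name;
	size_t		 nt_namlen;
	int		 nt_type;
};

static const struct name_target targets[] = {
	{ "status",  6, 1 },
	{ "cmdline", 7, 2 },
};

static int
lookup_target(const char *name, size_t namelen)
{
	size_t i;

	for (i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
		if (namelen == targets[i].nt_namlen &&
		    memcmp(targets[i].nt_name, name, namelen) == 0)
			return (targets[i].nt_type);
	}
	return (-1);		/* not found */
}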