Example #1
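/* Note: the platform-specific #ifdef prototype variants that normally precede
 * this declaration were dropped when the example was excerpted; the lone
 * #endif below closes that conditional block. */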
afs_rename(OSI_VC_DECL(aodp), char *aname1, struct vcache *andp, char *aname2, struct AFS_UCRED *acred)
#endif
{
    register afs_int32 code;
    struct afs_fakestat_state ofakestate;
    struct afs_fakestat_state nfakestate;
    struct vrequest treq;
    OSI_VC_CONVERT(aodp);

    code = afs_InitReq(&treq, acred);
    if (code)
	return code;
    afs_InitFakeStat(&ofakestate);
    afs_InitFakeStat(&nfakestate);

    AFS_DISCON_LOCK();
    
    code = afs_EvalFakeStat(&aodp, &ofakestate, &treq);
    if (code)
	goto done;
    code = afs_EvalFakeStat(&andp, &nfakestate, &treq);
    if (code)
	goto done;
    code = afsrename(aodp, aname1, andp, aname2, acred, &treq);
  done:
    afs_PutFakeStat(&ofakestate);
    afs_PutFakeStat(&nfakestate);

    AFS_DISCON_UNLOCK();
    
    code = afs_CheckCode(code, &treq, 25);
    return code;
}
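Nearly every example on this page follows the same request lifecycle: build a struct vrequest from the caller's credentials with afs_InitReq, bracket the operation with the fakestat and disconnected-mode helpers, and map the final result through afs_CheckCode before returning, since that is what turns low-level cache-manager and RPC errors into the errno handed back to the caller. The skeleton below is a minimal sketch of that pattern, not code from the OpenAFS tree; the function name, the afs_VerifyVCache call standing in for the real operation, and the 0 passed as the afs_CheckCode origin value are placeholders.

static int
example_vrequest_lifecycle(struct vcache *avc, afs_ucred_t *acred)
{
    afs_int32 code;
    struct vrequest treq;
    struct afs_fakestat_state fakestate;

    /* bind the caller's credentials to this request */
    if ((code = afs_InitReq(&treq, acred)))
	return code;
    afs_InitFakeStat(&fakestate);

    AFS_DISCON_LOCK();

    /* resolve fake mount-point status before using the vcache */
    code = afs_EvalFakeStat(&avc, &fakestate, &treq);
    if (code)
	goto done;

    /* the real operation goes here; every cache-manager helper takes &treq */
    code = afs_VerifyVCache(avc, &treq);

  done:
    afs_PutFakeStat(&fakestate);
    AFS_DISCON_UNLOCK();
    /* translate the raw result into the errno returned to the caller;
     * the 0 is a placeholder for the per-call-site origin id */
    code = afs_CheckCode(code, &treq, 0);
    return code;
}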
Example #2
/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
static void
BPath(struct brequest *ab)
{
    struct dcache *tdc = NULL;
    struct vcache *tvc = NULL;
    struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
    struct dentry *dp = NULL;
#endif
    afs_size_t offset, len;
    struct vrequest treq;
    afs_int32 code;

    AFS_STATCNT(BPath);
    if ((code = afs_InitReq(&treq, ab->cred)))
	return;
    AFS_GUNLOCK();
#ifdef AFS_LINUX22_ENV
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &dp);
    if (dp)
	tvn = (struct vnode *)dp->d_inode;
#else
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &tvn);
#endif
    AFS_GLOCK();
    osi_FreeLargeSpace((char *)ab->ptr_parm[0]);	/* free path name buffer here */
    if (code)
	return;
    /* now path may not have been in afs, so check that before calling our cache manager */
    if (!tvn || !IsAfsVnode(tvn)) {
	/* release it and give up */
	if (tvn) {
#ifdef AFS_LINUX22_ENV
	    dput(dp);
#else
	    AFS_RELE(tvn);
#endif
	}
	return;
    }
    tvc = VTOAFS(tvn);
    /* here we know its an afs vnode, so we can get the data for the chunk */
    tdc = afs_GetDCache(tvc, ab->size_parm[0], &treq, &offset, &len, 1);
    if (tdc) {
	afs_PutDCache(tdc);
    }
#ifdef AFS_LINUX22_ENV
    dput(dp);
#else
    AFS_RELE(tvn);
#endif
}
Example #3
static void
BPrefetchNoCache(struct brequest *ab)
{
    struct vrequest treq;
    afs_size_t len;

    if ((len = afs_InitReq(&treq, ab->cred)))
	return;

#ifndef UKERNEL
    /* OS-specific prefetch routine */
    afs_PrefetchNoCache(ab->vc, ab->cred, (struct nocache_read_request *) ab->ptr_parm[0]);
#endif
}
Example #4
static void
BStore(struct brequest *ab)
{
    struct vcache *tvc;
    afs_int32 code;
    struct vrequest treq;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;
#endif

    AFS_STATCNT(BStore);
    if ((code = afs_InitReq(&treq, ab->cred)))
	return;
    code = 0;
    tvc = ab->vc;
#if defined(AFS_SGI_ENV)
    /*
     * Since StoreOnLastReference can end up calling osi_SyncVM which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set ours to theirs for this transaction
     */
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

    /*
     * To avoid recursion since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction as
     * do the other callers of StoreOnLastReference
     */
    AFS_RWLOCK((vnode_t *) tvc, 1);
#endif
    ObtainWriteLock(&tvc->lock, 209);
    code = afs_StoreOnLastReference(tvc, &treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *) tvc, 1);
#endif
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
	ab->code = afs_CheckCode(code, &treq, 43);	/* set final code, since treq doesn't go across processes */
	ab->flags |= BUVALID;
	if (ab->flags & BUWAIT) {
	    ab->flags &= ~BUWAIT;
	    afs_osi_Wakeup(ab);
	}
    }
}
Example #5
/* afs_root - stat the root of the file system. AFS global held on entry. */
static int
afs_root(struct super_block *afsp)
{
    afs_int32 code = 0;
    struct vrequest treq;
    struct vcache *tvp = 0;

    AFS_STATCNT(afs_root);
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
	tvp = afs_globalVp;
    } else {
	cred_t *credp = crref();

	if (afs_globalVp) {
	    afs_PutVCache(afs_globalVp);
	    afs_globalVp = NULL;
	}

	if (!(code = afs_InitReq(&treq, credp)) && !(code = afs_CheckInit())) {
	    tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
	    if (tvp) {
		struct inode *ip = AFSTOV(tvp);
		struct vattr vattr;

		afs_getattr(tvp, &vattr, credp);
		afs_fill_inode(ip, &vattr);

		/* setup super_block and mount point inode. */
		afs_globalVp = tvp;
#if defined(HAVE_LINUX_D_MAKE_ROOT)
		afsp->s_root = d_make_root(ip);
#else
		afsp->s_root = d_alloc_root(ip);
#endif
#if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
		afsp->s_root->d_op = &afs_dentry_operations;
#endif
	    } else
		code = ENOENT;
	}
	crfree(credp);
    }

    afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, afs_globalVp,
	       ICL_TYPE_INT32, code);
    return code;
}
Example #6
int
afs_readlink(OSI_VC_DECL(avc), struct uio *auio, afs_ucred_t *acred)
{
    afs_int32 code;
    struct vrequest treq;
    char *tp;
    struct afs_fakestat_state fakestat;
    OSI_VC_CONVERT(avc);

    AFS_STATCNT(afs_readlink);
    afs_Trace1(afs_iclSetp, CM_TRACE_READLINK, ICL_TYPE_POINTER, avc);
    if ((code = afs_InitReq(&treq, acred)))
	return code;
    afs_InitFakeStat(&fakestat);

    AFS_DISCON_LOCK();
    
    code = afs_EvalFakeStat(&avc, &fakestat, &treq);
    if (code)
	goto done;
    code = afs_VerifyVCache(avc, &treq);
    if (code)
	goto done;
    if (vType(avc) != VLNK) {
	code = EINVAL;
	goto done;
    }
    ObtainWriteLock(&avc->lock, 158);
    code = afs_HandleLink(avc, &treq);
    /* finally uiomove it to user-land */
    if (code == 0) {
	tp = avc->linkData;
	if (tp)
	    AFS_UIOMOVE(tp, strlen(tp), UIO_READ, auio, code);
	else {
	    code = EIO;
	}
    }
    ReleaseWriteLock(&avc->lock);
  done:
    afs_PutFakeStat(&fakestat);
    AFS_DISCON_UNLOCK();
    code = afs_CheckCode(code, &treq, 32);
    return code;
}
Example #7
/* size_parm 0 to the fetch is the chunk number,
 * ptr_parm 0 is the dcache entry to wakeup,
 * size_parm 1 is true iff we should release the dcache entry here.
 */
static void
BPrefetch(struct brequest *ab)
{
    struct dcache *tdc;
    struct vcache *tvc;
    afs_size_t offset, len, abyte, totallen = 0;
    struct vrequest treq;

    AFS_STATCNT(BPrefetch);
    if ((len = afs_InitReq(&treq, ab->cred)))
	return;
    abyte = ab->size_parm[0];
    tvc = ab->vc;
    do {
	tdc = afs_GetDCache(tvc, abyte, &treq, &offset, &len, 1);
	if (tdc) {
	    afs_PutDCache(tdc);
	}
	abyte+=len;
	totallen += len;
    } while ((totallen < afs_preCache) && tdc && (len > 0));
    /* now, dude may be waiting for us to clear DFFetchReq bit; do so.  Can't
     * use tdc from GetDCache since afs_GetDCache may fail, but someone may
     * be waiting for our wakeup anyway.
     */
    tdc = (struct dcache *)(ab->ptr_parm[0]);
    ObtainSharedLock(&tdc->lock, 640);
    if (tdc->mflags & DFFetchReq) {
	UpgradeSToWLock(&tdc->lock, 641);
	tdc->mflags &= ~DFFetchReq;
	ReleaseWriteLock(&tdc->lock);
    } else {
	ReleaseSharedLock(&tdc->lock);
    }
    afs_osi_Wakeup(&tdc->validPos);
    if (ab->size_parm[1]) {
	afs_PutDCache(tdc);	/* put this one back, too */
    }
}
Example #8
int
afs_root(OSI_VFS_DECL(afsp), struct vnode **avpp)
{
    afs_int32 code = 0;
    struct vrequest treq;
    struct vcache *tvp = 0;
    OSI_VFS_CONVERT(afsp);

    AFS_STATCNT(afs_root);
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
	tvp = afs_globalVp;
    } else {
	if (afs_globalVp) {
	    afs_PutVCache(afs_globalVp);
	    afs_globalVp = NULL;
	}

	if (!(code = afs_InitReq(&treq, get_user_struct()->u_cred))
	    && !(code = afs_CheckInit())) {
	    tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
	    /* we really want this to stay around */
	    if (tvp) {
		afs_globalVp = tvp;
	    } else
		code = ENOENT;
	}
    }
    if (tvp) {
	VN_HOLD(AFSTOV(tvp));

	AFSTOV(tvp)->v_flag |= VROOT;	/* No-op on Ultrix 2.2 */
	afs_globalVFS = afsp;
	*avpp = AFSTOV(tvp);
    }

    afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, *avpp,
	       ICL_TYPE_INT32, 0, ICL_TYPE_INT32, code);
    return code;
}
Example #9
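/* As in Example 1, the #ifdef prototype block that precedes this declaration
 * was cut from the excerpt; the #endif below closes it. */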
afs_root(struct mount *mp, struct vnode **vpp)
#endif
{
    int error;
    struct vrequest treq;
    register struct vcache *tvp = 0;
#ifdef AFS_FBSD50_ENV
#ifndef AFS_FBSD53_ENV
    struct thread *td = curthread;
#endif
    struct ucred *cr = td->td_ucred;
#else
    struct proc *p = curproc;
    struct ucred *cr = p->p_cred->pc_ucred;
#endif

    AFS_GLOCK();
    AFS_STATCNT(afs_root);
    crhold(cr);
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
	tvp = afs_globalVp;
	error = 0;
    } else {
tryagain:
#ifndef AFS_FBSD80_ENV
	if (afs_globalVp) {
	    afs_PutVCache(afs_globalVp);
	    /* vrele() needed here or not? */
	    afs_globalVp = NULL;
	}
#endif
	if (!(error = afs_InitReq(&treq, cr)) && !(error = afs_CheckInit())) {
	    tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
	    /* we really want this to stay around */
	    if (tvp)
		afs_globalVp = tvp;
	    else
		error = ENOENT;
	}
    }
    if (tvp) {
	struct vnode *vp = AFSTOV(tvp);

#ifdef AFS_FBSD50_ENV
	ASSERT_VI_UNLOCKED(vp, "afs_root");
#endif
	AFS_GUNLOCK();
	/*
	 * I'm uncomfortable about this.  Shouldn't this happen at a
	 * higher level, and shouldn't we busy the top-level directory
	 * to prevent recycling?
	 */
#ifdef AFS_FBSD50_ENV
	error = vget(vp, LK_EXCLUSIVE | LK_RETRY, td);
	vp->v_vflag |= VV_ROOT;
#else
	error = vget(vp, LK_EXCLUSIVE | LK_RETRY, p);
	vp->v_flag |= VROOT;
#endif
	AFS_GLOCK();
	if (error != 0)
		goto tryagain;

	afs_globalVFS = mp;
	*vpp = vp;
    }

    afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, tvp ? AFSTOV(tvp) : NULL,
	       ICL_TYPE_INT32, error);
    AFS_GUNLOCK();
    crfree(cr);
    return error;
}
Example #10
/* Note that we don't set CDirty here, this is OK because the unlink
 * RPC is called synchronously */
int
afs_remove(OSI_VC_DECL(adp), char *aname, afs_ucred_t *acred)
{
    struct vrequest treq;
    register struct dcache *tdc;
    struct VenusFid unlinkFid;
    register afs_int32 code;
    register struct vcache *tvc;
    afs_size_t offset, len;
    struct afs_fakestat_state fakestate;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_remove);
    afs_Trace2(afs_iclSetp, CM_TRACE_REMOVE, ICL_TYPE_POINTER, adp,
	       ICL_TYPE_STRING, aname);


    if ((code = afs_InitReq(&treq, acred))) {
	return code;
    }

    afs_InitFakeStat(&fakestate);
    AFS_DISCON_LOCK();
    code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (code)
	goto done;

    /* Check if this is dynroot */
    if (afs_IsDynroot(adp)) {
	code = afs_DynrootVOPRemove(adp, acred, aname);
	goto done;
    }
    if (afs_IsDynrootMount(adp)) {
	code = ENOENT;
	goto done;
    }

    if (strlen(aname) > AFSNAMEMAX) {
	code = ENAMETOOLONG;
	goto done;
    }
  tagain:
    code = afs_VerifyVCache(adp, &treq);
    tvc = NULL;
    if (code) {
	code = afs_CheckCode(code, &treq, 23);
	goto done;
    }

    /** If the volume is read-only, return error without making an RPC to the
      * fileserver
      */
    if (adp->f.states & CRO) {
	code = EROFS;
	goto done;
    }

    /* If we're running disconnected without logging, go no further... */
    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
        code = ENETDOWN;
	goto done;
    }
    
    tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);	/* test for error below */
    ObtainWriteLock(&adp->lock, 142);
    if (tdc)
	ObtainSharedLock(&tdc->lock, 638);

    /*
     * Make sure that the data in the cache is current. We may have
     * received a callback while we were waiting for the write lock.
     */
    if (!(adp->f.states & CStatd)
	|| (tdc && !hsame(adp->f.m.DataVersion, tdc->f.versionNo))) {
	ReleaseWriteLock(&adp->lock);
	if (tdc) {
	    ReleaseSharedLock(&tdc->lock);
	    afs_PutDCache(tdc);
	}
	goto tagain;
    }

    unlinkFid.Fid.Vnode = 0;
    if (!tvc) {
	tvc = osi_dnlc_lookup(adp, aname, WRITE_LOCK);
    }
    /* This should not be necessary since afs_lookup() has already
     * done the work.
     */
    if (!tvc)
	if (tdc) {
	    code = afs_dir_Lookup(tdc, aname, &unlinkFid.Fid);
	    if (code == 0) {
		afs_int32 cached = 0;

		unlinkFid.Cell = adp->f.fid.Cell;
		unlinkFid.Fid.Volume = adp->f.fid.Fid.Volume;
		if (unlinkFid.Fid.Unique == 0) {
		    tvc =
			afs_LookupVCache(&unlinkFid, &treq, &cached, adp,
					 aname);
		} else {
		    ObtainReadLock(&afs_xvcache);
		    tvc = afs_FindVCache(&unlinkFid, 0, DO_STATS);
		    ReleaseReadLock(&afs_xvcache);
		}
	    }
	}

    if (AFS_IS_DISCON_RW) {
	if (!adp->f.shadow.vnode && !(adp->f.ddirty_flags & VDisconCreate)) {
    	    /* Make shadow copy of parent dir. */
	    afs_MakeShadowDir(adp, tdc);
	}

	/* Can't hold a dcache lock whilst we're getting a vcache one */
	if (tdc)
	    ReleaseSharedLock(&tdc->lock);

        /* XXX - We're holding adp->lock still, and we've got no 
	 * guarantee about whether the ordering matches the lock hierarchy */
	ObtainWriteLock(&tvc->lock, 713);

	/* If we were locally created, then we don't need to do very
	 * much beyond ensuring that we don't exist anymore */	
    	if (tvc->f.ddirty_flags & VDisconCreate) {
	    afs_DisconRemoveDirty(tvc);
	} else {
	    /* Add removed file vcache to dirty list. */
	    afs_DisconAddDirty(tvc, VDisconRemove, 1);
        }
	adp->f.m.LinkCount--;
	ReleaseWriteLock(&tvc->lock);
	if (tdc)
	    ObtainSharedLock(&tdc->lock, 714);
     }

    if (tvc && osi_Active(tvc)) {
	/* about to delete whole file, prefetch it first */
	ReleaseWriteLock(&adp->lock);
	if (tdc)
	    ReleaseSharedLock(&tdc->lock);
	ObtainWriteLock(&tvc->lock, 143);
	FetchWholeEnchilada(tvc, &treq);
	ReleaseWriteLock(&tvc->lock);
	ObtainWriteLock(&adp->lock, 144);
	/* Technically I don't think we need this back, but let's hold it 
	   anyway; The "got" reference should actually be sufficient. */
	if (tdc) 
	    ObtainSharedLock(&tdc->lock, 640);
    }

    osi_dnlc_remove(adp, aname, tvc);

    Tadp1 = adp;
#ifndef AFS_DARWIN80_ENV
    Tadpr = VREFCOUNT(adp);
#endif
    Ttvc = tvc;
    Tnam = aname;
    Tnam1 = 0;
#ifndef AFS_DARWIN80_ENV
    if (tvc)
	Ttvcr = VREFCOUNT(tvc);
#endif
#ifdef	AFS_AIX_ENV
    if (tvc && VREFCOUNT_GT(tvc, 2) && tvc->opens > 0
	&& !(tvc->f.states & CUnlinked)) {
#else
    if (tvc && VREFCOUNT_GT(tvc, 1) && tvc->opens > 0
	&& !(tvc->f.states & CUnlinked)) {
#endif
	char *unlname = afs_newname();

	ReleaseWriteLock(&adp->lock);
	if (tdc)
	    ReleaseSharedLock(&tdc->lock);
	code = afsrename(adp, aname, adp, unlname, acred, &treq);
	Tnam1 = unlname;
	if (!code) {
	    struct VenusFid *oldmvid = NULL;
	    if (tvc->mvid) 
		oldmvid = tvc->mvid;
	    tvc->mvid = (struct VenusFid *)unlname;
	    if (oldmvid)
		osi_FreeSmallSpace(oldmvid);
	    crhold(acred);
	    if (tvc->uncred) {
		crfree(tvc->uncred);
	    }
	    tvc->uncred = acred;
	    tvc->f.states |= CUnlinked;
	    /* if rename succeeded, remove should not */
	    ObtainWriteLock(&tvc->lock, 715);
	    if (tvc->f.ddirty_flags & VDisconRemove) {
		tvc->f.ddirty_flags &= ~VDisconRemove;
	    }
	    ReleaseWriteLock(&tvc->lock);
	} else {
	    osi_FreeSmallSpace(unlname);
	}
	if (tdc)
	    afs_PutDCache(tdc);
	afs_PutVCache(tvc);
    } else {
	code = afsremove(adp, tdc, tvc, aname, acred, &treq);
    }
    done:
    afs_PutFakeStat(&fakestate);
#ifndef AFS_DARWIN80_ENV
    /* we can't track by thread, it's not exported in the KPI; only do
       this on !macos */
    osi_Assert(!WriteLocked(&adp->lock) || (adp->lock.pid_writer != MyPidxx));
#endif
    AFS_DISCON_UNLOCK();
    return code;
}


/* afs_remunlink -- This tries to delete the file at the server after it has
 *     been renamed when unlinked locally but now has been finally released.
 *
 * CAUTION -- may be called with avc unheld. */

int
afs_remunlink(register struct vcache *avc, register int doit)
{
    afs_ucred_t *cred;
    char *unlname;
    struct vcache *adp;
    struct vrequest treq;
    struct VenusFid dirFid;
    register struct dcache *tdc;
    afs_int32 code = 0;

    if (NBObtainWriteLock(&avc->lock, 423))
	return 0;
#if defined(AFS_DARWIN80_ENV)
    if (vnode_get(AFSTOV(avc))) {
	ReleaseWriteLock(&avc->lock);
	return 0;
    }
#endif

    if (avc->mvid && (doit || (avc->f.states & CUnlinkedDel))) {
	if ((code = afs_InitReq(&treq, avc->uncred))) {
	    ReleaseWriteLock(&avc->lock);
	} else {
	    /* Must bump the refCount because GetVCache may block.
	     * Also clear mvid so no other thread comes here if we block.
	     */
	    unlname = (char *)avc->mvid;
	    avc->mvid = NULL;
	    cred = avc->uncred;
	    avc->uncred = NULL;

#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
	    VREF(AFSTOV(avc));
#else
	    AFS_FAST_HOLD(avc);
#endif

	    /* We'll only try this once. If it fails, just release the vnode.
	     * Clear after doing hold so that NewVCache doesn't find us yet.
	     */
	    avc->f.states &= ~(CUnlinked | CUnlinkedDel);

	    ReleaseWriteLock(&avc->lock);

	    dirFid.Cell = avc->f.fid.Cell;
	    dirFid.Fid.Volume = avc->f.fid.Fid.Volume;
	    dirFid.Fid.Vnode = avc->f.parent.vnode;
	    dirFid.Fid.Unique = avc->f.parent.unique;
	    adp = afs_GetVCache(&dirFid, &treq, NULL, NULL);

	    if (adp) {
		tdc = afs_FindDCache(adp, (afs_size_t) 0);
		ObtainWriteLock(&adp->lock, 159);
		if (tdc)
		    ObtainSharedLock(&tdc->lock, 639);

		/* afsremove releases the adp & tdc locks, and does vn_rele(avc) */
		code = afsremove(adp, tdc, avc, unlname, cred, &treq);
		afs_PutVCache(adp);
	    } else {
		/* we failed - and won't be back to try again. */
		afs_PutVCache(avc);
	    }
	    osi_FreeSmallSpace(unlname);
	    crfree(cred);
	}
    } else {
#if defined(AFS_DARWIN80_ENV)
	vnode_put(AFSTOV(avc));
#endif
	ReleaseWriteLock(&avc->lock);
    }

    return code;
}
Example #11
static int
VLDB_Same(struct VenusFid *afid, struct vrequest *areq)
{
    struct vrequest treq;
    struct afs_conn *tconn;
    int i, type = 0;
    union {
	struct vldbentry tve;
	struct nvldbentry ntve;
	struct uvldbentry utve;
    } *v;
    struct volume *tvp;
    struct cell *tcell;
    char *bp, tbuf[CVBS];	/* biggest volume id is 2^32, ~ 4*10^9 */
    unsigned int changed;
    struct server *(oldhosts[NMAXNSERVERS]);

    AFS_STATCNT(CheckVLDB);
    afs_FinalizeReq(areq);

    if ((i = afs_InitReq(&treq, afs_osi_credp)))
	return DUNNO;
    v = afs_osi_Alloc(sizeof(*v));
    tcell = afs_GetCell(afid->Cell, READ_LOCK);
    bp = afs_cv2string(&tbuf[CVBS], afid->Fid.Volume);
    do {
	VSleep(2);		/* Better safe than sorry. */
	tconn =
	    afs_ConnByMHosts(tcell->cellHosts, tcell->vlport, tcell->cellNum,
			     &treq, SHARED_LOCK);
	if (tconn) {
	    if (tconn->srvr->server->flags & SNO_LHOSTS) {
		type = 0;
		RX_AFS_GUNLOCK();
		i = VL_GetEntryByNameO(tconn->id, bp, &v->tve);
		RX_AFS_GLOCK();
	    } else if (tconn->srvr->server->flags & SYES_LHOSTS) {
		type = 1;
		RX_AFS_GUNLOCK();
		i = VL_GetEntryByNameN(tconn->id, bp, &v->ntve);
		RX_AFS_GLOCK();
	    } else {
		type = 2;
		RX_AFS_GUNLOCK();
		i = VL_GetEntryByNameU(tconn->id, bp, &v->utve);
		RX_AFS_GLOCK();
		if (!(tconn->srvr->server->flags & SVLSRV_UUID)) {
		    if (i == RXGEN_OPCODE) {
			type = 1;
			RX_AFS_GUNLOCK();
			i = VL_GetEntryByNameN(tconn->id, bp, &v->ntve);
			RX_AFS_GLOCK();
			if (i == RXGEN_OPCODE) {
			    type = 0;
			    tconn->srvr->server->flags |= SNO_LHOSTS;
			    RX_AFS_GUNLOCK();
			    i = VL_GetEntryByNameO(tconn->id, bp, &v->tve);
			    RX_AFS_GLOCK();
			} else if (!i)
			    tconn->srvr->server->flags |= SYES_LHOSTS;
		    } else if (!i)
			tconn->srvr->server->flags |= SVLSRV_UUID;
		}
		lastcode = i;
	    }
	} else
	    i = -1;
    } while (afs_Analyze(tconn, i, NULL, &treq, -1,	/* no op code for this */
			 SHARED_LOCK, tcell));

    afs_PutCell(tcell, READ_LOCK);
    afs_Trace2(afs_iclSetp, CM_TRACE_CHECKVLDB, ICL_TYPE_FID, &afid,
	       ICL_TYPE_INT32, i);

    if (i) {
	afs_osi_Free(v, sizeof(*v));
	return DUNNO;
    }
    /* have info, copy into serverHost array */
    changed = 0;
    tvp = afs_FindVolume(afid, WRITE_LOCK);
    if (tvp) {
	ObtainWriteLock(&tvp->lock, 107);
	for (i = 0; i < NMAXNSERVERS && tvp->serverHost[i]; i++) {
	    oldhosts[i] = tvp->serverHost[i];
	}

	if (type == 2) {
	    InstallUVolumeEntry(tvp, &v->utve, afid->Cell, tcell, &treq);
	} else if (type == 1) {
	    InstallNVolumeEntry(tvp, &v->ntve, afid->Cell);
	} else {
	    InstallVolumeEntry(tvp, &v->tve, afid->Cell);
	}

	if (i < NMAXNSERVERS && tvp->serverHost[i]) {
	    changed = 1;
	}
	for (--i; !changed && i >= 0; i--) {
	    if (tvp->serverHost[i] != oldhosts[i]) {
		changed = 1;	/* also happens if prefs change.  big deal. */
	    }
	}

	ReleaseWriteLock(&tvp->lock);
	afs_PutVolume(tvp, WRITE_LOCK);
    } else {			/* can't find volume */
	tvp = afs_GetVolume(afid, &treq, WRITE_LOCK);
	if (tvp) {
	    afs_PutVolume(tvp, WRITE_LOCK);
	    afs_osi_Free(v, sizeof(*v));
	    return DIFFERENT;
	} else {
	    afs_osi_Free(v, sizeof(*v));
	    return DUNNO;
	}
    }

    afs_osi_Free(v, sizeof(*v));
    return (changed ? DIFFERENT : SAME);
}				/*VLDB_Same */
Example #12
int
afs_mkdir(OSI_VC_DECL(adp), char *aname, struct vattr *attrs, 
     register struct vcache **avcp, afs_ucred_t *acred)
{
    struct vrequest treq;
    register afs_int32 code;
    register struct afs_conn *tc;
    struct VenusFid newFid;
    register struct dcache *tdc;
#ifdef AFS_DISCON_ENV
    struct dcache *new_dc;
#endif
    afs_size_t offset, len;
    register struct vcache *tvc;
    struct AFSStoreStatus InStatus;
    struct AFSFetchStatus OutFidStatus, OutDirStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    afs_int32 now;
    struct afs_fakestat_state fakestate;
    XSTATS_DECLS;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_mkdir);
    afs_Trace2(afs_iclSetp, CM_TRACE_MKDIR, ICL_TYPE_POINTER, adp,
	       ICL_TYPE_STRING, aname);

    if ((code = afs_InitReq(&treq, acred)))
	goto done2;
    afs_InitFakeStat(&fakestate);

    if (strlen(aname) > AFSNAMEMAX) {
	code = ENAMETOOLONG;
	goto done3;
    }

    if (!afs_ENameOK(aname)) {
	code = EINVAL;
	goto done3;
    }
    
    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (code)
	goto done;
    code = afs_VerifyVCache(adp, &treq);
    if (code)
	goto done;

    /** If the volume is read-only, return error without making an RPC to the
      * fileserver
      */
    if (adp->f.states & CRO) {
	code = EROFS;
	goto done;
    }
   
    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
	/*printf("Network is down in afs_mkdir\n");*/
	code = ENETDOWN;
	goto done;
    }
    InStatus.Mask = AFS_SETMODTIME | AFS_SETMODE | AFS_SETGROUP;
    InStatus.ClientModTime = osi_Time();
    InStatus.UnixModeBits = attrs->va_mode & 0xffff;	/* only care about protection bits */
    InStatus.Group = (afs_int32) afs_cr_gid(acred);
    tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);
    ObtainWriteLock(&adp->lock, 153);

    if (!AFS_IS_DISCON_RW) {
    	do {
	    tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK);
	    if (tc) {
	    	XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_MAKEDIR);
	    	now = osi_Time();
	    	RX_AFS_GUNLOCK();
	    	code =
		    RXAFS_MakeDir(tc->id,
		    		(struct AFSFid *)&adp->f.fid.Fid,
				aname,
				&InStatus,
				(struct AFSFid *)&newFid.Fid,
				&OutFidStatus,
				&OutDirStatus,
				&CallBack,
				&tsync);
	    	RX_AFS_GLOCK();
	    	XSTATS_END_TIME;
	    	CallBack.ExpirationTime += now;
	    	/* DON'T forget to Set the callback value... */
	    } else
	    	code = -1;
    	} while (afs_Analyze
		    (tc, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_MAKEDIR,
		     SHARED_LOCK, NULL));

    	if (code) {
	    if (code < 0) {
	    	ObtainWriteLock(&afs_xcbhash, 490);
	    	afs_DequeueCallback(adp);
	    	adp->f.states &= ~CStatd;
	    	ReleaseWriteLock(&afs_xcbhash);
	    	osi_dnlc_purgedp(adp);
	    }
	    ReleaseWriteLock(&adp->lock);
	    if (tdc)
	    	afs_PutDCache(tdc);
	    goto done;
        }

    } else {
#if defined(AFS_DISCON_ENV)
    	/* Disconnected. */

	/* We have the dir entry now, we can use it while disconnected. */
	if (adp->mvid == NULL) {
	    /* If not mount point, generate a new fid. */
	    newFid.Cell = adp->f.fid.Cell;
    	    newFid.Fid.Volume = adp->f.fid.Fid.Volume;
	    afs_GenFakeFid(&newFid, VDIR, 1);
	}
    	/* XXX: If mount point???*/

	/* Operations with the actual dir's cache entry are further
	 * down, where the dir entry gets created.
	 */
#endif
    }			/* if (!AFS_IS_DISCON_RW) */

    /* otherwise, we should see if we can make the change to the dir locally */
    if (tdc)
	ObtainWriteLock(&tdc->lock, 632);
    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, &OutDirStatus, 1)) {
	/* we can do it locally */
	ObtainWriteLock(&afs_xdcache, 294);
	code = afs_dir_Create(tdc, aname, &newFid.Fid);
	ReleaseWriteLock(&afs_xdcache);
	if (code) {
	    ZapDCE(tdc);	/* surprise error -- use invalid value */
	    DZap(tdc);
	}
    }
    if (tdc) {
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    if (AFS_IS_DISCON_RW)
	/* We will have to settle with the local link count. */
	adp->f.m.LinkCount++;
    else
	adp->f.m.LinkCount = OutDirStatus.LinkCount;
    newFid.Cell = adp->f.fid.Cell;
    newFid.Fid.Volume = adp->f.fid.Fid.Volume;
    ReleaseWriteLock(&adp->lock);
    if (AFS_IS_DISCON_RW) {
#if defined(AFS_DISCON_ENV)
    	/* When disconnected, we have to create the full dir here. */

	/* Generate a new vcache and fill it. */
	tvc = afs_NewVCache(&newFid, NULL);
	if (tvc) {
	    *avcp = tvc;
	} else {
	    code = ENOENT;
	    goto done;
	}

	ObtainWriteLock(&tvc->lock, 738);
	afs_GenDisconStatus(adp, tvc, &newFid, attrs, &treq, VDIR);
	ReleaseWriteLock(&tvc->lock);

	/* And now make an empty dir, containing . and .. : */
	/* Get a new dcache for it first. */
	new_dc = afs_GetDCache(tvc, (afs_size_t) 0, &treq, &offset, &len, 1);
	if (!new_dc) {
	    /* printf("afs_mkdir: can't get new dcache for dir.\n"); */
	    code = ENOENT;
	    goto done;
	}

	ObtainWriteLock(&afs_xdcache, 739);
	code = afs_dir_MakeDir(new_dc,
			       (afs_int32 *) &newFid.Fid,
			       (afs_int32 *) &adp->f.fid.Fid);
	ReleaseWriteLock(&afs_xdcache);
	/* if (code) printf("afs_mkdir: afs_dirMakeDir code = %u\n", code); */

	afs_PutDCache(new_dc);

	ObtainWriteLock(&tvc->lock, 731);
	/* Update length in the vcache. */
	tvc->f.m.Length = new_dc->f.chunkBytes;

	afs_DisconAddDirty(tvc, VDisconCreate, 1);
	ReleaseWriteLock(&tvc->lock);
#endif				/* #ifdef AFS_DISCON_ENV */
    } else {
    	/* now we're done with parent dir, create the real dir's cache entry */
    	tvc = afs_GetVCache(&newFid, &treq, NULL, NULL);
    	if (tvc) {
	    code = 0;
	    *avcp = tvc;
    	} else
	    code = ENOENT;
    }				/* if (AFS_DISCON_RW) */

  done:
    AFS_DISCON_UNLOCK();
  done3:
    afs_PutFakeStat(&fakestate);
    code = afs_CheckCode(code, &treq, 26);
  done2:
    return code;
}
Example #13
static int afs_export_get_name(struct dentry *parent, char *name,
			       struct dentry *child)
{
    struct afs_fakestat_state fakestate;
    struct get_name_data data;
    struct vrequest treq;
    struct volume *tvp;
    struct vcache *vcp;
    struct dcache *tdc;
    cred_t *credp;
    afs_size_t dirOffset, dirLen;
    afs_int32 code = 0;

    if (!parent->d_inode) {
#ifdef OSI_EXPORT_DEBUG
	/* can't lookup name in a negative dentry */
	printk("afs: get_name(%s, %s): no parent inode\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       child->d_name.name  ? (char *)child->d_name.name  : "?");
#endif
	return -EIO;
    }
    if (!child->d_inode) {
#ifdef OSI_EXPORT_DEBUG
	/* can't find the FID of negative dentry */
	printk("afs: get_name(%s, %s): no child inode\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       child->d_name.name  ? (char *)child->d_name.name  : "?");
#endif
	return -ENOENT;
    }

    afs_InitFakeStat(&fakestate);

    credp = crref();
    AFS_GLOCK();

    vcp = VTOAFS(child->d_inode);

    /* special case dynamic mount directory */
    if (afs_IsDynrootMount(vcp)) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_name(%s, 0x%08x/%d/%d.%d): this is the dynmount dir\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       vcp->f.fid.Cell,      vcp->f.fid.Fid.Volume,
	       vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
	data.fid = vcp->f.fid;
	if (VTOAFS(parent->d_inode) == afs_globalVp)
	    strcpy(name, AFS_DYNROOT_MOUNTNAME);
	else
	    code = -ENOENT;
	goto done;
    }

    /* Figure out what FID to look for */
    if (vcp->mvstat == 2) { /* volume root */
	tvp = afs_GetVolume(&vcp->f.fid, 0, READ_LOCK);
	if (!tvp) {
#ifdef OSI_EXPORT_DEBUG
	    printk("afs: get_name(%s, 0x%08x/%d/%d.%d): no volume for root\n",
		   parent->d_name.name ? (char *)parent->d_name.name : "?",
		   vcp->f.fid.Cell,      vcp->f.fid.Fid.Volume,
		   vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
	    code = ENOENT;
	    goto done;
	}
	data.fid = tvp->mtpoint;
	afs_PutVolume(tvp, READ_LOCK);
    } else {
	data.fid = vcp->f.fid;
    }

    vcp = VTOAFS(parent->d_inode);
#ifdef OSI_EXPORT_DEBUG
    printk("afs: get_name(%s, 0x%08x/%d/%d.%d): parent is 0x%08x/%d/%d.%d\n",
	   parent->d_name.name ? (char *)parent->d_name.name : "?",
	   data.fid.Cell,      data.fid.Fid.Volume,
	   data.fid.Fid.Vnode, data.fid.Fid.Unique,
	   vcp->f.fid.Cell,      vcp->f.fid.Fid.Volume,
	   vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif

    code = afs_InitReq(&treq, credp);
    if (code) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_name(%s, 0x%08x/%d/%d.%d): afs_InitReq: %d\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       data.fid.Cell,      data.fid.Fid.Volume,
	       data.fid.Fid.Vnode, data.fid.Fid.Unique, code);
#endif
	goto done;
    }

    /* a dynamic mount point in the dynamic mount directory */
    if (afs_IsDynrootMount(vcp) && afs_IsDynrootAnyFid(&data.fid)
	&& VNUM_TO_VNTYPE(data.fid.Fid.Vnode) == VN_TYPE_MOUNT) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_name(%s, 0x%08x/%d/%d.%d): dynamic mount point\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       data.fid.Cell,      data.fid.Fid.Volume,
	       data.fid.Fid.Vnode, data.fid.Fid.Unique);
#endif
	vcp = afs_GetVCache(&data.fid, &treq, NULL, NULL);
	if (vcp) {
	    ObtainReadLock(&vcp->lock);
	    if (strlen(vcp->linkData + 1) <= NAME_MAX)
		strcpy(name, vcp->linkData + 1);
	    else
		code = ENOENT;
	    ReleaseReadLock(&vcp->lock);
	    afs_PutVCache(vcp);
	} else {
#ifdef OSI_EXPORT_DEBUG
	    printk("afs: get_name(%s, 0x%08x/%d/%d.%d): no vcache\n",
		   parent->d_name.name ? (char *)parent->d_name.name : "?",
		   data.fid.Cell,      data.fid.Fid.Volume,
		   data.fid.Fid.Vnode, data.fid.Fid.Unique);
#endif
	    code = ENOENT;
	}
	goto done;
    }

    code = afs_EvalFakeStat(&vcp, &fakestate, &treq);
    if (code)
	goto done;

    if (vcp->f.fid.Cell != data.fid.Cell ||
	vcp->f.fid.Fid.Volume != data.fid.Fid.Volume) {
	/* parent is not the expected cell and volume; thus it
	 * cannot possibly contain the fid we are looking for */
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_name(%s, 0x%08x/%d/%d.%d): wrong parent 0x%08x/%d\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       data.fid.Cell,      data.fid.Fid.Volume,
	       data.fid.Fid.Vnode, data.fid.Fid.Unique,
	       vcp->f.fid.Cell,      vcp->f.fid.Fid.Volume);
#endif
	code = ENOENT;
	goto done;
    }


redo:
    if (!(vcp->f.states & CStatd)) {
	if ((code = afs_VerifyVCache2(vcp, &treq))) {
#ifdef OSI_EXPORT_DEBUG
	    printk("afs: get_name(%s, 0x%08x/%d/%d.%d): VerifyVCache2(0x%08x/%d/%d.%d): %d\n",
		   parent->d_name.name ? (char *)parent->d_name.name : "?",
		   data.fid.Cell,      data.fid.Fid.Volume,
		   data.fid.Fid.Vnode, data.fid.Fid.Unique,
		   vcp->f.fid.Cell,      vcp->f.fid.Fid.Volume,
		   vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
	    goto done;
	}
    }

    tdc = afs_GetDCache(vcp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
    if (!tdc) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_name(%s, 0x%08x/%d/%d.%d): GetDCache(0x%08x/%d/%d.%d): %d\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       data.fid.Cell,      data.fid.Fid.Volume,
	       data.fid.Fid.Vnode, data.fid.Fid.Unique,
	       vcp->f.fid.Cell,      vcp->f.fid.Fid.Volume,
	       vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
	code = EIO;
	goto done;
    }

    ObtainReadLock(&vcp->lock);
    ObtainReadLock(&tdc->lock);

    /*
     * Make sure that the data in the cache is current. There are two
     * cases we need to worry about:
     * 1. The cache data is being fetched by another process.
     * 2. The cache data is no longer valid
     */
    while ((vcp->f.states & CStatd)
	   && (tdc->dflags & DFFetching)
	   && hsame(vcp->f.m.DataVersion, tdc->f.versionNo)) {
	ReleaseReadLock(&tdc->lock);
	ReleaseReadLock(&vcp->lock);
	afs_osi_Sleep(&tdc->validPos);
	ObtainReadLock(&vcp->lock);
	ObtainReadLock(&tdc->lock);
    }
    if (!(vcp->f.states & CStatd)
	|| !hsame(vcp->f.m.DataVersion, tdc->f.versionNo)) {
	ReleaseReadLock(&tdc->lock);
	ReleaseReadLock(&vcp->lock);
	afs_PutDCache(tdc);
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_name(%s, 0x%08x/%d/%d.%d): dir (0x%08x/%d/%d.%d) changed; retrying\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       data.fid.Cell,      data.fid.Fid.Volume,
	       data.fid.Fid.Vnode, data.fid.Fid.Unique,
	       vcp->f.fid.Cell,      vcp->f.fid.Fid.Volume,
	       vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
	goto redo;
    }

    data.name  = name;
    data.found = 0;
    code = afs_dir_EnumerateDir(tdc, get_name_hook, &data);
    if (!code && !data.found) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_name(%s, 0x%08x/%d/%d.%d): not found\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       data.fid.Cell,      data.fid.Fid.Volume,
	       data.fid.Fid.Vnode, data.fid.Fid.Unique);
#endif
	code = ENOENT;
    } else if (code) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_name(%s, 0x%08x/%d/%d.%d): Enumeratedir(0x%08x/%d/%d.%d): %d\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       data.fid.Cell,      data.fid.Fid.Volume,
	       data.fid.Fid.Vnode, data.fid.Fid.Unique,
	       vcp->f.fid.Cell,      vcp->f.fid.Fid.Volume,
	       vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
    }

    ReleaseReadLock(&tdc->lock);
    ReleaseReadLock(&vcp->lock);
    afs_PutDCache(tdc);

done:
    if (!code) {
	printk("afs: get_name(%s, 0x%08x/%d/%d.%d) => %s\n",
	       parent->d_name.name ? (char *)parent->d_name.name : "?",
	       data.fid.Cell,      data.fid.Fid.Volume,
	       data.fid.Fid.Vnode, data.fid.Fid.Unique, name);
    }
    afs_PutFakeStat(&fakestate);
    AFS_GUNLOCK();
    crfree(credp);
    code = afs_CheckCode(code, &treq, 102);
    return -code;
}
Example #14
static struct dentry *afs_export_get_parent(struct dentry *child)
{
    struct VenusFid tfid;
    struct vrequest treq;
    struct cell *tcell;
    struct vcache *vcp;
    struct dentry *dp = NULL;
    cred_t *credp;
    afs_uint32 cellidx;
    int code;

    if (!child->d_inode) {
	/* can't find the parent of a negative dentry */
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_parent(%s): no inode\n",
	       child->d_name.name ? (char *)child->d_name.name : "?");
#endif
	return ERR_PTR(-EIO);
    }

    credp = crref();
    AFS_GLOCK();

    vcp = VTOAFS(child->d_inode);

    if (afs_IsDynrootMount(vcp)) {
	/* the dynmount directory; parent is always the AFS root */
	tfid = afs_globalVp->f.fid;

    } else if (afs_IsDynrootAny(vcp) &&
	       VNUM_TO_VNTYPE(vcp->f.fid.Fid.Vnode) == VN_TYPE_MOUNT) {
	/* a mount point in the dynmount directory */
	afs_GetDynrootMountFid(&tfid);

    } else if (vcp->mvstat == 2) {
	/* volume root */
	ObtainReadLock(&vcp->lock);
	if (vcp->mvid && vcp->mvid->Fid.Volume) {
	    tfid = *vcp->mvid;
	    ReleaseReadLock(&vcp->lock);
	} else {
	    ReleaseReadLock(&vcp->lock);
	    tcell = afs_GetCell(vcp->f.fid.Cell, READ_LOCK);
	    if (!tcell) {
#ifdef OSI_EXPORT_DEBUG
		printk("afs: get_parent(0x%08x/%d/%d.%d): no cell\n",
		       vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
		       vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
		dp = ERR_PTR(-ENOENT);
		goto done;
	    }

	    cellidx = tcell->cellIndex;
	    afs_PutCell(tcell, READ_LOCK);

	    afs_GetDynrootMountFid(&tfid);
	    tfid.Fid.Vnode = VNUM_FROM_TYPEID(VN_TYPE_MOUNT, cellidx << 2);
	    tfid.Fid.Unique = vcp->f.fid.Fid.Volume;
	}

    } else {
	/* any other vnode */
	if (vType(vcp) == VDIR && !vcp->f.parent.vnode && vcp->mvstat != 1) {
	    code = afs_InitReq(&treq, credp);
	    if (code) {
#ifdef OSI_EXPORT_DEBUG
		printk("afs: get_parent(0x%08x/%d/%d.%d): InitReq: %d\n",
		       vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
		       vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
		dp = ERR_PTR(-ENOENT);
		goto done;
	    } else {
		code = update_dir_parent(&treq, vcp);
		if (code) {
#ifdef OSI_EXPORT_DEBUG
		    printk("afs: get_parent(0x%08x/%d/%d.%d): update_dir_parent: %d\n",
			   vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
			   vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
		    dp = ERR_PTR(-ENOENT);
		    goto done;
		}
	    }
	}

	tfid.Cell       = vcp->f.fid.Cell;
	tfid.Fid.Volume = vcp->f.fid.Fid.Volume;
	tfid.Fid.Vnode  = vcp->f.parent.vnode;
	tfid.Fid.Unique = vcp->f.parent.unique;
    }

#ifdef OSI_EXPORT_DEBUG
    printk("afs: get_parent(0x%08x/%d/%d.%d): => 0x%08x/%d/%d.%d\n",
	   vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
	   vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique,
	   tfid.Cell, tfid.Fid.Volume, tfid.Fid.Vnode, tfid.Fid.Unique);
#endif

    dp = get_dentry_from_fid(credp, &tfid);
    if (!dp) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_parent(0x%08x/%d/%d.%d): no dentry\n",
	       vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
	       vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
	dp = ERR_PTR(-ENOENT);
    }

done:
    AFS_GUNLOCK();
    crfree(credp);

    return dp;
}
Example #15
/* question: does afs_create need to set CDirty in the adp or the avc?
 * I think we can get away without it, but I'm not sure.  Note that
 * afs_setattr is called in here for truncation.
 */
#ifdef AFS_SGI64_ENV
int
afs_create(OSI_VC_DECL(adp), char *aname, struct vattr *attrs, int flags,
	   int amode, struct vcache **avcp, afs_ucred_t *acred)
#else /* AFS_SGI64_ENV */
int
afs_create(OSI_VC_DECL(adp), char *aname, struct vattr *attrs,
	   enum vcexcl aexcl, int amode, struct vcache **avcp,
	   afs_ucred_t *acred)
#endif				/* AFS_SGI64_ENV */
{
    afs_int32 origCBs, origZaps, finalZaps;
    struct vrequest treq;
    register afs_int32 code;
    register struct afs_conn *tc;
    struct VenusFid newFid;
    struct AFSStoreStatus InStatus;
    struct AFSFetchStatus OutFidStatus, OutDirStatus;
    struct AFSVolSync tsync;
    struct AFSCallBack CallBack;
    afs_int32 now;
    struct dcache *tdc;
    afs_size_t offset, len;
    struct server *hostp = 0;
    struct vcache *tvc;
    struct volume *volp = 0;
    struct afs_fakestat_state fakestate;
    XSTATS_DECLS;
    OSI_VC_CONVERT(adp);


    AFS_STATCNT(afs_create);
    if ((code = afs_InitReq(&treq, acred)))
	goto done2;

    afs_Trace3(afs_iclSetp, CM_TRACE_CREATE, ICL_TYPE_POINTER, adp,
	       ICL_TYPE_STRING, aname, ICL_TYPE_INT32, amode);

    afs_InitFakeStat(&fakestate);

#ifdef AFS_SGI65_ENV
    /* If avcp is passed not null, it's the old reference to this file.
     * We can use this to avoid create races. For now, just decrement
     * the reference count on it.
     */
    if (*avcp) {
	AFS_RELE(AFSTOV(*avcp));
	*avcp = NULL;
    }
#endif

    if (strlen(aname) > AFSNAMEMAX) {
	code = ENAMETOOLONG;
	goto done3;
    }

    if (!afs_ENameOK(aname)) {
	code = EINVAL;
	goto done3;
    }
    switch (attrs->va_type) {
    case VBLK:
    case VCHR:
#if	!defined(AFS_SUN5_ENV)
    case VSOCK:
#endif
    case VFIFO:
	/* We don't support special devices or FIFOs */
	code = EINVAL;
	goto done3;
    default:
	;
    }
    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (code)
	goto done;
  tagain:
    code = afs_VerifyVCache(adp, &treq);
    if (code)
	goto done;

    /** If the volume is read-only, return error without making an RPC to the
      * fileserver
      */
    if (adp->f.states & CRO) {
	code = EROFS;
	goto done;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
        code = ENETDOWN;
        goto done;
    }

    tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);
    ObtainWriteLock(&adp->lock, 135);
    if (tdc)
	ObtainSharedLock(&tdc->lock, 630);

    /*
     * Make sure that the data in the cache is current. We may have
     * received a callback while we were waiting for the write lock.
     */
    if (!(adp->f.states & CStatd)
	|| (tdc && !hsame(adp->f.m.DataVersion, tdc->f.versionNo))) {
	ReleaseWriteLock(&adp->lock);
	if (tdc) {
	    ReleaseSharedLock(&tdc->lock);
	    afs_PutDCache(tdc);
	}
	goto tagain;
    }
    if (tdc) {
	/* see if file already exists.  If it does, we only set 
	 * the size attributes (to handle O_TRUNC) */
	code = afs_dir_Lookup(tdc, aname, &newFid.Fid);	/* use dnlc first xxx */
	if (code == 0) {
	    ReleaseSharedLock(&tdc->lock);
	    afs_PutDCache(tdc);
	    ReleaseWriteLock(&adp->lock);
#ifdef AFS_SGI64_ENV
	    if (flags & VEXCL) {
#else
	    if (aexcl != NONEXCL) {
#endif
		code = EEXIST;	/* file exists in excl mode open */
		goto done;
	    }
	    /* found the file, so use it */
	    newFid.Cell = adp->f.fid.Cell;
	    newFid.Fid.Volume = adp->f.fid.Fid.Volume;
	    tvc = NULL;
	    if (newFid.Fid.Unique == 0) {
		tvc = afs_LookupVCache(&newFid, &treq, NULL, adp, aname);
	    }
	    if (!tvc)		/* lookup failed or wasn't called */
		tvc = afs_GetVCache(&newFid, &treq, NULL, NULL);

	    if (tvc) {
		/* if the thing exists, we need the right access to open it.
		 * we must check that here, since no other checks are
		 * made by the open system call */
		len = attrs->va_size;	/* only do the truncate */
		/*
		 * We used to check always for READ access before; the
		 * problem is that we will fail if the existing file
		 * has mode -w-w-w, which is wrong.
		 */
		if ((amode & VREAD)
		    && !afs_AccessOK(tvc, PRSFS_READ, &treq, CHECK_MODE_BITS)) {
		    afs_PutVCache(tvc);
		    code = EACCES;
		    goto done;
		}
#if defined(AFS_DARWIN80_ENV)
		if ((amode & VWRITE) || VATTR_IS_ACTIVE(attrs, va_data_size))
#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
		if ((amode & VWRITE) || (attrs->va_mask & AT_SIZE))
#else
		if ((amode & VWRITE) || len != 0xffffffff)
#endif
		{
		    /* needed for write access check */
		    tvc->f.parent.vnode = adp->f.fid.Fid.Vnode;
		    tvc->f.parent.unique = adp->f.fid.Fid.Unique;
		    /* need write mode for these guys */
		    if (!afs_AccessOK
			(tvc, PRSFS_WRITE, &treq, CHECK_MODE_BITS)) {
			afs_PutVCache(tvc);
			code = EACCES;
			goto done;
		    }
		}
#if defined(AFS_DARWIN80_ENV)
		if (VATTR_IS_ACTIVE(attrs, va_data_size))
#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
		if (attrs->va_mask & AT_SIZE)
#else
		if (len != 0xffffffff)
#endif
		{
		    if (vType(tvc) != VREG) {
			afs_PutVCache(tvc);
			code = EISDIR;
			goto done;
		    }
		    /* do a truncate */
#if defined(AFS_DARWIN80_ENV)
		    VATTR_INIT(attrs);
		    VATTR_SET_SUPPORTED(attrs, va_data_size);
		    VATTR_SET_ACTIVE(attrs, va_data_size);
#elif defined(UKERNEL)
		    attrs->va_mask = ATTR_SIZE;
#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
		    attrs->va_mask = AT_SIZE;
#else
		    VATTR_NULL(attrs);
#endif
		    attrs->va_size = len;
		    ObtainWriteLock(&tvc->lock, 136);
		    tvc->f.states |= CCreating;
		    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
#if defined(AFS_SGI64_ENV)
		    code =
			afs_setattr(VNODE_TO_FIRST_BHV((vnode_t *) tvc),
				    attrs, 0, acred);
#else
		    code = afs_setattr(tvc, attrs, 0, acred);
#endif /* AFS_SGI64_ENV */
#else /* SUN5 || SGI */
		    code = afs_setattr(tvc, attrs, acred);
#endif /* SUN5 || SGI */
		    ObtainWriteLock(&tvc->lock, 137);
		    tvc->f.states &= ~CCreating;
		    ReleaseWriteLock(&tvc->lock);
		    if (code) {
			afs_PutVCache(tvc);
			goto done;
		    }
		}
		*avcp = tvc;
	    } else
		code = ENOENT;	/* shouldn't get here */
	    /* make sure vrefCount bumped only if code == 0 */
	    goto done;
	}
    }
    
    /* if we create the file, we don't do any access checks, since
     * that's how O_CREAT is supposed to work */
    if (adp->f.states & CForeign) {
	origCBs = afs_allCBs;
	origZaps = afs_allZaps;
    } else {
	origCBs = afs_evenCBs;	/* if changes, we don't really have a callback */
	origZaps = afs_evenZaps;	/* number of even numbered vnodes discarded */
    }
    InStatus.Mask = AFS_SETMODTIME | AFS_SETMODE | AFS_SETGROUP;
    InStatus.ClientModTime = osi_Time();
    InStatus.Group = (afs_int32) afs_cr_gid(acred);
    if (AFS_NFSXLATORREQ(acred)) {
	/*
	 * XXX The following is mainly used to fix a bug in the HP-UX
	 * nfs client where they create files with mode of 0 without
	 * doing any setattr later on to fix it.  * XXX
	 */
#if	defined(AFS_AIX_ENV)
	if (attrs->va_mode != -1) {
#else
#if	defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
	if (attrs->va_mask & AT_MODE) {
#else
	if (attrs->va_mode != ((unsigned short)-1)) {
#endif
#endif
	    if (!attrs->va_mode)
		attrs->va_mode = 0x1b6;	/* XXX default mode: rw-rw-rw XXX */
	}
    }

    if (!AFS_IS_DISCONNECTED) {
	/* If not disconnected, connect to the server.*/

    	InStatus.UnixModeBits = attrs->va_mode & 0xffff;	/* only care about protection bits */
    	do {
	    tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK);
	    if (tc) {
	    	hostp = tc->srvr->server;	/* remember for callback processing */
	    	now = osi_Time();
	    	XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_CREATEFILE);
	    	RX_AFS_GUNLOCK();
	    	code =
		    RXAFS_CreateFile(tc->id, (struct AFSFid *)&adp->f.fid.Fid,
				 aname, &InStatus, (struct AFSFid *)
				 &newFid.Fid, &OutFidStatus, &OutDirStatus,
				 &CallBack, &tsync);
	    	RX_AFS_GLOCK();
	    	XSTATS_END_TIME;
	    	CallBack.ExpirationTime += now;
	    } else
	    	code = -1;
    	} while (afs_Analyze
	         (tc, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_CREATEFILE,
	          SHARED_LOCK, NULL));

	if ((code == EEXIST || code == UAEEXIST) &&
#ifdef AFS_SGI64_ENV
    	!(flags & VEXCL)
#else /* AFS_SGI64_ENV */
    	aexcl == NONEXCL
#endif
    	) {
	    /* if we get an EEXIST in nonexcl mode, just do a lookup */
	    if (tdc) {
	    	ReleaseSharedLock(&tdc->lock);
	    	afs_PutDCache(tdc);
	    }
	    ReleaseWriteLock(&adp->lock);


#if defined(AFS_SGI64_ENV)
	    code = afs_lookup(VNODE_TO_FIRST_BHV((vnode_t *) adp), aname, avcp,
				  NULL, 0, NULL, acred);
#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
	    code = afs_lookup(adp, aname, avcp, NULL, 0, NULL, acred);
#elif defined(UKERNEL)
	    code = afs_lookup(adp, aname, avcp, acred, 0);
#elif !defined(AFS_DARWIN_ENV)
	    code = afs_lookup(adp, aname, avcp, acred);
#endif
	goto done;
        }

	if (code) {
	    if (code < 0) {
	    	ObtainWriteLock(&afs_xcbhash, 488);
	    	afs_DequeueCallback(adp);
	    	adp->f.states &= ~CStatd;
	    	ReleaseWriteLock(&afs_xcbhash);
	    	osi_dnlc_purgedp(adp);
	    }
	    ReleaseWriteLock(&adp->lock);
	    if (tdc) {
	    	ReleaseSharedLock(&tdc->lock);
	    	afs_PutDCache(tdc);
	    }
	goto done;
	}

    } else {
#if defined(AFS_DISCON_ENV)
	/* Generate a fake FID for disconnected mode. */
	newFid.Cell = adp->f.fid.Cell;
	newFid.Fid.Volume = adp->f.fid.Fid.Volume;
	afs_GenFakeFid(&newFid, VREG, 1);
#endif
    }				/* if (!AFS_IS_DISCON_RW) */

    /* otherwise, we should see if we can make the change to the dir locally */
    if (tdc)
	UpgradeSToWLock(&tdc->lock, 631);
    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, &OutDirStatus, 1)) {
	/* we can do it locally */
	ObtainWriteLock(&afs_xdcache, 291);
	code = afs_dir_Create(tdc, aname, &newFid.Fid);
	ReleaseWriteLock(&afs_xdcache);
	if (code) {
	    ZapDCE(tdc);
	    DZap(tdc);
	}
    }
    if (tdc) {
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }
    if (AFS_IS_DISCON_RW)
	adp->f.m.LinkCount++;

    newFid.Cell = adp->f.fid.Cell;
    newFid.Fid.Volume = adp->f.fid.Fid.Volume;
    ReleaseWriteLock(&adp->lock);
    volp = afs_FindVolume(&newFid, READ_LOCK);

    /* New tricky optimistic callback handling algorithm for file creation works
     * as follows.  We create the file essentially with no locks set at all.  File
     * server may thus handle operations from others cache managers as well as from
     * this very own cache manager that reference the file in question before
     * we managed to create the cache entry.  However, if anyone else changes
     * any of the status information for a file, we'll see afs_evenCBs increase
     * (files always have even fids).  If someone on this workstation manages
     * to do something to the file, they'll end up having to create a cache
     * entry for the new file.  Either we'll find it once we've got the afs_xvcache
     * lock set, or it was also *deleted* the vnode before we got there, in which case
     * we will find evenZaps has changed, too.  Thus, we only assume we have the right
     * status information if no callbacks or vnode removals have occurred to even
     * numbered files from the time the call started until the time that we got the xvcache
     * lock set.  Of course, this also assumes that any call that modifies a file first
     * gets a write lock on the file's vnode, but if that weren't true, the whole cache manager
     * would fail, since no call would be able to update the local vnode status after modifying
     * a file on a file server. */
    ObtainWriteLock(&afs_xvcache, 138);
    if (adp->f.states & CForeign)
	finalZaps = afs_allZaps;	/* do this before calling newvcache */
    else
	finalZaps = afs_evenZaps;	/* do this before calling newvcache */
    /* don't need to call RemoveVCB, since only path leaving a callback is the
     * one where we pass through afs_NewVCache.  Can't have queued a VCB unless
     * we created and freed an entry between file creation time and here, and the
     * freeing of the vnode will change evenZaps.  Don't need to update the VLRU
     * queue, since the find will only succeed in the event of a create race, and 
     * then the vcache will be at the front of the VLRU queue anyway...  */
    if (!(tvc = afs_FindVCache(&newFid, 0, DO_STATS))) {
	tvc = afs_NewVCache(&newFid, hostp);
	if (tvc) {
	    int finalCBs;
	    ObtainWriteLock(&tvc->lock, 139);

	    ObtainWriteLock(&afs_xcbhash, 489);
	    finalCBs = afs_evenCBs;
	    /* add the callback in */
	    if (adp->f.states & CForeign) {
		tvc->f.states |= CForeign;
		finalCBs = afs_allCBs;
	    }
	    if (origCBs == finalCBs && origZaps == finalZaps) {
		tvc->f.states |= CStatd;	/* we've faked the entire thing, so don't stat */
		tvc->f.states &= ~CBulkFetching;
		if (!AFS_IS_DISCON_RW) {
		    tvc->cbExpires = CallBack.ExpirationTime;
		    afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), volp);
		}
	    } else {
		afs_DequeueCallback(tvc);
		tvc->f.states &= ~(CStatd | CUnique);
		tvc->callback = 0;
		if (tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR))
		    osi_dnlc_purgedp(tvc);
	    }
	    ReleaseWriteLock(&afs_xcbhash);
	    if (AFS_IS_DISCON_RW) {
#if defined(AFS_DISCON_ENV)
		afs_DisconAddDirty(tvc, VDisconCreate, 0);
		afs_GenDisconStatus(adp, tvc, &newFid, attrs, &treq, VREG);
#endif
	    } else {
		afs_ProcessFS(tvc, &OutFidStatus, &treq);
	    }

	    tvc->f.parent.vnode = adp->f.fid.Fid.Vnode;
	    tvc->f.parent.unique = adp->f.fid.Fid.Unique;
	    ReleaseWriteLock(&tvc->lock);
	    *avcp = tvc;
	    code = 0;
	} else
	    code = ENOENT;
    } else {
	/* otherwise cache entry already exists, someone else must
	 * have created it.  Comments used to say:  "don't need write
	 * lock to *clear* these flags" but we should do it anyway.
	 * Code used to clear stat bit and callback, but I don't see 
	 * the point -- we didn't have a create race, somebody else just
	 * snuck into NewVCache before we got here, probably a racing 
	 * lookup.
	 */
	*avcp = tvc;
	code = 0;
    }
    ReleaseWriteLock(&afs_xvcache);

  done:
    AFS_DISCON_UNLOCK();

  done3:
    if (volp)
	afs_PutVolume(volp, READ_LOCK);

    if (code == 0) {
	afs_AddMarinerName(aname, *avcp);
	/* return the new status in vattr */
	afs_CopyOutAttrs(*avcp, attrs);
    }

    afs_PutFakeStat(&fakestate);
    code = afs_CheckCode(code, &treq, 20);

  done2:
    return code;
}


/*
 * Check to see if we can track the change locally: requires that
 * we have sufficiently recent info in data cache.  If so, we
 * know the new DataVersion number, and place it correctly in both the
 * data and stat cache entries.  This routine returns 1 if we should
 * do the operation locally, and 0 otherwise.
 *
 * This routine must be called with the stat cache entry write-locked,
 * and dcache entry write-locked.
 */
int
afs_LocalHero(register struct vcache *avc, register struct dcache *adc,
	      register AFSFetchStatus * astat, register int aincr)
{
    register afs_int32 ok;
    afs_hyper_t avers;

    AFS_STATCNT(afs_LocalHero);
    hset64(avers, astat->dataVersionHigh, astat->DataVersion);
    /* this *is* the version number, no matter what */
    if (adc) {
	ok = (hsame(avc->f.m.DataVersion, adc->f.versionNo) && avc->callback
	      && (avc->f.states & CStatd) && avc->cbExpires >= osi_Time());
    } else {
	ok = 0;
    }
#if defined(AFS_SGI_ENV)
    osi_Assert(avc->v.v_type == VDIR);
#endif
    /* The bulk status code used the length as a sequence number.  */
    /* Don't update the vcache entry unless the stats are current. */
    if (avc->f.states & CStatd) {
	hset(avc->f.m.DataVersion, avers);
#ifdef AFS_64BIT_CLIENT
	FillInt64(avc->f.m.Length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
	avc->f.m.Length = astat->Length;
#endif /* AFS_64BIT_CLIENT */
	avc->f.m.Date = astat->ClientModTime;
    }
    if (ok) {
	/* we've been tracking things correctly */
	adc->dflags |= DFEntryMod;
	adc->f.versionNo = avers;
	return 1;
    } else {
	if (adc) {
	    ZapDCE(adc);
	    DZap(adc);
	}
	if (avc->f.states & CStatd) {
	    osi_dnlc_purgedp(avc);
	}
	return 0;
    }
}
Example #16
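/* The #ifdef prototype variants preceding this declaration were cut from the
 * excerpt; the #endif below closes them. */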
afs_link(struct vcache *avc, OSI_VC_DECL(adp), char *aname, 
	 afs_ucred_t *acred)
#endif
{
    struct vrequest treq;
    struct dcache *tdc;
    afs_int32 code;
    struct afs_conn *tc;
    afs_size_t offset, len;
    struct AFSFetchStatus OutFidStatus, OutDirStatus;
    struct AFSVolSync tsync;
    struct afs_fakestat_state vfakestate, dfakestate;
    struct rx_connection *rxconn;
    XSTATS_DECLS;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_link);
    afs_Trace3(afs_iclSetp, CM_TRACE_LINK, ICL_TYPE_POINTER, adp,
	       ICL_TYPE_POINTER, avc, ICL_TYPE_STRING, aname);
    /* create a hard link; new entry is aname in dir adp */
    if ((code = afs_InitReq(&treq, acred)))
	goto done2;

    afs_InitFakeStat(&vfakestate);
    afs_InitFakeStat(&dfakestate);
    
    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&avc, &vfakestate, &treq);
    if (code)
	goto done;
    code = afs_EvalFakeStat(&adp, &dfakestate, &treq);
    if (code)
	goto done;

    if (avc->f.fid.Cell != adp->f.fid.Cell
	|| avc->f.fid.Fid.Volume != adp->f.fid.Fid.Volume) {
	code = EXDEV;
	goto done;
    }
    if (strlen(aname) > AFSNAMEMAX) {
	code = ENAMETOOLONG;
	goto done;
    }
    code = afs_VerifyVCache(adp, &treq);
    if (code)
	goto done;

    /** If the volume is read-only, return error without making an RPC to the
      * fileserver
      */
    if (adp->f.states & CRO) {
	code = EROFS;
	goto done;
    }
    
    if (AFS_IS_DISCONNECTED) {
        code = ENETDOWN;
        goto done;
    }

    tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);	/* test for error below */
    ObtainWriteLock(&adp->lock, 145);
    do {
	tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK, &rxconn);
	if (tc) {
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_LINK);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_Link(rxconn, (struct AFSFid *)&adp->f.fid.Fid, aname,
			   (struct AFSFid *)&avc->f.fid.Fid, &OutFidStatus,
			   &OutDirStatus, &tsync);
	    RX_AFS_GLOCK();
	    XSTATS_END_TIME;

	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, rxconn, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_LINK,
	      SHARED_LOCK, NULL));

    if (code) {
	if (tdc)
	    afs_PutDCache(tdc);
	if (code < 0) {
	    ObtainWriteLock(&afs_xcbhash, 492);
	    afs_DequeueCallback(adp);
	    adp->f.states &= ~CStatd;
	    ReleaseWriteLock(&afs_xcbhash);
	    osi_dnlc_purgedp(adp);
	}
	ReleaseWriteLock(&adp->lock);
	goto done;
    }
    if (tdc)
	ObtainWriteLock(&tdc->lock, 635);
    if (afs_LocalHero(adp, tdc, &OutDirStatus, 1)) {
	/* we can do it locally */
	ObtainWriteLock(&afs_xdcache, 290);
	code = afs_dir_Create(tdc, aname, &avc->f.fid.Fid);
	ReleaseWriteLock(&afs_xdcache);
	if (code) {
	    ZapDCE(tdc);	/* surprise error -- invalid value */
	    DZap(tdc);
	}
    }
    if (tdc) {
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);	/* drop ref count */
    }
    ReleaseWriteLock(&adp->lock);
    ObtainWriteLock(&avc->lock, 146);	/* correct link count */

    /* we could lock both dir and file; since we get the new fid
     * status back, you'd think we could put it in the cache status
     * entry at that point.  Note that if we don't lock the file over
     * the rpc call, we have no guarantee that the status info
     * returned in ustat is the most recent to store in the file's
     * cache entry */

    ObtainWriteLock(&afs_xcbhash, 493);
    afs_DequeueCallback(avc);
    avc->f.states &= ~CStatd;	/* don't really know new link count */
    ReleaseWriteLock(&afs_xcbhash);
    if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
	osi_dnlc_purgedp(avc);
    ReleaseWriteLock(&avc->lock);
    code = 0;
  done:
    code = afs_CheckCode(code, &treq, 24);
    afs_PutFakeStat(&vfakestate);
    afs_PutFakeStat(&dfakestate);
    AFS_DISCON_UNLOCK();
  done2:
    return code;
}
Exemplo n.º 17
0
/* dispatch a no-cache read request */
afs_int32
afs_ReadNoCache(struct vcache *avc,
		struct nocache_read_request *bparms,
		afs_ucred_t *acred)
{
    afs_int32 code;
    afs_int32 bcnt;
    struct brequest *breq;
    struct vrequest *areq;

    /* the receiver will free this */
    areq = osi_Alloc(sizeof(struct vrequest));

    if (avc->vc_error) {
	code = EIO;
	afs_warn("afs_ReadNoCache VCache Error!\n");
	goto cleanup;
    }
    if ((code = afs_InitReq(areq, acred))) {
	afs_warn("afs_ReadNoCache afs_InitReq error!\n");
	goto cleanup;
    }

    AFS_GLOCK();
    code = afs_VerifyVCache(avc, areq);
    AFS_GUNLOCK();

    if (code) {
	code = afs_CheckCode(code, areq, 11);	/* failed to get it */
	afs_warn("afs_ReadNoCache Failed to verify VCache!\n");
	goto cleanup;
    }

    bparms->areq = areq;

    /* and queue this one */
    bcnt = 1;
    AFS_GLOCK();
    while(bcnt < 20) {
	breq = afs_BQueue(BOP_FETCH_NOCACHE, avc, B_DONTWAIT, 0, acred, 1, 1,
			  bparms, (void *)0, (void *)0);
	if(breq != 0) {
	    code = 0;
	    break;
	}
	afs_osi_Wait(10 * bcnt, 0, 0);
	bcnt++;
    }
    AFS_GUNLOCK();

    if(!breq) {
    	code = EBUSY;
	goto cleanup;
    }

    return code;

cleanup:
    /* If there's a problem before we queue the request, we need to
     * do everything that would normally happen when the request was
     * processed, like unlocking the pages and freeing memory.
     */
    unlock_and_release_pages(bparms->auio);
    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(bparms->auio->uio_iov,
	     bparms->auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(bparms->auio, sizeof(struct uio));
    osi_Free(bparms, sizeof(struct nocache_read_request));
    return code;
}
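
The queueing loop and the cleanup label above follow one rule: once the request has been handed to a background daemon, the daemon owns the buffers; until then, any failure must undo everything the daemon would normally undo. Below is a standalone sketch of that shape with invented names and userland libc calls (the real code uses afs_BQueue and afs_osi_Wait); try_enqueue is an assumed helper.

#include <stdlib.h>
#include <unistd.h>

struct request {
    void *buffers;			/* resources the worker would free */
};

extern int try_enqueue(struct request *req);	/* assumed: 0 on success */

static int
queue_or_cleanup(struct request *req)
{
    int attempt;

    for (attempt = 1; attempt < 20; attempt++) {
	if (try_enqueue(req) == 0)
	    return 0;			/* the worker now owns req */
	usleep(10000 * attempt);	/* queue full: back off and retry */
    }
    /* never handed off: perform the worker's cleanup ourselves */
    free(req->buffers);
    free(req);
    return -1;				/* caller maps this to EBUSY */
}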
Exemplo n.º 18
0
afs_root(struct mount *mp, struct vnode **vpp)
#endif
{
    int error;
    struct vrequest treq;
    struct vcache *tvp = 0;
    struct vcache *gvp;
#if !defined(AFS_FBSD53_ENV) || defined(AFS_FBSD80_ENV)
    struct thread *td = curthread;
#endif
    struct ucred *cr = osi_curcred();

    AFS_GLOCK();
    AFS_STATCNT(afs_root);
    crhold(cr);
tryagain:
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
	tvp = afs_globalVp;
	error = 0;
    } else {
	if (!(error = afs_InitReq(&treq, cr)) && !(error = afs_CheckInit())) {
	    tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
	    /* we really want this to stay around */
	    if (tvp) {
		gvp = afs_globalVp;
		afs_globalVp = tvp;
		if (gvp) {
		    afs_PutVCache(gvp);
		    if (tvp != afs_globalVp) {
			/* someone raced us and won */
			afs_PutVCache(tvp);
			goto tryagain;
		    }
		}
	    } else
		error = ENOENT;
	}
    }
    if (tvp) {
	struct vnode *vp = AFSTOV(tvp);

	ASSERT_VI_UNLOCKED(vp, "afs_root");
	AFS_GUNLOCK();
	error = vget(vp, LK_EXCLUSIVE | LK_RETRY, td);
	AFS_GLOCK();
	/* we dropped the glock, so re-check everything it had serialized */
	if (!afs_globalVp || !(afs_globalVp->f.states & CStatd) ||
		tvp != afs_globalVp) {
	    vput(vp);
	    afs_PutVCache(tvp);
	    goto tryagain;
	}
	if (error != 0)
	    goto tryagain;
	/*
	 * I'm uncomfortable about this.  Shouldn't this happen at a
	 * higher level, and shouldn't we busy the top-level directory
	 * to prevent recycling?
	 */
	vp->v_vflag |= VV_ROOT;

	afs_globalVFS = mp;
	*vpp = vp;
    }

    afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, tvp ? AFSTOV(tvp) : NULL,
	       ICL_TYPE_INT32, error);
    AFS_GUNLOCK();
    crfree(cr);
    return error;
}
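
The tryagain loop above is an instance of a common pattern: state read under the global lock must be re-validated after the lock is dropped around a blocking call (vget here), and the whole lookup restarted if another thread changed it in the meantime. A rough, userland-only sketch of the same shape follows, with invented names (get_root, lookup_root) and a pthread mutex standing in for the AFS global lock.

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;
static void *cached_root;		/* protected by glock */

extern void *lookup_root(void);		/* assumed: may block; call unlocked */

static void *
get_root(void)
{
    void *root;

tryagain:
    pthread_mutex_lock(&glock);
    root = cached_root;
    pthread_mutex_unlock(&glock);
    if (root)
	return root;

    root = lookup_root();		/* blocking work, lock dropped */

    pthread_mutex_lock(&glock);
    if (cached_root != NULL && cached_root != root) {
	/* someone raced us and won; a real implementation would also
	 * release the reference on our root here, then start over */
	pthread_mutex_unlock(&glock);
	goto tryagain;
    }
    cached_root = root;
    pthread_mutex_unlock(&glock);
    return root;
}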
Exemplo n.º 19
0
afs_open(struct vcache **avcp, afs_int32 aflags, afs_ucred_t *acred)
#endif
{
    afs_int32 code;
    struct vrequest treq;
    struct vcache *tvc;
    int writing;
    struct afs_fakestat_state fakestate;

    AFS_STATCNT(afs_open);
    if ((code = afs_InitReq(&treq, acred)))
	return code;
#ifdef AFS_SGI64_ENV
    /* avcpp can be, but is not necessarily, bhp's vnode. */
    tvc = VTOAFS(BHV_TO_VNODE(bhv));
#else
    tvc = *avcp;
#endif
    afs_Trace2(afs_iclSetp, CM_TRACE_OPEN, ICL_TYPE_POINTER, tvc,
	       ICL_TYPE_INT32, aflags);
    afs_InitFakeStat(&fakestate);

    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&tvc, &fakestate, &treq);
    if (code)
	goto done;
    code = afs_VerifyVCache(tvc, &treq);
    if (code)
	goto done;

    ObtainReadLock(&tvc->lock);

    if (AFS_IS_DISCONNECTED && (afs_DCacheMissingChunks(tvc) != 0)) {
       ReleaseReadLock(&tvc->lock);
       /* printf("Network is down in afs_open: missing chunks\n"); */
       code = ENETDOWN;
       goto done;
    }

    ReleaseReadLock(&tvc->lock);

    if (aflags & (FWRITE | FTRUNC))
	writing = 1;
    else
	writing = 0;
    if (vType(tvc) == VDIR) {
	/* directory */
	if (writing) {
	    code = EISDIR;
	    goto done;
	} else {
	    if (!afs_AccessOK
		(tvc, ((tvc->f.states & CForeign) ? PRSFS_READ : PRSFS_LOOKUP),
		 &treq, CHECK_MODE_BITS)) {
		code = EACCES;
		/* printf("afs_Open: no access for dir\n"); */
		goto done;
	    }
	}
    } else {
#ifdef	AFS_SUN5_ENV
	if (AFS_NFSXLATORREQ(acred) && (aflags & FREAD)) {
	    if (!afs_AccessOK
		(tvc, PRSFS_READ, &treq,
		 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
		code = EACCES;
		goto done;
	    }
	}
#endif
#ifdef	AFS_AIX41_ENV
	if (aflags & FRSHARE) {
	    /*
	     * Hack for AIX 4.1:
	     *  Apparently it is possible for a file to get mapped without
	     *  either VNOP_MAP or VNOP_RDWR being called, if (1) it is a
	     *  sharable library, and (2) it has already been loaded.  We must
	     *  ensure that the credp is up to date.  We detect the situation
	     *  by checking for O_RSHARE at open time.
	     */
	    /*
	     * We keep the caller's credentials since an async daemon will
	     * handle the request at some point. We assume that the same
	     * credentials will be used.
	     */
	    ObtainWriteLock(&tvc->lock, 140);
	    if (!tvc->credp || (tvc->credp != acred)) {
		crhold(acred);
		if (tvc->credp) {
		    struct ucred *crp = tvc->credp;
		    tvc->credp = NULL;
		    crfree(crp);
		}
		tvc->credp = acred;
	    }
	    ReleaseWriteLock(&tvc->lock);
	}
#endif
	/* normal file or symlink */
	osi_FlushText(tvc);	/* only needed to flush text if text locked last time */
#ifdef AFS_BOZONLOCK_ENV
	afs_BozonLock(&tvc->pvnLock, tvc);
#endif
	osi_FlushPages(tvc, acred);
#ifdef AFS_BOZONLOCK_ENV
	afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
    }
    /* set date on file if open in O_TRUNC mode */
    if (aflags & FTRUNC) {
	/* this fixes touch */
	ObtainWriteLock(&tvc->lock, 123);
	tvc->f.m.Date = osi_Time();
	tvc->f.states |= CDirty;
	ReleaseWriteLock(&tvc->lock);
    }
    ObtainReadLock(&tvc->lock);
    if (writing)
	tvc->execsOrWriters++;
    tvc->opens++;
#if defined(AFS_SGI_ENV) || defined (AFS_LINUX26_ENV)
    if (writing && tvc->cred == NULL) {
	crhold(acred);
	tvc->cred = acred;
    }
#endif
    ReleaseReadLock(&tvc->lock);
    if ((afs_preCache != 0) && (writing == 0) && (vType(tvc) != VDIR) && 
	(!afs_BBusy())) {
	struct dcache *tdc;
	afs_size_t offset, len;

	tdc = afs_GetDCache(tvc, 0, &treq, &offset, &len, 1);
	if (!tdc)
	    goto done;		/* no cache entry available; skip the prefetch */

	ObtainSharedLock(&tdc->mflock, 865);
	if (!(tdc->mflags & DFFetchReq)) {
	    struct brequest *bp;

	    /* start the daemon (may already be running, however) */
	    UpgradeSToWLock(&tdc->mflock, 666);
	    tdc->mflags |= DFFetchReq;  /* guaranteed to be cleared by BKG or 
					   GetDCache */
	    /* last parm (1) tells bkg daemon to do an afs_PutDCache when it 
	       is done, since we don't want to wait for it to finish before 
	       doing so ourselves.
	    */
	    bp = afs_BQueue(BOP_FETCH, tvc, B_DONTWAIT, 0, acred,
			    (afs_size_t) 0, (afs_size_t) 1, tdc,
			    (void *)0, (void *)0);
	    if (!bp) {
		tdc->mflags &= ~DFFetchReq;
	    }
	    ReleaseWriteLock(&tdc->mflock);
	} else {
	    ReleaseSharedLock(&tdc->mflock);
	}
    }	
  done:
    afs_PutFakeStat(&fakestate);
    AFS_DISCON_UNLOCK();

    code = afs_CheckCode(code, &treq, 4);	/* avoid AIX -O bug */

    afs_Trace2(afs_iclSetp, CM_TRACE_OPEN, ICL_TYPE_POINTER, tvc,
	       ICL_TYPE_INT32, 999999);

    return code;
}
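
The prefetch branch near the end of afs_open illustrates a hand-off idiom: the caller holds a reference on the cache entry, sets DFFetchReq under the write lock so only one fetch is queued, and tells the background daemon to drop that reference itself because the opener will not wait around. Below is a standalone sketch of the same idea with invented names (struct chunk, queue_fetch) and a C11 atomic standing in for the flag-under-lock.

#include <stdatomic.h>

struct chunk {
    atomic_int fetch_requested;		/* analogue of DFFetchReq */
};

/* assumed helper: queue async work; if put_ref_when_done is nonzero the
 * worker releases the caller's reference when it finishes */
extern int queue_fetch(struct chunk *c, int put_ref_when_done);

static void
maybe_prefetch(struct chunk *c)
{
    int expected = 0;

    /* only the first caller gets to queue the fetch */
    if (atomic_compare_exchange_strong(&c->fetch_requested, &expected, 1)) {
	if (queue_fetch(c, 1) != 0)		/* queue full */
	    atomic_store(&c->fetch_requested, 0);	/* undo the claim */
    }
}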
Exemplo n.º 20
0
int
afs_MemRead(register struct vcache *avc, struct uio *auio,
            struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp,
            int noLock)
{
    afs_size_t totalLength;
    afs_size_t transferLength;
    afs_size_t filePos;
    afs_size_t offset, len, tlen;
    afs_int32 trimlen;
    struct dcache *tdc = 0;
    afs_int32 error, trybusy = 1;
#ifdef AFS_DARWIN80_ENV
    uio_t tuiop = NULL;
#else
    struct uio tuio;
    struct uio *tuiop = &tuio;
    struct iovec *tvec;
#endif
    afs_int32 code;
    struct vrequest treq;

    AFS_STATCNT(afs_MemRead);
    if (avc->vc_error)
        return EIO;

    /* check that we have the latest status info in the vnode cache */
    if ((code = afs_InitReq(&treq, acred)))
        return code;
    if (!noLock) {
        code = afs_VerifyVCache(avc, &treq);
        if (code) {
            code = afs_CheckCode(code, &treq, 8);	/* failed to get it */
            return code;
        }
    }
#ifndef	AFS_VM_RDWR_ENV
    if (AFS_NFSXLATORREQ(acred)) {
        if (!afs_AccessOK
                (avc, PRSFS_READ, &treq,
                 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
            return afs_CheckCode(EACCES, &treq, 9);
        }
    }
#endif

#ifndef AFS_DARWIN80_ENV
    tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
#endif
    totalLength = AFS_UIO_RESID(auio);
    filePos = AFS_UIO_OFFSET(auio);
    afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_INT32,
               totalLength, ICL_TYPE_OFFSET,
               ICL_HANDLE_OFFSET(avc->f.m.Length));
    error = 0;
    transferLength = 0;
    if (!noLock)
        ObtainReadLock(&avc->lock);
#if	defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
    if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
        hset(avc->flushDV, avc->f.m.DataVersion);
    }
#endif

    /*
     * Locks held:
     * avc->lock(R)
     */
    if (filePos >= avc->f.m.Length) {
        len = 0;
#ifdef AFS_DARWIN80_ENV
        trimlen = len;
        tuiop = afsio_darwin_partialcopy(auio, trimlen);
#else
        afsio_copy(auio, &tuio, tvec);
        trimlen = len;
        afsio_trim(&tuio, trimlen);
#endif
        AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
    }

    while (avc->f.m.Length > 0 && totalLength > 0) {
        /* read all of the cached info */
        if (filePos >= avc->f.m.Length)
            break;		/* all done */
        if (noLock) {
            if (tdc) {
                ReleaseReadLock(&tdc->lock);
                afs_PutDCache(tdc);
            }
            tdc = afs_FindDCache(avc, filePos);
            if (tdc) {
                ObtainReadLock(&tdc->lock);
                offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                len = tdc->f.chunkBytes - offset;
            }
        } else {
            /* a tricky question: does the presence of the DFFetching flag
             * mean that we're fetching the latest version of the file?  No.
             * The server could update the file as soon as the fetch responsible
             * for the setting of the DFFetching flag completes.
             *
             * However, the presence of the DFFetching flag (visible under
             * a dcache read lock since it is set and cleared only under a
             * dcache write lock) means that we're fetching as good a version
             * as was known to this client at the time of the last call to
             * afs_VerifyVCache, since the latter updates the stat cache's
             * m.DataVersion field under a vcache write lock, and from the
             * time that the DFFetching flag goes on in afs_GetDCache (before
             * the fetch starts), to the time it goes off (after the fetch
             * completes), afs_GetDCache keeps at least a read lock on the
             * vcache entry.
             *
             * This means that if the DFFetching flag is set, we can use that
             * data for any reads that must come from the current version of
             * the file (current == m.DataVersion).
             *
             * Another way of looking at this same point is this: if we're
             * fetching some data and then try to do an afs_VerifyVCache, the
             * VerifyVCache operation will not complete until after the
             * DFFetching flag is turned off and the dcache entry's f.versionNo
             * field is updated.
             *
             * Note, by the way, that if DFFetching is set,
             * m.DataVersion > f.versionNo (the latter is not updated until
             * after the fetch completes).
             */
            if (tdc) {
                ReleaseReadLock(&tdc->lock);
                afs_PutDCache(tdc);	/* before reusing tdc */
            }
            tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
            ObtainReadLock(&tdc->lock);
            /* now, first try to start transfer, if we'll need the data.  If
             * data already coming, we don't need to do this, obviously.  Type
             * 2 requests never return a null dcache entry, btw.
             */
            if (!(tdc->dflags & DFFetching)
                    && !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
                /* have cache entry, it is not coming in now,
                 * and we'll need new data */
tagain:
                if (trybusy && !afs_BBusy()) {
                    struct brequest *bp;
                    /* daemon is not busy */
                    ObtainSharedLock(&tdc->mflock, 665);
                    if (!(tdc->mflags & DFFetchReq)) {
                        /* start the daemon (may already be running, however) */
                        UpgradeSToWLock(&tdc->mflock, 666);
                        tdc->mflags |= DFFetchReq;
                        bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
                                        (afs_size_t) filePos, (afs_size_t) 0,
                                        tdc);
                        if (!bp) {
                            tdc->mflags &= ~DFFetchReq;
                            trybusy = 0;	/* Avoid bkg daemon since they're too busy */
                            ReleaseWriteLock(&tdc->mflock);
                            goto tagain;
                        }
                        ConvertWToSLock(&tdc->mflock);
                        /* don't use bp pointer! */
                    }
                    code = 0;
                    ConvertSToRLock(&tdc->mflock);
                    while (!code && tdc->mflags & DFFetchReq) {
                        afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
                                   ICL_TYPE_STRING, __FILE__, ICL_TYPE_INT32,
                                   __LINE__, ICL_TYPE_POINTER, tdc,
                                   ICL_TYPE_INT32, tdc->dflags);
                        /* don't need waiting flag on this one */
                        ReleaseReadLock(&tdc->mflock);
                        ReleaseReadLock(&tdc->lock);
                        ReleaseReadLock(&avc->lock);
                        code = afs_osi_SleepSig(&tdc->validPos);
                        ObtainReadLock(&avc->lock);
                        ObtainReadLock(&tdc->lock);
                        ObtainReadLock(&tdc->mflock);
                    }
                    ReleaseReadLock(&tdc->mflock);
                    if (code) {
                        error = code;
                        break;
                    }
                }
            }
            /* now data may have started flowing in (if DFFetching is on).  If
             * data is now streaming in, then wait for some interesting stuff.
             */
            code = 0;
            while (!code && (tdc->dflags & DFFetching)
                    && tdc->validPos <= filePos) {
                /* too early: wait for DFFetching flag to vanish,
                 * or data to appear */
                afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
                           __FILE__, ICL_TYPE_INT32, __LINE__,
                           ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32,
                           tdc->dflags);
                ReleaseReadLock(&tdc->lock);
                ReleaseReadLock(&avc->lock);
                code = afs_osi_SleepSig(&tdc->validPos);
                ObtainReadLock(&avc->lock);
                ObtainReadLock(&tdc->lock);
            }
            if (code) {
                error = code;
                break;
            }
            /* fetching flag gone, data is here, or we never tried
             * (BBusy for instance) */
            if (tdc->dflags & DFFetching) {
                /* still fetching, some new data is here:
                 * compute length and offset */
                offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                len = tdc->validPos - filePos;
            } else {
                /* no longer fetching, verify data version
                 * (avoid new GetDCache call) */
                if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)
                        && ((len = tdc->validPos - filePos) > 0)) {
                    offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                } else {
                    /* don't have current data, so get it below */
                    afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
                               ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
                               ICL_TYPE_HYPER, &avc->f.m.DataVersion,
                               ICL_TYPE_HYPER, &tdc->f.versionNo);
                    ReleaseReadLock(&tdc->lock);
                    afs_PutDCache(tdc);
                    tdc = NULL;
                }
            }

            if (!tdc) {
                /* If we get here, it was not possible to start the
                 * background daemon. With flag == 1 afs_GetDCache
                 * does the FetchData rpc synchronously.
                 */
                ReleaseReadLock(&avc->lock);
                tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
                ObtainReadLock(&avc->lock);
                if (tdc)
                    ObtainReadLock(&tdc->lock);
            }
        }

        afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD, ICL_TYPE_POINTER, tdc,
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
        if (!tdc) {
            error = EIO;
            break;
        }

        /*
         * Locks held:
         * avc->lock(R)
         * tdc->lock(R)
         */

        if (len > totalLength)
            len = totalLength;	/* will read len bytes */
        if (len <= 0) {		/* shouldn't get here if DFFetching is on */
            /* read past the end of a chunk, may not be at next chunk yet, and yet
             * also not at eof, so may have to supply fake zeros */
            len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;	/* bytes left in chunk addr space */
            if (len > totalLength)
                len = totalLength;	/* and still within xfr request */
            tlen = avc->f.m.Length - offset;	/* and still within file */
            if (len > tlen)
                len = tlen;
            if (len > AFS_ZEROS)
                len = sizeof(afs_zeros);	/* and in 0 buffer */
#ifdef AFS_DARWIN80_ENV
            trimlen = len;
            tuiop = afsio_darwin_partialcopy(auio, trimlen);
#else
            afsio_copy(auio, &tuio, tvec);
            trimlen = len;
            afsio_trim(&tuio, trimlen);
#endif
            AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
            if (code) {
                error = code;
                break;
            }
        } else {
            /* get the data from the mem cache */

            /* mung uio structure to be right for this transfer */
#ifdef AFS_DARWIN80_ENV
            trimlen = len;
            tuiop = afsio_darwin_partialcopy(auio, trimlen);
            uio_setoffset(tuiop, offset);
#else
            afsio_copy(auio, &tuio, tvec);
            trimlen = len;
            afsio_trim(&tuio, trimlen);
            tuio.afsio_offset = offset;
#endif

            code = afs_MemReadUIO(tdc->f.inode, tuiop);

            if (code) {
                error = code;
                break;
            }
        }
        /* otherwise we've read some, fixup length, etc and continue with next seg */
        len = len - AFS_UIO_RESID(tuiop);	/* compute amount really transferred */
        trimlen = len;
        afsio_skip(auio, trimlen);	/* update input uio structure */
        totalLength -= len;
        transferLength += len;
        filePos += len;

        if (len <= 0)
            break;		/* surprise eof */
#ifdef AFS_DARWIN80_ENV
        if (tuiop) {
            uio_free(tuiop);
            tuiop = 0;
        }
#endif
    }				/* the whole while loop */

    /*
     * Locks held:
     * avc->lock(R)
     * tdc->lock(R) if tdc
     */

    /* if we make it here with tdc non-zero, then it is the last chunk we
     * dealt with, and we have to release it when we're done.  We hold on
     * to it in case we need to do a prefetch.
     */
    if (tdc) {
        ReleaseReadLock(&tdc->lock);
        /* try to queue prefetch, if needed */
        if (!noLock &&
#ifndef AFS_VM_RDWR_ENV
                afs_preCache
#else
                1
#endif
           ) {
            afs_PrefetchChunk(avc, tdc, acred, &treq);
        }
        afs_PutDCache(tdc);
    }
    if (!noLock)
        ReleaseReadLock(&avc->lock);
#ifdef AFS_DARWIN80_ENV
    if (tuiop)
        uio_free(tuiop);
#else
    osi_FreeSmallSpace(tvec);
#endif
    error = afs_CheckCode(error, &treq, 10);
    return error;
}
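
The long comment in the read loop above boils down to a two-case rule: while DFFetching is set, the chunk may be read up to validPos without a version check; once the fetch has finished, the chunk is only usable if its versionNo still matches the vcache's DataVersion, and otherwise must be refetched. Below is a standalone sketch of that decision with invented names; the real code sleeps on validPos rather than returning 0.

#include <stdint.h>

struct chunk_state {
    int fetching;		/* DFFetching analogue */
    int64_t valid_pos;		/* file offset up to which data has arrived */
    uint64_t version;		/* f.versionNo analogue */
};

/* Returns how many bytes may be read at file_pos from this chunk,
 * 0 if we must wait for more data, or -1 if the chunk must be refetched. */
static int64_t
usable_bytes(const struct chunk_state *c, uint64_t file_version,
	     int64_t file_pos)
{
    if (c->fetching)
	return c->valid_pos > file_pos ? c->valid_pos - file_pos : 0;
    if (c->version == file_version && c->valid_pos > file_pos)
	return c->valid_pos - file_pos;
    return -1;			/* stale: go back to afs_GetDCache */
}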
Exemplo n.º 21
0
int
afs_UFSRead(register struct vcache *avc, struct uio *auio,
            struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp,
            int noLock)
{
    afs_size_t totalLength;
    afs_size_t transferLength;
    afs_size_t filePos;
    afs_size_t offset, len, tlen;
    afs_int32 trimlen;
    struct dcache *tdc = 0;
    afs_int32 error;
#ifdef AFS_DARWIN80_ENV
    uio_t tuiop=NULL;
#else
    struct uio tuio;
    struct uio *tuiop = &tuio;
    struct iovec *tvec;
#endif
    struct osi_file *tfile;
    afs_int32 code;
    int trybusy = 1;
    struct vrequest treq;

    AFS_STATCNT(afs_UFSRead);
    if (avc && avc->vc_error)
        return EIO;

    AFS_DISCON_LOCK();

    /* check that we have the latest status info in the vnode cache */
    if ((code = afs_InitReq(&treq, acred))) {
        AFS_DISCON_UNLOCK();
        return code;
    }
    if (!noLock) {
        if (!avc)
            osi_Panic("null avc in afs_UFSRead");
        else {
            code = afs_VerifyVCache(avc, &treq);
            if (code) {
                code = afs_CheckCode(code, &treq, 11);	/* failed to get it */
                AFS_DISCON_UNLOCK();
                return code;
            }
        }
    }
#ifndef	AFS_VM_RDWR_ENV
    if (AFS_NFSXLATORREQ(acred)) {
        if (!afs_AccessOK
                (avc, PRSFS_READ, &treq,
                 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
            AFS_DISCON_UNLOCK();
            return afs_CheckCode(EACCES, &treq, 12);
        }
    }
#endif

#ifndef AFS_DARWIN80_ENV
    tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
#endif
    totalLength = AFS_UIO_RESID(auio);
    filePos = AFS_UIO_OFFSET(auio);
    afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_INT32,
               totalLength, ICL_TYPE_OFFSET,
               ICL_HANDLE_OFFSET(avc->f.m.Length));
    error = 0;
    transferLength = 0;
    if (!noLock)
        ObtainReadLock(&avc->lock);
#if	defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
    if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
        hset(avc->flushDV, avc->f.m.DataVersion);
    }
#endif

    if (filePos >= avc->f.m.Length) {
        len = 0;
#ifdef AFS_DARWIN80_ENV
        trimlen = len;
        tuiop = afsio_darwin_partialcopy(auio, trimlen);
#else
        afsio_copy(auio, &tuio, tvec);
        trimlen = len;
        afsio_trim(&tuio, trimlen);
#endif
        AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
    }

    while (avc->f.m.Length > 0 && totalLength > 0) {
        /* read all of the cached info */
        if (filePos >= avc->f.m.Length)
            break;		/* all done */
        if (noLock) {
            if (tdc) {
                ReleaseReadLock(&tdc->lock);
                afs_PutDCache(tdc);
            }
            tdc = afs_FindDCache(avc, filePos);
            if (tdc) {
                ObtainReadLock(&tdc->lock);
                offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                len = tdc->validPos - filePos;
            }
        } else {
            /* a tricky question: does the presence of the DFFetching flag
             * mean that we're fetching the latest version of the file?  No.
             * The server could update the file as soon as the fetch responsible
             * for the setting of the DFFetching flag completes.
             *
             * However, the presence of the DFFetching flag (visible under
             * a dcache read lock since it is set and cleared only under a
             * dcache write lock) means that we're fetching as good a version
             * as was known to this client at the time of the last call to
             * afs_VerifyVCache, since the latter updates the stat cache's
             * m.DataVersion field under a vcache write lock, and from the
             * time that the DFFetching flag goes on in afs_GetDCache (before
             * the fetch starts), to the time it goes off (after the fetch
             * completes), afs_GetDCache keeps at least a read lock on the
             * vcache entry.
             *
             * This means that if the DFFetching flag is set, we can use that
             * data for any reads that must come from the current version of
             * the file (current == m.DataVersion).
             *
             * Another way of looking at this same point is this: if we're
             * fetching some data and then try to do an afs_VerifyVCache, the
             * VerifyVCache operation will not complete until after the
             * DFFetching flag is turned off and the dcache entry's f.versionNo
             * field is updated.
             *
             * Note, by the way, that if DFFetching is set,
             * m.DataVersion > f.versionNo (the latter is not updated until
             * after the fetch completes).
             */
            if (tdc) {
                ReleaseReadLock(&tdc->lock);
                afs_PutDCache(tdc);	/* before reusing tdc */
            }
            tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
#ifdef AFS_DISCON_ENV
            if (!tdc) {
                printf("Network down in afs_read");
                error = ENETDOWN;
                break;
            }
#endif /* AFS_DISCON_ENV */

            ObtainReadLock(&tdc->lock);
            /* now, first try to start transfer, if we'll need the data.  If
             * data already coming, we don't need to do this, obviously.  Type
             * 2 requests never return a null dcache entry, btw. */
            if (!(tdc->dflags & DFFetching)
                    && !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
                /* have cache entry, it is not coming in now, and we'll need new data */
tagain:
                if (trybusy && !afs_BBusy()) {
                    struct brequest *bp;
                    /* daemon is not busy */
                    ObtainSharedLock(&tdc->mflock, 667);
                    if (!(tdc->mflags & DFFetchReq)) {
                        UpgradeSToWLock(&tdc->mflock, 668);
                        tdc->mflags |= DFFetchReq;
                        bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
                                        (afs_size_t) filePos, (afs_size_t) 0,
                                        tdc);
                        if (!bp) {
                            /* Bkg table full; retry deadlocks */
                            tdc->mflags &= ~DFFetchReq;
                            trybusy = 0;	/* Avoid bkg daemon since they're too busy */
                            ReleaseWriteLock(&tdc->mflock);
                            goto tagain;
                        }
                        ConvertWToSLock(&tdc->mflock);
                    }
                    code = 0;
                    ConvertSToRLock(&tdc->mflock);
                    while (!code && tdc->mflags & DFFetchReq) {
                        afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
                                   ICL_TYPE_STRING, __FILE__, ICL_TYPE_INT32,
                                   __LINE__, ICL_TYPE_POINTER, tdc,
                                   ICL_TYPE_INT32, tdc->dflags);
                        /* don't need waiting flag on this one */
                        ReleaseReadLock(&tdc->mflock);
                        ReleaseReadLock(&tdc->lock);
                        ReleaseReadLock(&avc->lock);
                        code = afs_osi_SleepSig(&tdc->validPos);
                        ObtainReadLock(&avc->lock);
                        ObtainReadLock(&tdc->lock);
                        ObtainReadLock(&tdc->mflock);
                    }
                    ReleaseReadLock(&tdc->mflock);
                    if (code) {
                        error = code;
                        break;
                    }
                }
            }
            /* now data may have started flowing in (if DFFetching is on).  If
             * data is now streaming in, then wait for some interesting stuff.
             */
            code = 0;
            while (!code && (tdc->dflags & DFFetching)
                    && tdc->validPos <= filePos) {
                /* too early: wait for DFFetching flag to vanish,
                 * or data to appear */
                afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
                           __FILE__, ICL_TYPE_INT32, __LINE__,
                           ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32,
                           tdc->dflags);
                ReleaseReadLock(&tdc->lock);
                ReleaseReadLock(&avc->lock);
                code = afs_osi_SleepSig(&tdc->validPos);
                ObtainReadLock(&avc->lock);
                ObtainReadLock(&tdc->lock);
            }
            if (code) {
                error = code;
                break;
            }
            /* fetching flag gone, data is here, or we never tried
             * (BBusy for instance) */
            if (tdc->dflags & DFFetching) {
                /* still fetching, some new data is here:
                 * compute length and offset */
                offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                len = tdc->validPos - filePos;
            } else {
                /* no longer fetching, verify data version (avoid new
                 * GetDCache call) */
                if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)
                        && ((len = tdc->validPos - filePos) > 0)) {
                    offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
                } else {
                    /* don't have current data, so get it below */
                    afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
                               ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
                               ICL_TYPE_HYPER, &avc->f.m.DataVersion,
                               ICL_TYPE_HYPER, &tdc->f.versionNo);
                    ReleaseReadLock(&tdc->lock);
                    afs_PutDCache(tdc);
                    tdc = NULL;
                }
            }

            if (!tdc) {
                /* If we get here, it was not possible to start the
                 * background daemon. With flag == 1 afs_GetDCache
                 * does the FetchData rpc synchronously.
                 */
                ReleaseReadLock(&avc->lock);
                tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
                ObtainReadLock(&avc->lock);
                if (tdc)
                    ObtainReadLock(&tdc->lock);
            }
        }

        if (!tdc) {
            error = EIO;
            break;
        }
        len = tdc->validPos - filePos;
        afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD, ICL_TYPE_POINTER, tdc,
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
        if (len > totalLength)
            len = totalLength;	/* will read len bytes */
        if (len <= 0) {		/* shouldn't get here if DFFetching is on */
            afs_Trace4(afs_iclSetp, CM_TRACE_VNODEREAD2, ICL_TYPE_POINTER,
                       tdc, ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(tdc->validPos),
                       ICL_TYPE_INT32, tdc->f.chunkBytes, ICL_TYPE_INT32,
                       tdc->dflags);
            /* read past the end of a chunk, may not be at next chunk yet, and yet
             * also not at eof, so may have to supply fake zeros */
            len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;	/* bytes left in chunk addr space */
            if (len > totalLength)
                len = totalLength;	/* and still within xfr request */
            tlen = avc->f.m.Length - offset;	/* and still within file */
            if (len > tlen)
                len = tlen;
            if (len > AFS_ZEROS)
                len = sizeof(afs_zeros);	/* and in 0 buffer */
#ifdef AFS_DARWIN80_ENV
            trimlen = len;
            tuiop = afsio_darwin_partialcopy(auio, trimlen);
#else
            afsio_copy(auio, &tuio, tvec);
            trimlen = len;
            afsio_trim(&tuio, trimlen);
#endif
            AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
            if (code) {
                error = code;
                break;
            }
        } else {
            /* get the data from the file */
#ifdef IHINT
            if (tfile = tdc->ihint) {
                if (tdc->f.inode != tfile->inum) {
                    afs_warn("afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
                             tdc, tdc->f.inode, tfile->inum);
                    osi_UFSClose(tfile);
                    tdc->ihint = tfile = 0;
                    nihints--;
                }
            }
            if (tfile != 0) {
                usedihint++;
            } else
#endif /* IHINT */
#if defined(LINUX_USE_FH)
                tfile = (struct osi_file *)osi_UFSOpen_fh(&tdc->f.fh, tdc->f.fh_type);
#else
                tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
#endif
#ifdef AFS_DARWIN80_ENV
            trimlen = len;
            tuiop = afsio_darwin_partialcopy(auio, trimlen);
            uio_setoffset(tuiop, offset);
#else
            /* mung uio structure to be right for this transfer */
            afsio_copy(auio, &tuio, tvec);
            trimlen = len;
            afsio_trim(&tuio, trimlen);
            tuio.afsio_offset = offset;
#endif

#if defined(AFS_AIX41_ENV)
            AFS_GUNLOCK();
            code =
                VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
                          NULL, afs_osi_credp);
            AFS_GLOCK();
#elif defined(AFS_AIX32_ENV)
            code =
                VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
            /* Flush all JFS pages now for big performance gain in big file cases
             * If we do something like this, must check to be sure that AFS file
             * isn't mmapped... see afs_gn_map() for why.
             */
            /*
            	  if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
             many different ways to do similar things:
               so far, the best performing one is #2, but #1 might match it if we
               straighten out the confusion regarding which pages to flush.  It
               really does matter.
               1.	    vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
               2.	    vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
            			(len + PAGESIZE-1)/PAGESIZE);
               3.	    vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
               4.  	    vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
            	    tfile->vnode->v_gnode->gn_seg = NULL;
               5.       deletep
               6.       ipgrlse
               7.       ifreeseg
                      Unfortunately, this seems to cause frequent "cache corruption" episodes.
               	    vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
            			(len + PAGESIZE-1)/PAGESIZE);
            	  }
            */
#elif defined(AFS_AIX_ENV)
            code =
                VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t) & offset,
                          &tuio, NULL, NULL, -1);
#elif defined(AFS_SUN5_ENV)
            AFS_GUNLOCK();
#ifdef AFS_SUN510_ENV
            {
                caller_context_t ct;

                VOP_RWLOCK(tfile->vnode, 0, &ct);
                code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp, &ct);
                VOP_RWUNLOCK(tfile->vnode, 0, &ct);
            }
#else
            VOP_RWLOCK(tfile->vnode, 0);
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_RWUNLOCK(tfile->vnode, 0);
#endif
            AFS_GLOCK();
#elif defined(AFS_SGI_ENV)
            AFS_GUNLOCK();
            AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
            AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp,
                         code);
            AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
            AFS_GLOCK();
#elif defined(AFS_OSF_ENV)
            tuio.uio_rw = UIO_READ;
            AFS_GUNLOCK();
            VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp, code);
            AFS_GLOCK();
#elif defined(AFS_HPUX100_ENV)
            AFS_GUNLOCK();
            code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, afs_osi_credp);
            AFS_GLOCK();
#elif defined(AFS_LINUX20_ENV)
            AFS_GUNLOCK();
            code = osi_rdwr(tfile, &tuio, UIO_READ);
            AFS_GLOCK();
#elif defined(AFS_DARWIN80_ENV)
            AFS_GUNLOCK();
            code = VNOP_READ(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
            AFS_GLOCK();
#elif defined(AFS_DARWIN_ENV)
            AFS_GUNLOCK();
            VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_UNLOCK(tfile->vnode, 0, current_proc());
            AFS_GLOCK();
#elif defined(AFS_FBSD80_ENV)
            AFS_GUNLOCK();
            VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_UNLOCK(tfile->vnode, 0);
            AFS_GLOCK();
#elif defined(AFS_FBSD50_ENV)
            AFS_GUNLOCK();
            VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_UNLOCK(tfile->vnode, 0, curthread);
            AFS_GLOCK();
#elif defined(AFS_XBSD_ENV)
            AFS_GUNLOCK();
            VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
            code = VOP_READ(tfile->vnode, &tuio, 0, afs_osi_credp);
            VOP_UNLOCK(tfile->vnode, 0, curproc);
            AFS_GLOCK();
#else
            code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, afs_osi_credp);
#endif

#ifdef IHINT
            if (!tdc->ihint && nihints < maxIHint) {
                tdc->ihint = tfile;
                nihints++;
            } else
#endif /* IHINT */
                osi_UFSClose(tfile);

            if (code) {
                error = code;
                break;
            }
        }
        /* otherwise we've read some, fixup length, etc and continue with next seg */
        len = len - AFS_UIO_RESID(tuiop);	/* compute amount really transferred */
        trimlen = len;
        afsio_skip(auio, trimlen);	/* update input uio structure */
        totalLength -= len;
        transferLength += len;
        filePos += len;
        if (len <= 0)
            break;		/* surprise eof */
#ifdef AFS_DARWIN80_ENV
        if (tuiop) {
            uio_free(tuiop);
            tuiop = 0;
        }
#endif
    }

    /* if we make it here with tdc non-zero, then it is the last chunk we
     * dealt with, and we have to release it when we're done.  We hold on
     * to it in case we need to do a prefetch, obviously.
     */
    if (tdc) {
        ReleaseReadLock(&tdc->lock);
#if !defined(AFS_VM_RDWR_ENV)
        /* try to queue prefetch, if needed */
        if (!noLock) {
            if (!(tdc->mflags & DFNextStarted))
                afs_PrefetchChunk(avc, tdc, acred, &treq);
        }
#endif
        afs_PutDCache(tdc);
    }
    if (!noLock)
        ReleaseReadLock(&avc->lock);

#ifdef AFS_DARWIN80_ENV
    if (tuiop)
        uio_free(tuiop);
#else
    osi_FreeSmallSpace(tvec);
#endif
    AFS_DISCON_UNLOCK();
    error = afs_CheckCode(error, &treq, 13);
    return error;
}
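
The zero-fill branch above supplies fabricated zero bytes when a read lands past the data present in a chunk but before end of file, clamping the amount by what is left of the chunk's address space, of the caller's request, of the file, and of the static zero buffer. Below is a standalone sketch of just that clamping; ZERO_BUF_SIZE is an invented stand-in for sizeof(afs_zeros).

#include <stdint.h>

#define ZERO_BUF_SIZE 4096	/* stand-in for sizeof(afs_zeros) */

static int64_t
zero_fill_len(int64_t chunk_space_left, int64_t request_left,
	      int64_t file_bytes_left)
{
    int64_t len = chunk_space_left;

    if (len > request_left)
	len = request_left;
    if (len > file_bytes_left)
	len = file_bytes_left;
    if (len > ZERO_BUF_SIZE)
	len = ZERO_BUF_SIZE;
    return len > 0 ? len : 0;
}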
Exemplo n.º 22
0
afs_rmdir(OSI_VC_DECL(adp), char *aname, afs_ucred_t *acred)
#endif
{
    struct vrequest treq;
    register struct dcache *tdc;
    register struct vcache *tvc = NULL;
    register afs_int32 code;
    register struct afs_conn *tc;
    afs_size_t offset, len;
    struct AFSFetchStatus OutDirStatus;
    struct AFSVolSync tsync;
    struct afs_fakestat_state fakestate;
    XSTATS_DECLS;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_rmdir);

    afs_Trace2(afs_iclSetp, CM_TRACE_RMDIR, ICL_TYPE_POINTER, adp,
	       ICL_TYPE_STRING, aname);

    if ((code = afs_InitReq(&treq, acred)))
	goto done2;
    afs_InitFakeStat(&fakestate);

    if (strlen(aname) > AFSNAMEMAX) {
	code = ENAMETOOLONG;
	goto done;
    }

    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (code)
	goto done;

    code = afs_VerifyVCache(adp, &treq);
    if (code)
	goto done;

    /** If the volume is read-only, return error without making an RPC to the
      * fileserver
      */
    if (adp->f.states & CRO) {
	code = EROFS;
	goto done;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
	/* Disconnected read-only mode. */
	code = ENETDOWN;
	goto done;
    }

    tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);	/* test for error below */
    ObtainWriteLock(&adp->lock, 154);
    if (tdc)
	ObtainSharedLock(&tdc->lock, 633);
    if (tdc && (adp->f.states & CForeign)) {
	struct VenusFid unlinkFid;

	unlinkFid.Fid.Vnode = 0;
	code = afs_dir_Lookup(tdc, aname, &unlinkFid.Fid);
	if (code == 0) {
	    afs_int32 cached = 0;

	    unlinkFid.Cell = adp->f.fid.Cell;
	    unlinkFid.Fid.Volume = adp->f.fid.Fid.Volume;
	    if (unlinkFid.Fid.Unique == 0) {
		tvc =
		    afs_LookupVCache(&unlinkFid, &treq, &cached, adp, aname);
	    } else {
		ObtainReadLock(&afs_xvcache);
		tvc = afs_FindVCache(&unlinkFid, 0, 1 /* do xstats */ );
		ReleaseReadLock(&afs_xvcache);
	    }
	}
    }

    if (!AFS_IS_DISCON_RW) {
	/* Not disconnected, can connect to server. */
    	do {
	    tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK);
	    if (tc) {
	    	XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_REMOVEDIR);
	    	RX_AFS_GUNLOCK();
	    	code =
		    RXAFS_RemoveDir(tc->id,
		    		(struct AFSFid *)&adp->f.fid.Fid,
				aname,
				&OutDirStatus,
				&tsync);
	    	RX_AFS_GLOCK();
	    	XSTATS_END_TIME;
	    } else
	    	code = -1;
    	} while (afs_Analyze
	         (tc, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_REMOVEDIR,
	         SHARED_LOCK, NULL));

    	if (code) {
	    if (tdc) {
	    	ReleaseSharedLock(&tdc->lock);
	    	afs_PutDCache(tdc);
	    }

	    if (code < 0) {
	    	ObtainWriteLock(&afs_xcbhash, 491);
	    	afs_DequeueCallback(adp);
	    	adp->f.states &= ~CStatd;
	    	ReleaseWriteLock(&afs_xcbhash);
	    	osi_dnlc_purgedp(adp);
	    }
	    ReleaseWriteLock(&adp->lock);
	    goto done;
    	}

    	/* here if rpc worked; update the in-core link count */
    	adp->f.m.LinkCount = OutDirStatus.LinkCount;

    } else {
#if defined(AFS_DISCON_ENV)
    	/* Disconnected. */

	if (!tdc) {
	    ReleaseWriteLock(&adp->lock);
	    /* printf("afs_rmdir: No local dcache!\n"); */
	    code = ENETDOWN;
	    goto done;
	}
	
	if (!tvc) {
	    /* Find the vcache. */
	    struct VenusFid tfid;

	    tfid.Cell = adp->f.fid.Cell;
	    tfid.Fid.Volume = adp->f.fid.Fid.Volume;
	    code = afs_dir_Lookup(tdc, aname, &tfid.Fid);

	    ObtainSharedLock(&afs_xvcache, 764);
	    tvc = afs_FindVCache(&tfid, 0, 1 /* do xstats */ );
	    ReleaseSharedLock(&afs_xvcache);
	    
	    if (!tvc) {
		/* printf("afs_rmdir: Can't find dir's vcache!\n"); */
		ReleaseSharedLock(&tdc->lock);
	        afs_PutDCache(tdc);	/* drop ref count */
    	        ReleaseWriteLock(&adp->lock);
		code = ENETDOWN;
	        goto done;
	    }
	}

	if (tvc->f.m.LinkCount > 2) {
	    /* This dir contains more than . and .., thus it can't be
	     * deleted.
	     */
	    ReleaseSharedLock(&tdc->lock);
	    afs_PutDCache(tdc);
	    afs_PutVCache(tvc);
	    ReleaseWriteLock(&adp->lock);
	    code = ENOTEMPTY;
	    goto done;
	}

    	/* Make a shadow copy of the parent dir (if not done already).
	 * If we were created locally, then we don't need to have a shadow
	 * directory (as there's no server state to remember)
	 */
	if (!adp->f.shadow.vnode && !(adp->f.ddirty_flags & VDisconCreate)) {
	    afs_MakeShadowDir(adp, tdc);
	}

	adp->f.m.LinkCount--;
#endif				/* #ifdef AFS_DISCON_ENV */
    }				/* if (!AFS_IS_DISCON_RW) */

    if (tdc)
	UpgradeSToWLock(&tdc->lock, 634);
    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, &OutDirStatus, 1)) {
	/* we can do it locally */
	code = afs_dir_Delete(tdc, aname);
	if (code) {
	    ZapDCE(tdc);	/* surprise error -- invalid value */
	    DZap(tdc);
	}
    }
    if (tdc) {
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);	/* drop ref count */
    }

    if (tvc)
	osi_dnlc_purgedp(tvc);	/* get rid of any entries for this directory */
    else
	osi_dnlc_remove(adp, aname, 0);

    if (tvc) {
	ObtainWriteLock(&tvc->lock, 155);
	tvc->f.states &= ~CUnique;	/* For the dfs xlator */
#if defined(AFS_DISCON_ENV)
	if (AFS_IS_DISCON_RW) {
	    if (tvc->f.ddirty_flags & VDisconCreate) {
		/* If we were created whilst disconnected, removal doesn't
		 * need to get logged. Just go away gracefully */
		afs_DisconRemoveDirty(tvc);
	    } else {
		afs_DisconAddDirty(tvc, VDisconRemove, 1);
	    }
	}
#endif
	ReleaseWriteLock(&tvc->lock);
	afs_PutVCache(tvc);
    }
    ReleaseWriteLock(&adp->lock);
    /* don't worry about link count since dirs can not be hardlinked */
    code = 0;

  done:
    AFS_DISCON_UNLOCK();
    afs_PutFakeStat(&fakestate);
    code = afs_CheckCode(code, &treq, 27);
  done2:
    return code;
}
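
In the disconnected branch above, the client rejects the rmdir locally when the target's link count is above two: a directory's link count covers ".", its entry in the parent, and one ".." per subdirectory, so anything above two guarantees a child directory. A tiny standalone sketch of that quick test follows (invented name; the fileserver still performs the authoritative emptiness check for plain files).

/* A link count above 2 proves the directory still has subdirectories. */
static int
dir_known_nonempty(int link_count)
{
    return link_count > 2;
}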
Exemplo n.º 23
0
/* 
 * Given a FID, obtain or construct a dentry, or return an error.
 * This should be called with the BKL and AFS_GLOCK held.
 */
static struct dentry *get_dentry_from_fid(cred_t *credp, struct VenusFid *afid)
{
    struct vrequest treq;
    struct vcache *vcp;
    struct vattr vattr;
    struct inode *ip;
    struct dentry *dp;
    afs_int32 code;

    code = afs_InitReq(&treq, credp);
    if (code) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_dentry_from_fid(0x%08x/%d/%d.%d): afs_InitReq: %d\n",
	       afid->Cell, afid->Fid.Volume, afid->Fid.Vnode, afid->Fid.Unique,
	       code);
#endif
	return ERR_PTR(-afs_CheckCode(code, &treq, 101));
    }
    vcp = afs_GetVCache(afid, &treq, NULL, NULL);
    if (vcp == NULL) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_dentry_from_fid(0x%08x/%d/%d.%d): no vcache\n",
	       afid->Cell, afid->Fid.Volume, afid->Fid.Vnode, afid->Fid.Unique);
#endif
	return NULL;
    }

    /* 
     * Now, it might be that we just caused a directory vnode to
     * spring into existence, in which case its parent FID is unset.
     * We need to do something about that, but only because we care
     * in our own get_parent(), below -- the common code never looks
     * at parentVnode on directories, except for VIOCGETVCXSTATUS.
     * So, if this fails, we don't really care very much.
     */
    if (vType(vcp) == VDIR && vcp->mvstat != 2 && !vcp->f.parent.vnode)
	update_dir_parent(&treq, vcp);

    /*
     * If this is a volume root directory and fakestat is enabled,
     * we might need to replace the directory by a mount point.
     */
    code = UnEvalFakeStat(&treq, &vcp);
    if (code) {
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_dentry_from_fid(0x%08x/%d/%d.%d): UnEvalFakeStat: %d\n",
	       afid->Cell, afid->Fid.Volume, afid->Fid.Vnode, afid->Fid.Unique,
	       code);
#endif
	afs_PutVCache(vcp);
	return ERR_PTR(-afs_CheckCode(code, &treq, 101));
    }

    ip = AFSTOV(vcp);
    afs_getattr(vcp, &vattr, credp);
    afs_fill_inode(ip, &vattr);

    /* d_alloc_anon might block, so we shouldn't hold the glock */
    AFS_GUNLOCK();
    dp = d_alloc_anon(ip);
    AFS_GLOCK();

    if (!dp) {
	iput(ip);
#ifdef OSI_EXPORT_DEBUG
	printk("afs: get_dentry_from_fid(0x%08x/%d/%d.%d): out of memory\n",
	       afid->Cell, afid->Fid.Volume, afid->Fid.Vnode, afid->Fid.Unique);
#endif
	return ERR_PTR(-ENOMEM);
    }

    dp->d_op = &afs_dentry_operations;
    return dp;
}
Exemplo n.º 24
0
int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
		afs_ucred_t * acred)
#endif
{
    struct vrequest treq;
    afs_int32 code;
    struct afs_fakestat_state fakestate;

    AFS_STATCNT(afs_lockctl);
    if ((code = afs_InitReq(&treq, acred)))
	return code;
    afs_InitFakeStat(&fakestate);

    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&avc, &fakestate, &treq);
    if (code) {
	goto done;
    }
#if defined(AFS_SGI_ENV)
    if ((acmd == F_GETLK) || (acmd == F_RGETLK)) {
#else
    if (acmd == F_GETLK) {
#endif
	if (af->l_type == F_UNLCK) {
	    code = 0;
	    goto done;
	}
	code = HandleGetLock(avc, af, &treq, clid);
	code = afs_CheckCode(code, &treq, 2);	/* defeat buggy AIX optimz */
	goto done;
    } else if ((acmd == F_SETLK) || (acmd == F_SETLKW)
#if defined(AFS_SGI_ENV)
	       || (acmd == F_RSETLK) || (acmd == F_RSETLKW)) {
#else
	) {
#endif

    if ((avc->f.states & CRO)) {
	/* for RO volumes, don't do anything for locks; the fileserver doesn't
	 * even track them. A write lock should not be possible, though. */
	if (af->l_type == F_WRLCK) {
	    code = EBADF;
	} else {
	    code = 0;
	}
	goto done;
    }

    /* Java VMs ask for l_len=(long)-1 regardless of OS/CPU */
    if ((sizeof(af->l_len) == 8) && (af->l_len == 0x7fffffffffffffffLL))
	af->l_len = 0;
    /* next line makes byte range locks always succeed,
     * even when they should block */
    if (af->l_whence != 0 || af->l_start != 0 || af->l_len != 0) {
	DoLockWarning(acred);
	code = 0;
	goto done;
    }
    /* otherwise we can turn this into a whole-file flock */
    if (af->l_type == F_RDLCK)
	code = LOCK_SH;
    else if (af->l_type == F_WRLCK)
	code = LOCK_EX;
    else if (af->l_type == F_UNLCK)
	code = LOCK_UN;
    else {
	code = EINVAL;		/* unknown lock type */
	goto done;
    }
    if (((acmd == F_SETLK)
#if 	defined(AFS_SGI_ENV)
	 || (acmd == F_RSETLK)
#endif
	) && code != LOCK_UN)
	code |= LOCK_NB;	/* non-blocking, s.v.p. */
#if defined(AFS_DARWIN_ENV)
    code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
#elif defined(AFS_SGI_ENV)
    AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
    code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
    AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#else
    code = HandleFlock(avc, code, &treq, 0, 0 /*!onlymine */ );
#endif
    code = afs_CheckCode(code, &treq, 3);	/* defeat AIX -O bug */
    goto done;
    }
    code = EINVAL;
done:
    afs_PutFakeStat(&fakestate);
    AFS_DISCON_UNLOCK();
    return code;
}
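
afs_lockctl above degrades byte-range fcntl requests into whole-file flock operations: F_RDLCK, F_WRLCK and F_UNLCK map to LOCK_SH, LOCK_EX and LOCK_UN, and LOCK_NB is added for the non-waiting F_SETLK case. Below is a standalone userland sketch of that mapping; fcntl_to_flock_op is an invented name and the SGI F_RSETLK variants are omitted.

#include <fcntl.h>
#include <sys/file.h>

/* Returns the flock(2)-style operation, or -1 for an unknown lock type. */
static int
fcntl_to_flock_op(short l_type, int cmd)
{
    int op;

    if (l_type == F_RDLCK)
	op = LOCK_SH;
    else if (l_type == F_WRLCK)
	op = LOCK_EX;
    else if (l_type == F_UNLCK)
	op = LOCK_UN;
    else
	return -1;

    if (cmd == F_SETLK && op != LOCK_UN)
	op |= LOCK_NB;		/* F_SETLK must not block */
    return op;
}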


/*
 * Get a description of the first lock which would
 * block the lock specified.  If the specified lock
 * would succeed, fill in the lock structure with 'F_UNLCK'.
 *
 * To do that, we have to ask the server for the lock
 * count if:
 *    1. The file is not locked by this machine.
 *    2. Asking for write lock, and only the current
 *       PID has the file read locked.
 */
static int
HandleGetLock(struct vcache *avc, struct AFS_FLOCK *af,
	      struct vrequest *areq, int clid)
{
    afs_int32 code;
    struct AFS_FLOCK flock;

    lockIdSet(&flock, NULL, clid);

    ObtainWriteLock(&avc->lock, 122);
    if (avc->flockCount == 0) {
	/*
	 * We don't know ourselves, so ask the server. Unfortunately, we
	 * don't know the pid.  Not even the server knows the pid.  Besides,
	 * the process with the lock is on another machine
	 */
	code = GetFlockCount(avc, areq);
	if (code == 0 || (af->l_type == F_RDLCK && code > 0)) {
	    af->l_type = F_UNLCK;
	    goto unlck_leave;
	}
	if (code > 0)
	    af->l_type = F_RDLCK;
	else
	    af->l_type = F_WRLCK;

	af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
	af->l_sysid = 0;
#endif
	goto done;
    }

    if (af->l_type == F_RDLCK) {
	/*
	 * We want a read lock.  If there are only
	 * read locks, or we are the one with the
	 * write lock, say it is unlocked.
	 */
	if (avc->flockCount > 0 ||	/* only read locks */
	    !lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    af->l_type = F_UNLCK;
	    goto unlck_leave;
	}

	/* one write lock, but who? */
	af->l_type = F_WRLCK;	/* not us, so lock would block */
	if (avc->slocks) {	/* we know who, so tell */
	    af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
	    af->l_sysid = avc->slocks->sysid;
#endif
	} else {
	    af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
	    af->l_sysid = 0;
#endif
	}
	goto done;
    }

    /*
     * Ok, we want a write lock.  If there is a write lock
     * already, and it is not this process, we fail.
     */
    if (avc->flockCount < 0) {
	if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    af->l_type = F_WRLCK;
	    if (avc->slocks) {
		af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = avc->slocks->sysid;
#endif
	    } else {
		af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = 0;
#endif
	    }
	    goto done;
	}
	/* we are the one with the write lock */
	af->l_type = F_UNLCK;
	goto unlck_leave;
    }

    /*
     * Want a write lock, and we know there are read locks.
     * If there is more than one, or it isn't us, we cannot lock.
     */
    if ((avc->flockCount > 1)
	|| lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	struct SimpleLocks *slp;

	af->l_type = F_RDLCK;
	af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
	af->l_sysid = 0;
#endif
	/* find a pid that isn't our own */
	for (slp = avc->slocks; slp; slp = slp->next) {
	    if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
		af->l_pid = slp->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		af->l_sysid = avc->slocks->sysid;
#endif
		break;
	    }
	}
	goto done;
    }

    /*
     * Want a write lock, and there is just one read lock, and it
     * is this process with a read lock.  Ask the server if there
     * are any more processes with the file locked.
     */
    code = GetFlockCount(avc, areq);
    if (code == 0 || code == 1) {
	af->l_type = F_UNLCK;
	goto unlck_leave;
    }
    if (code > 0)
	af->l_type = F_RDLCK;
    else
	af->l_type = F_WRLCK;
    af->l_pid = 0;
#if defined(AFS_HAVE_FLOCK_SYSID)
    af->l_sysid = 0;
#endif

  done:
    af->l_whence = 0;
    af->l_start = 0;
    af->l_len = 0;		/* to end of file */

  unlck_leave:
    ReleaseWriteLock(&avc->lock);
    return 0;
}
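
/*
 * Illustrative userspace sketch, not part of the AFS sources: this is the
 * fcntl(F_GETLK) convention that HandleGetLock implements for the cache
 * manager.  If the probed lock could be granted, l_type comes back as
 * F_UNLCK; otherwise the structure describes one blocking lock.  The file
 * name and lock range below are arbitrary examples.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

static int
probe_write_lock(const char *path)
{
    struct flock fl;
    int fd = open(path, O_RDWR);

    if (fd < 0)
	return -1;
    memset(&fl, 0, sizeof(fl));
    fl.l_type = F_WRLCK;	/* ask "would a write lock succeed?" */
    fl.l_whence = SEEK_SET;
    fl.l_start = 0;
    fl.l_len = 0;		/* to end of file, as HandleGetLock reports */
    if (fcntl(fd, F_GETLK, &fl) == 0) {
	if (fl.l_type == F_UNLCK)
	    printf("%s: a write lock would succeed\n", path);
	else
	    printf("%s: blocked by pid %ld (%s lock)\n", path,
		   (long)fl.l_pid, fl.l_type == F_WRLCK ? "write" : "read");
    }
    close(fd);
    return 0;
}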

/* Get the 'flock' count from the server.  This comes back in a 'spare'
 * field from a GetStatus RPC.  If we have any problems with the RPC,
 * we lie and say the file is unlocked.  If we ask any 'old' fileservers,
 * the spare field will be a zero, saying the file is unlocked.  This is
 * OK, as a further 'lock' request will do the right thing.
 */
static int
GetFlockCount(struct vcache *avc, struct vrequest *areq)
{
    struct afs_conn *tc;
    afs_int32 code;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct rx_connection *rxconn;
    int temp;
    XSTATS_DECLS;
    /* If we're disconnected, lie and say that we've got no locks. Ick */
    if (AFS_IS_DISCONNECTED)
        return 0;

    temp = areq->flags & O_NONBLOCK;
    areq->flags |= O_NONBLOCK;

    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
	if (tc) {
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_FetchStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
				  &OutStatus, &CallBack, &tsync);
	    RX_AFS_GLOCK();
	    XSTATS_END_TIME;
	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
	      SHARED_LOCK, NULL));

    if (!temp)
	areq->flags &= ~O_NONBLOCK;	/* restore the caller's original O_NONBLOCK setting */

    if (code) {
	return (0);		/* failed, say it is 'unlocked' */
    } else {
	return ((int)OutStatus.lockCount);
    }
}
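
/*
 * Illustrative helper, not part of the AFS sources: this is how the value
 * returned by GetFlockCount is interpreted by HandleGetLock above -- a
 * positive count means that many read locks, a negative count means a
 * write lock, and zero means unlocked (or that the RPC failed, per the
 * comment above GetFlockCount).
 */
#include <fcntl.h>

static int
lock_count_to_ltype(int lockCount)
{
    if (lockCount == 0)
	return F_UNLCK;
    return (lockCount > 0) ? F_RDLCK : F_WRLCK;
}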
Example No. 25
0
int
afs_CheckRootVolume(void)
{
    char rootVolName[32];
    struct volume *tvp = NULL;
    int usingDynroot = afs_GetDynrootEnable();
    int localcell;

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
	strcpy(rootVolName, "root.afs");
    } else {
	strcpy(rootVolName, afs_rootVolumeName);
    }

    if (usingDynroot) {
	afs_GetDynrootFid(&afs_rootFid);
	tvp = afs_GetVolume(&afs_rootFid, NULL, READ_LOCK);
    } else {
	struct cell *lc = afs_GetPrimaryCell(READ_LOCK);

	if (!lc)
	    return ENOENT;
	localcell = lc->cellNum;
	afs_PutCell(lc, READ_LOCK);
	tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL, READ_LOCK);
	if (!tvp) {
	    char buf[128];
	    int len = strlen(rootVolName);

	    if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
		strcpy(buf, rootVolName);
		afs_strcat(buf, ".readonly");
		tvp = afs_GetVolumeByName(buf, localcell, 1, NULL, READ_LOCK);
	    }
	}
	if (tvp) {
	    int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
	    afs_rootFid.Cell = localcell;
	    if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
		&& afs_globalVp) {
		/* If we had a root fid before and it changed location we reset
		 * the afs_globalVp so that it will be reevaluated.
		 * Just decrement the reference count. This only occurs during
		 * initial cell setup and can panic the machine if we set the
		 * count to zero and fs checkv is executed when the current
		 * directory is /afs.
		 */
#ifdef AFS_LINUX20_ENV
		{
		    struct vrequest treq;
		    struct vattr vattr;
		    cred_t *credp;
		    struct dentry *dp;
		    struct vcache *vcp;

		    afs_rootFid.Fid.Volume = volid;
		    afs_rootFid.Fid.Vnode = 1;
		    afs_rootFid.Fid.Unique = 1;

		    credp = crref();
		    if (afs_InitReq(&treq, credp))
			goto out;
		    vcp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
		    if (!vcp)
			goto out;
		    afs_getattr(vcp, &vattr, credp);
		    afs_fill_inode(AFSTOV(vcp), &vattr);

		    dp = d_find_alias(AFSTOV(afs_globalVp));

#if defined(AFS_LINUX24_ENV)
#if defined(HAVE_DCACHE_LOCK)
		    spin_lock(&dcache_lock);
#else
		    spin_lock(&AFSTOV(vcp)->i_lock);
#endif
#if defined(AFS_LINUX26_ENV)
		    spin_lock(&dp->d_lock);
#endif
#endif
		    list_del_init(&dp->d_alias);
		    list_add(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
		    dp->d_inode = AFSTOV(vcp);
#if defined(AFS_LINUX24_ENV)
#if defined(AFS_LINUX26_ENV)
		    spin_unlock(&dp->d_lock);
#endif
#if defined(HAVE_DCACHE_LOCK)
		    spin_unlock(&dcache_lock);
#else
		    spin_unlock(&AFSTOV(vcp)->i_lock);
#endif
#endif
		    dput(dp);

		    AFS_FAST_RELE(afs_globalVp);
		    afs_globalVp = vcp;
		out:
		    crfree(credp);
		}
#else
#ifdef AFS_DARWIN80_ENV
		afs_PutVCache(afs_globalVp);
#else
		AFS_FAST_RELE(afs_globalVp);
#endif
		afs_globalVp = 0;
#endif
	    }
	    afs_rootFid.Fid.Volume = volid;
	    afs_rootFid.Fid.Vnode = 1;
	    afs_rootFid.Fid.Unique = 1;
	}
    }
    if (tvp) {
	afs_initState = 300;	/* won */
	afs_osi_Wakeup(&afs_initState);
	afs_PutVolume(tvp, READ_LOCK);
    }
    if (afs_rootFid.Fid.Volume)
	return 0;
    else
	return ENOENT;
}
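
/*
 * Illustrative userspace sketch, not part of the AFS sources, of the
 * root-volume name fallback used above: if looking up rootVolName fails
 * and the name does not already end in ".readonly", try "<name>.readonly"
 * next.  The helper name and buffer handling are examples only.
 */
#include <string.h>

static int
readonly_fallback(const char *rootVolName, char *buf, size_t buflen)
{
    size_t len = strlen(rootVolName);

    /* already a .readonly name, or no room for the suffix: no fallback */
    if ((len >= 9 && strcmp(rootVolName + len - 9, ".readonly") == 0)
	|| len + sizeof(".readonly") > buflen)
	return 0;
    strcpy(buf, rootVolName);
    strcat(buf, ".readonly");
    return 1;
}

/* e.g. readonly_fallback("root.afs", buf, sizeof(buf)) puts "root.afs.readonly" in buf */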
Example No. 26
0
/*
 * This is almost exactly like the PFlush() routine in afs_pioctl.c,
 * but that routine is static.  We are about to change a file from
 * normal caching to bypassing its cache.  Therefore, we want to
 * free up any cache space in use by the file, and throw out any
 * existing VM pages for the file.  We keep track of the number of
 * times we go back and forth from caching to bypass.
 */
void
afs_TransitionToBypass(struct vcache *avc,
		       afs_ucred_t *acred, int aflags)
{

    afs_int32 code;
    struct vrequest treq;
    int setDesire = 0;
    int setManual = 0;

    if (!avc)
	return;

    if (aflags & TRANSChangeDesiredBit)
	setDesire = 1;
    if (aflags & TRANSSetManualBit)
	setManual = 1;

#ifdef AFS_BOZONLOCK_ENV
    afs_BozonLock(&avc->pvnLock, avc);	/* Since afs_TryToSmush will do a pvn_vptrunc */
#else
    AFS_GLOCK();
#endif

    ObtainWriteLock(&avc->lock, 925);
    /*
     * Someone may have beat us to doing the transition - we had no lock
     * when we checked the flag earlier.  No cause to panic, just return.
     */
    if (avc->cachingStates & FCSBypass)
	goto done;

    /* If we never cached this, just change state */
    if (setDesire && (!(avc->cachingStates & FCSBypass))) {
	avc->cachingStates |= FCSBypass;
	goto done;
    }

    /* cg2v, try to store any chunks not written 20071204 */
    if (avc->execsOrWriters > 0) {
	code = afs_InitReq(&treq, acred);
	if (!code)
	    code = afs_StoreAllSegments(avc, &treq, AFS_SYNC | AFS_LASTSTORE);
    }

#if 0
    /* also cg2v, don't dequeue the callback */
    ObtainWriteLock(&afs_xcbhash, 956);
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
#endif
    avc->f.states &= ~(CStatd | CDirty);      /* next reference will re-stat */
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);
    if (avc->linkData && !(avc->f.states & CCore)) {
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }

    avc->cachingStates |= FCSBypass;    /* Set the bypass flag */
    if(setDesire)
	avc->cachingStates |= FCSDesireBypass;
    if(setManual)
	avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;

done:
    ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#else
    AFS_GUNLOCK();
#endif
}
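
/*
 * Minimal sketch, not part of the AFS sources, of the flag bookkeeping
 * that afs_TransitionToBypass performs once the cache has been flushed.
 * The FCS*_demo bit values below are placeholders for illustration, not
 * the real definitions from the AFS headers.
 */
#define FCSBypass_demo        0x1	/* placeholder bit value */
#define FCSDesireBypass_demo  0x2	/* placeholder bit value */
#define FCSManuallySet_demo   0x4	/* placeholder bit value */

static void
apply_bypass_flags(unsigned int *cachingStates, unsigned int *cachingTransitions,
		   int setDesire, int setManual)
{
    *cachingStates |= FCSBypass_demo;		/* now bypassing the cache */
    if (setDesire)
	*cachingStates |= FCSDesireBypass_demo;
    if (setManual)
	*cachingStates |= FCSManuallySet_demo;
    (*cachingTransitions)++;	/* count caching <-> bypass transitions */
}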
Example No. 27
0
/* don't set CDirty in here because RPC is called synchronously */
int 
afs_symlink(OSI_VC_DECL(adp), char *aname, struct vattr *attrs, 
	    char *atargetName, struct vcache **tvcp, afs_ucred_t *acred)
{
    afs_uint32 now = 0;
    struct vrequest treq;
    afs_int32 code = 0;
    struct afs_conn *tc;
    struct VenusFid newFid;
    struct dcache *tdc;
    afs_size_t offset, len;
    afs_int32 alen;
    struct server *hostp = 0;
    struct vcache *tvc;
    struct AFSStoreStatus InStatus;
    struct AFSFetchStatus OutFidStatus, OutDirStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct volume *volp = 0;
    struct afs_fakestat_state fakestate;
    struct rx_connection *rxconn;
    XSTATS_DECLS;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_symlink);
    afs_Trace2(afs_iclSetp, CM_TRACE_SYMLINK, ICL_TYPE_POINTER, adp,
	       ICL_TYPE_STRING, aname);

    if ((code = afs_InitReq(&treq, acred)))
	goto done2;

    afs_InitFakeStat(&fakestate);

    AFS_DISCON_LOCK();
    
    code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (code)
	goto done;

    if (strlen(aname) > AFSNAMEMAX || strlen(atargetName) > AFSPATHMAX) {
	code = ENAMETOOLONG;
	goto done;
    }

    if (afs_IsDynroot(adp)) {
	code = afs_DynrootVOPSymlink(adp, acred, aname, atargetName);
	goto done;
    }
    if (afs_IsDynrootMount(adp)) {
	code = EROFS;
	goto done;
    }

    code = afs_VerifyVCache(adp, &treq);
    if (code) {
	code = afs_CheckCode(code, &treq, 30);
	goto done;
    }

    /* If the volume is read-only, return an error without making an RPC to
     * the fileserver. */
    if (adp->f.states & CRO) {
	code = EROFS;
	goto done;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
        code = ENETDOWN;
        goto done;
    }
    
    InStatus.Mask = AFS_SETMODTIME | AFS_SETMODE;
    InStatus.ClientModTime = osi_Time();
    alen = strlen(atargetName);	/* we want it to include the null */
    if ( (*atargetName == '#' || *atargetName == '%') && alen > 1 && atargetName[alen-1] == '.') {
	InStatus.UnixModeBits = 0644;	/* mt pt: null from "." at end */
	if (alen == 1)
	    alen++;		/* Empty string */
    } else {
	InStatus.UnixModeBits = 0755;
	alen++;			/* add in the null */
    }
    tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);
    volp = afs_FindVolume(&adp->f.fid, READ_LOCK);	/*parent is also in same vol */
    ObtainWriteLock(&adp->lock, 156);
    if (tdc)
	ObtainWriteLock(&tdc->lock, 636);
    /* No further locks: if the SymLink succeeds, it does not matter what happens
     * to our local copy of the directory. If somebody tampers with it in the meantime,
     * the copy will be invalidated */
    if (!AFS_IS_DISCON_RW) {
	do {
	    tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK, &rxconn);
	    if (tc) {
		hostp = tc->parent->srvr->server;
		XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SYMLINK);
		if (adp->f.states & CForeign) {
		    now = osi_Time();
		    RX_AFS_GUNLOCK();
		    code = 
			RXAFS_DFSSymlink(rxconn,
					 (struct AFSFid *)&adp->f.fid.Fid,
					 aname, atargetName, &InStatus,
					 (struct AFSFid *)&newFid.Fid,
					 &OutFidStatus, &OutDirStatus, 
					 &CallBack, &tsync);
		    RX_AFS_GLOCK();
		} else {
		    RX_AFS_GUNLOCK();
		    code =
			RXAFS_Symlink(rxconn, (struct AFSFid *)&adp->f.fid.Fid,
				      aname, atargetName, &InStatus,
				      (struct AFSFid *)&newFid.Fid, 
				      &OutFidStatus, &OutDirStatus, &tsync);
		    RX_AFS_GLOCK();
	    	}
		XSTATS_END_TIME;
	    } else
		code = -1;
	} while (afs_Analyze
		    (tc, rxconn, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_SYMLINK,
		     SHARED_LOCK, NULL));
    } else {
	newFid.Cell = adp->f.fid.Cell;
	newFid.Fid.Volume = adp->f.fid.Fid.Volume;
	afs_GenFakeFid(&newFid, VREG, 0);
    }

    ObtainWriteLock(&afs_xvcache, 40);
    if (code) {
	if (code < 0) {
	    ObtainWriteLock(&afs_xcbhash, 499);
	    afs_DequeueCallback(adp);
	    adp->f.states &= ~CStatd;
	    ReleaseWriteLock(&afs_xcbhash);
	    osi_dnlc_purgedp(adp);
	}
	ReleaseWriteLock(&adp->lock);
	ReleaseWriteLock(&afs_xvcache);
	if (tdc) {
	    ReleaseWriteLock(&tdc->lock);
	    afs_PutDCache(tdc);
	}
	goto done;
    }
    /* otherwise, we should see if we can make the change to the dir locally */
    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, &OutDirStatus, 1)) {
	/* we can do it locally */
	ObtainWriteLock(&afs_xdcache, 293);
	/* If the following fails because the name has been created in the meantime, the
	 * directory is out-of-date - the file server knows best! */
	code = afs_dir_Create(tdc, aname, &newFid.Fid);
	ReleaseWriteLock(&afs_xdcache);
	if (code && !AFS_IS_DISCON_RW) {
	    ZapDCE(tdc);	/* surprise error -- use invalid value */
	    DZap(tdc);
	}
    }
    if (tdc) {
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }
    newFid.Cell = adp->f.fid.Cell;
    newFid.Fid.Volume = adp->f.fid.Fid.Volume;
    ReleaseWriteLock(&adp->lock);

    /* now we're done with parent dir, create the link's entry.  Note that
     * no one can get a pointer to the new cache entry until we release 
     * the xvcache lock. */
    tvc = afs_NewVCache(&newFid, hostp);
    if (!tvc)
    {
	code = -2;
	ReleaseWriteLock(&afs_xvcache);
	goto done;
    }
    ObtainWriteLock(&tvc->lock, 157);
    ObtainWriteLock(&afs_xcbhash, 500);
    tvc->f.states |= CStatd;	/* have valid info */
    tvc->f.states &= ~CBulkFetching;

    if (adp->f.states & CForeign) {
	tvc->f.states |= CForeign;
	/* We don't have to worry about losing the callback since we're doing it
	 * under the afs_xvcache lock.  Actually, afs_NewVCache may drop the
	 * afs_xvcache lock if it calls afs_FlushVCache. */
	tvc->cbExpires = CallBack.ExpirationTime + now;
	afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), volp);
    } else {
	tvc->cbExpires = 0x7fffffff;	/* never expires, they can't change */
	/* since it never expires, we don't have to queue the callback */
    }
    ReleaseWriteLock(&afs_xcbhash);

    if (AFS_IS_DISCON_RW) {
	attrs->va_mode = InStatus.UnixModeBits;
	afs_GenDisconStatus(adp, tvc, &newFid, attrs, &treq, VLNK);
	code = afs_DisconCreateSymlink(tvc, atargetName, &treq);
	if (code) {
	    /* XXX - When this goes wrong, we need to tidy up the changes we made to
	     * the parent, and get rid of the vcache we just created */
	    ReleaseWriteLock(&tvc->lock);
	    ReleaseWriteLock(&afs_xvcache);
	    afs_PutVCache(tvc);
	    goto done;
	}
	afs_DisconAddDirty(tvc, VDisconCreate, 0);
    } else {
	afs_ProcessFS(tvc, &OutFidStatus, &treq);
    }

    if (!tvc->linkData) {
	tvc->linkData = afs_osi_Alloc(alen);
	osi_Assert(tvc->linkData != NULL);
	strncpy(tvc->linkData, atargetName, alen - 1);
	tvc->linkData[alen - 1] = 0;
    }
    ReleaseWriteLock(&tvc->lock);
    ReleaseWriteLock(&afs_xvcache);
    if (tvcp)
    	*tvcp = tvc;
    else
	afs_PutVCache(tvc);
    code = 0;
  done:
    afs_PutFakeStat(&fakestate);
    if (volp)
	afs_PutVolume(volp, READ_LOCK);
    AFS_DISCON_UNLOCK();
    code = afs_CheckCode(code, &treq, 31);
  done2:
    return code;
}
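
/*
 * Illustrative userspace sketch, not part of the AFS sources: the mode-bit
 * check in afs_symlink above reflects the AFS mount-point convention -- a
 * symlink whose target begins with '#' (or '%' for a read/write mount) and
 * ends with '.' is treated as a mount point (mode 0644) rather than an
 * ordinary symlink (mode 0755).  The cell and volume names below are
 * arbitrary examples.
 */
#include <stdio.h>
#include <unistd.h>

static int
make_example_mount_point(const char *dirname)
{
    /* "#cell:volume." -- note the leading '#' and the trailing '.' */
    const char *target = "#example.org:root.cell.";

    if (symlink(target, dirname) != 0) {
	perror("symlink");
	return -1;
    }
    return 0;
}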
Example No. 28
0
/**
 * @param aname Volume name.
 * @param acell Cell id.
 * @param agood If set, also set up the related volume entries
 *        (agood >= 2 prefetches the RO volume, agood == 3 the backup volume).
 * @param areq Request record; may be NULL when the caller does not care
 *        about the ENODEV/ETIMEDOUT distinction.
 * @param locktype Type of lock to be used.
 * @return Volume or NULL on failure.
 */
static struct volume *
afs_NewVolumeByName(char *aname, afs_int32 acell, int agood,
		    struct vrequest *areq, afs_int32 locktype)
{
    afs_int32 code, type = 0;
    struct volume *tv, *tv1;
    struct vldbentry *tve;
    struct nvldbentry *ntve;
    struct uvldbentry *utve;
    struct cell *tcell;
    char *tbuffer, *ve;
    struct afs_conn *tconn;
    struct vrequest treq;
    struct rx_connection *rxconn;

    if (strlen(aname) > VL_MAXNAMELEN)	/* Invalid volume name */
	return NULL;

    tcell = afs_GetCell(acell, READ_LOCK);
    if (!tcell) {
	return NULL;
    }

    /* allow null request if we don't care about ENODEV/ETIMEDOUT distinction */
    if (!areq)
	areq = &treq;


    afs_Trace2(afs_iclSetp, CM_TRACE_GETVOL, ICL_TYPE_STRING, aname,
	       ICL_TYPE_POINTER, aname);
    tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    tve = (struct vldbentry *)(tbuffer + 1024);
    ntve = (struct nvldbentry *)tve;
    utve = (struct uvldbentry *)tve;
    afs_InitReq(&treq, afs_osi_credp);	/* *must* be unauth for vldb */
    do {
	tconn =
	    afs_ConnByMHosts(tcell->cellHosts, tcell->vlport, tcell->cellNum,
			     &treq, SHARED_LOCK, 0, &rxconn);
	if (tconn) {
	    if (tconn->srvr->server->flags & SNO_LHOSTS) {
		type = 0;
		RX_AFS_GUNLOCK();
		code = VL_GetEntryByNameO(rxconn, aname, tve);
		RX_AFS_GLOCK();
	    } else if (tconn->srvr->server->flags & SYES_LHOSTS) {
		type = 1;
		RX_AFS_GUNLOCK();
		code = VL_GetEntryByNameN(rxconn, aname, ntve);
		RX_AFS_GLOCK();
	    } else {
		type = 2;
		RX_AFS_GUNLOCK();
		code = VL_GetEntryByNameU(rxconn, aname, utve);
		RX_AFS_GLOCK();
		if (!(tconn->srvr->server->flags & SVLSRV_UUID)) {
		    if (code == RXGEN_OPCODE) {
			type = 1;
			RX_AFS_GUNLOCK();
			code = VL_GetEntryByNameN(rxconn, aname, ntve);
			RX_AFS_GLOCK();
			if (code == RXGEN_OPCODE) {
			    type = 0;
			    tconn->srvr->server->flags |= SNO_LHOSTS;
			    RX_AFS_GUNLOCK();
			    code = VL_GetEntryByNameO(rxconn, aname, tve);
			    RX_AFS_GLOCK();
			} else if (!code)
			    tconn->srvr->server->flags |= SYES_LHOSTS;
		    } else if (!code)
			tconn->srvr->server->flags |= SVLSRV_UUID;
		}
		lastnvcode = code;
	    }
	} else
	    code = -1;
    } while (afs_Analyze(tconn, rxconn, code, NULL, &treq, -1,	/* no op code for this */
			 SHARED_LOCK, tcell));

    if (code) {
	/* If the client has yet to contact this cell and contact failed due
	 * to network errors, mark the VLDB servers as back up.
	 * That the client tried and failed can be determined from the
	 * fact that there was a downtime incident, but CHasVolRef is not set.
	 */
    /* RT 48959 - unclear if this should really go */
#if 0
	if (areq->networkError && !(tcell->states & CHasVolRef)) {
	    int i;
	    struct server *sp;
	    struct srvAddr *sap;
	    for (i = 0; i < AFS_MAXCELLHOSTS; i++) {
		if ((sp = tcell->cellHosts[i]) == NULL)
		    break;
		for (sap = sp->addr; sap; sap = sap->next_sa)
		    afs_MarkServerUpOrDown(sap, 0);
	    }
	}
#endif
	afs_CopyError(&treq, areq);
	osi_FreeLargeSpace(tbuffer);
	afs_PutCell(tcell, READ_LOCK);
	return NULL;
    }
    /*
     * Check to see if this cell has not yet referenced a volume.  If
     * it hasn't, it's just about to change its status, and we need to mark
     * this fact down. Note that it is remotely possible that afs_SetupVolume
     * could fail and we would still not have a volume reference.
     */
    if (!(tcell->states & CHasVolRef)) {
	tcell->states |= CHasVolRef;
	afs_stats_cmperf.numCellsContacted++;
    }
    /* First time a volume in this cell has been referenced */
    if (type == 2)
	ve = (char *)utve;
    else if (type == 1)
	ve = (char *)ntve;
    else
	ve = (char *)tve;
    tv = afs_SetupVolume(0, aname, ve, tcell, agood, type, &treq);
    if ((agood == 3) && tv && tv->backVol) {
	/*
	 * This means that very soon we'll ask for the BK volume so
	 * we'll prefetch it (well, we did already).
	 */
	tv1 =
	    afs_SetupVolume(tv->backVol, (char *)0, ve, tcell, 0, type, &treq);
	if (tv1) {
	    tv1->refCount--;
	}
    }
    if ((agood >= 2) && tv && tv->roVol) {
	/*
	 * This means that very soon we'll ask for the RO volume so
	 * we'll prefetch it (well, we did already).
	 */
	tv1 = afs_SetupVolume(tv->roVol, NULL, ve, tcell, 0, type, &treq);
	if (tv1) {
	    tv1->refCount--;
	}
    }
    osi_FreeLargeSpace(tbuffer);
    afs_PutCell(tcell, READ_LOCK);
    return tv;

}				/*afs_NewVolumeByName */
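
/*
 * Minimal sketch, not part of the AFS sources, of the VLDB lookup fallback
 * used above: try the newest VL_GetEntryByName* interface first and, when
 * the server answers "unknown opcode", retry the older variants in turn.
 * The stub lookup functions and the local RXGEN_OPCODE_DEMO value are
 * placeholders for illustration only.
 */
#define RXGEN_OPCODE_DEMO (-455)	/* placeholder for the Rx "unknown opcode" error */

static int lookup_uuid(const char *name) { (void)name; return RXGEN_OPCODE_DEMO; }	/* stub */
static int lookup_n(const char *name)    { (void)name; return RXGEN_OPCODE_DEMO; }	/* stub */
static int lookup_old(const char *name)  { (void)name; return 0; }			/* stub */

static int
lookup_volume_entry(const char *name)
{
    int code = lookup_uuid(name);	/* newest interface first */

    if (code == RXGEN_OPCODE_DEMO) {
	code = lookup_n(name);
	if (code == RXGEN_OPCODE_DEMO)
	    code = lookup_old(name);	/* oldest interface */
    }
    return code;
}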
Example No. 29
0
int
afs_getattr(OSI_VC_DECL(avc), struct vattr *attrs, afs_ucred_t *acred)
#endif
{
    afs_int32 code;
    struct vrequest treq;
    struct unixuser *au;
    int inited = 0;
    OSI_VC_CONVERT(avc);

    AFS_STATCNT(afs_getattr);
    afs_Trace2(afs_iclSetp, CM_TRACE_GETATTR, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));

    if (afs_fakestat_enable && avc->mvstat == 1) {
	struct afs_fakestat_state fakestat;

	code = afs_InitReq(&treq, acred);
	if (code)
	    return code;
	afs_InitFakeStat(&fakestat);
	code = afs_TryEvalFakeStat(&avc, &fakestat, &treq);
	if (code) {
	    afs_PutFakeStat(&fakestat);
	    return code;
	}

	code = afs_CopyOutAttrs(avc, attrs);
	afs_PutFakeStat(&fakestat);
	return code;
    }
#if defined(AFS_SUN5_ENV)
    if (flags & ATTR_HINT) {
	code = afs_CopyOutAttrs(avc, attrs);
	return code;
    }
#endif
#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
    if (avc->f.states & CUBCinit) {
	code = afs_CopyOutAttrs(avc, attrs);
	return code;
    }
#endif

    AFS_DISCON_LOCK();

#ifdef AFS_BOZONLOCK_ENV
    afs_BozonLock(&avc->pvnLock, avc);
#endif

    if (afs_shuttingdown)
	return EIO;

    if (!(avc->f.states & CStatd)) {
	if (!(code = afs_InitReq(&treq, acred))) {
	    code = afs_VerifyVCache2(avc, &treq);
	    inited = 1;
	}
    } else
	code = 0;

#if defined(AFS_SUN5_ENV) || defined(AFS_BOZONLOCK_ENV)
    if (code == 0)
	osi_FlushPages(avc, acred);
#endif
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#endif


    if (code == 0) {
	osi_FlushText(avc);	/* only needed to flush text if text locked last time */
	code = afs_CopyOutAttrs(avc, attrs);

	if (afs_nfsexporter) {
	    if (!inited) {
		if ((code = afs_InitReq(&treq, acred)))
		    return code;
		inited = 1;
	    }
	    if (AFS_NFSXLATORREQ(acred)) {
		if ((vType(avc) != VDIR)
		    && !afs_AccessOK(avc, PRSFS_READ, &treq,
				     CHECK_MODE_BITS |
				     CMB_ALLOW_EXEC_AS_READ)) {
		    return EACCES;
		}
	    }
	    if ((au = afs_FindUser(treq.uid, -1, READ_LOCK))) {
		struct afs_exporter *exporter = au->exporter;

		if (exporter && !(afs_nfsexporter->exp_states & EXP_UNIXMODE)) {
		    unsigned int ubits;
		    /*
		     *  If the remote user wishes to enforce default Unix mode semantics,
		     *  like in the nfs exporter case, we OR in the user bits
		     *  into the group and other bits. We need to do this
		     *  because there is no RFS_ACCESS call and thus nfs
		     *  clients implement nfs_access by interpreting the 
		     *  mode bits in the traditional way, which of course
		     *  loses with afs.
		     */
		    ubits = (attrs->va_mode & 0700) >> 6;
		    attrs->va_mode = attrs->va_mode | ubits | (ubits << 3);
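		    /* For example, with this adjustment a 0640 file appears
		     * as 0666 and a 0750 directory as 0777 to such a client
		     * (illustrative values, not from the sources). */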
		    /* If it's the root of AFS, replace the inode number with the
		     * inode number of the mounted-on directory; otherwise this
		     * confuses getwd()... */
#ifdef AFS_LINUX22_ENV
		    if (avc == afs_globalVp) {
			struct inode *ip = AFSTOV(avc)->i_sb->s_root->d_inode;
			attrs->va_nodeid = ip->i_ino;	/* VTOI()? */
		    }
#else
		    if (
#if defined(AFS_DARWIN_ENV)
			vnode_isvroot(AFSTOV(avc))
#elif defined(AFS_NBSD50_ENV)
			AFSTOV(avc)->v_vflag & VV_ROOT
#else
			AFSTOV(avc)->v_flag & VROOT
#endif
			) {
			struct vnode *vp = AFSTOV(avc);

#ifdef AFS_DARWIN80_ENV
			/* XXX vp = vnode_mount(vp)->mnt_vnodecovered; */
			vp = 0;
#else
			vp = vp->v_vfsp->vfs_vnodecovered;
			if (vp) {	/* Ignore weird failures */
#ifdef AFS_SGI62_ENV
			    attrs->va_nodeid = VnodeToIno(vp);
#else
			    struct inode *ip;

			    ip = (struct inode *)VTOI(vp);
			    if (ip)	/* Ignore weird failures */
				attrs->va_nodeid = ip->i_number;
#endif
			}
#endif
		    }
#endif /* AFS_LINUX22_ENV */
		}
		afs_PutUser(au, READ_LOCK);
	    }
	}