int
afs_UFSHandleLink(register struct vcache *avc, struct vrequest *areq)
{
    register struct dcache *tdc;
    register char *tp, *rbuf;
    void *tfile;
    afs_size_t offset, len;
    afs_int32 tlen, alen;
    register afs_int32 code;

    /* Two link formats: links protected 644 have a "." at the
     * end of the file name, which we turn into a null.  For the others,
     * protected 755, we append a null to the end. */
    AFS_STATCNT(afs_UFSHandleLink);
    if (!avc->linkData) {
        tdc = afs_GetDCache(avc, (afs_size_t) 0, areq, &offset, &len, 0);
        afs_Trace3(afs_iclSetp, CM_TRACE_UFSLINK, ICL_TYPE_POINTER, avc,
                   ICL_TYPE_POINTER, tdc, ICL_TYPE_OFFSET,
                   ICL_HANDLE_OFFSET(avc->f.m.Length));
        if (!tdc) {
            if (AFS_IS_DISCONNECTED)
                return ENETDOWN;
            else
                return EIO;
        }
        /* otherwise we have the data loaded, go for it */
        if (len > 1024) {
            afs_PutDCache(tdc);
            return EFAULT;
        }
        if (avc->f.m.Mode & 0111)
            alen = len + 1;	/* regular link */
        else
            alen = len;		/* mt point */
        rbuf = (char *)osi_AllocLargeSpace(AFS_LRALLOCSIZ);
        tlen = len;
        ObtainReadLock(&tdc->lock);
#if defined(LINUX_USE_FH)
        tfile = osi_UFSOpen_fh(&tdc->f.fh, tdc->f.fh_type);
#else
        tfile = osi_UFSOpen(tdc->f.inode);
#endif
        code = afs_osi_Read(tfile, -1, rbuf, tlen);
        osi_UFSClose(tfile);
        ReleaseReadLock(&tdc->lock);
        afs_PutDCache(tdc);
        rbuf[alen - 1] = '\0';
        alen = strlen(rbuf) + 1;
        tp = afs_osi_Alloc(alen);	/* make room for terminating null */
        memcpy(tp, rbuf, alen);
        osi_FreeLargeSpace(rbuf);
        if (code != tlen) {
            afs_osi_Free(tp, alen);
            return EIO;
        }
        avc->linkData = tp;
    }
    return 0;
}
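
A minimal caller sketch (not part of the original example): once afs_UFSHandleLink() returns 0, avc->linkData holds the NUL-terminated link target. example_readlink is a hypothetical name used only to show the calling pattern implied by the code above.

static int
example_readlink(struct vcache *avc, struct vrequest *areq, char **alink)
{
    int code;

    code = afs_UFSHandleLink(avc, areq);    /* fills avc->linkData on success */
    if (code)
        return code;                        /* ENETDOWN, EIO, or EFAULT */
    *alink = avc->linkData;                 /* NUL-terminated target path */
    return 0;
}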
Example #2
/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
static void
BPath(struct brequest *ab)
{
    struct dcache *tdc = NULL;
    struct vcache *tvc = NULL;
    struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
    struct dentry *dp = NULL;
#endif
    afs_size_t offset, len;
    struct vrequest *treq = NULL;
    afs_int32 code;

    AFS_STATCNT(BPath);
    if ((code = afs_CreateReq(&treq, ab->cred))) {
	return;
    }
    AFS_GUNLOCK();
#ifdef AFS_LINUX22_ENV
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &dp);
    if (dp)
	tvn = (struct vnode *)dp->d_inode;
#else
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &tvn);
#endif
    AFS_GLOCK();
    osi_FreeLargeSpace((char *)ab->ptr_parm[0]);	/* free path name buffer here */
    if (code) {
	afs_DestroyReq(treq);
	return;
    }
    /* now path may not have been in afs, so check that before calling our cache manager */
    if (!tvn || !IsAfsVnode(tvn)) {
	/* release it and give up */
	if (tvn) {
#ifdef AFS_LINUX22_ENV
	    dput(dp);
#else
	    AFS_RELE(tvn);
#endif
	}
	afs_DestroyReq(treq);
	return;
    }
    tvc = VTOAFS(tvn);
    /* here we know its an afs vnode, so we can get the data for the chunk */
    tdc = afs_GetDCache(tvc, ab->size_parm[0], treq, &offset, &len, 1);
    if (tdc) {
	afs_PutDCache(tdc);
    }
#ifdef AFS_LINUX22_ENV
    dput(dp);
#else
    AFS_RELE(tvn);
#endif
    afs_DestroyReq(treq);
}
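
The comment at the top of BPath spells out the background-request contract: ptr_parm[0] is a heap-allocated path buffer (which BPath itself releases with osi_FreeLargeSpace) and size_parm[0] is the chunk number to fetch. A hedged producer sketch under those assumptions; queue_bop_path() is a hypothetical stand-in for the real daemon enqueue routine, which this example does not show.

extern int queue_bop_path(char *path, afs_size_t chunk, afs_ucred_t *acred); /* hypothetical */

static int
example_queue_path_prefetch(const char *path, afs_size_t chunk, afs_ucred_t *acred)
{
    char *pathbuf = osi_AllocLargeSpace(AFS_LRALLOCSIZ);

    if (!pathbuf)
        return ENOMEM;
    strncpy(pathbuf, path, AFS_LRALLOCSIZ - 1);
    pathbuf[AFS_LRALLOCSIZ - 1] = '\0';     /* BPath frees pathbuf when it runs */

    /* The daemon would later invoke BPath(ab) with ab->ptr_parm[0] = pathbuf,
     * ab->size_parm[0] = chunk, and ab->cred = acred. */
    return queue_bop_path(pathbuf, chunk, acred);
}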
Example #3
int
osi_UFSClose(register struct osi_file *afile)
{
    AFS_STATCNT(osi_Close);
    if (afile) {
	if (OSIFILE_INODE(afile)) {
	    filp_close(afile->filp, NULL);
	}
    }

    osi_FreeLargeSpace(afile);
    return 0;
}
Example #4
int
afs_MemHandleLink(struct vcache *avc, struct vrequest *areq)
{
    struct dcache *tdc;
    char *tp, *rbuf;
    afs_size_t offset, len;
    afs_int32 tlen, alen;
    afs_int32 code;

    AFS_STATCNT(afs_MemHandleLink);
    /* Two link formats: links protected 644 have a "." at the end of
     * the file name, which we turn into a null.  For the others,
     * protected 755, we append a null to the end. */
    if (!avc->linkData) {
	void *addr;
	tdc = afs_GetDCache(avc, (afs_size_t) 0, areq, &offset, &len, 0);
	if (!tdc) {
	    return EIO;
	}
	/* otherwise we have the data loaded, go for it */
	if (len > 1024) {
	    afs_PutDCache(tdc);
	    return EFAULT;
	}
	if (avc->f.m.Mode & 0111)
	    alen = len + 1;	/* regular link */
	else
	    alen = len;		/* mt point */
	rbuf = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
	ObtainReadLock(&tdc->lock);
	addr = afs_MemCacheOpen(&tdc->f.inode);
	tlen = len;
	code = afs_MemReadBlk(addr, 0, rbuf, tlen);
	afs_MemCacheClose(addr);
	ReleaseReadLock(&tdc->lock);
	afs_PutDCache(tdc);
	rbuf[alen - 1] = 0;
	alen = strlen(rbuf) + 1;
	tp = afs_osi_Alloc(alen);	/* make room for terminating null */
	osi_Assert(tp != NULL);
	memcpy(tp, rbuf, alen);
	osi_FreeLargeSpace(rbuf);
	if (code != len) {
	    afs_osi_Free(tp, alen);
	    return EIO;
	}
	avc->linkData = tp;
    }
    return 0;
}
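
Examples 1 and 4 differ only in the cache backend they read the link data from, so a caller would normally pick one based on the configured cache type. A minimal dispatch sketch, assuming the cacheDiskType global and the AFS_FCACHE_TYPE_* constants used elsewhere in these examples; example_HandleLink is a hypothetical name, not the dispatcher OpenAFS itself ships.

static int
example_HandleLink(struct vcache *avc, struct vrequest *areq)
{
    extern int cacheDiskType;

    if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
        return afs_MemHandleLink(avc, areq);    /* memory-cache backend (Example #4) */
    return afs_UFSHandleLink(avc, areq);        /* disk/UFS-cache backend (Example #1) */
}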
Example #5
int
osi_UFSClose(struct osi_file *afile)
{
    AFS_STATCNT(osi_Close);
    if (afile) {
	if (FILE_INODE(&afile->file)) {
	    struct file *filp = &afile->file;
	    if (filp->f_op && filp->f_op->release)
		filp->f_op->release(FILE_INODE(filp), filp);
	    iput(FILE_INODE(filp));
	}
    }

    osi_FreeLargeSpace(afile);
    return 0;
}
Example #6
void *
osi_UFSOpen(afs_dcache_id_t *ainode)
{
    struct osi_file *afile = NULL;
    extern int cacheDiskType;

    AFS_STATCNT(osi_UFSOpen);
    if (cacheDiskType != AFS_FCACHE_TYPE_UFS) {
	osi_Panic("UFSOpen called for non-UFS cache\n");
    }
    if (!afs_osicred_initialized) {
	memset(&afs_osi_cred, 0, sizeof(afs_ucred_t));
	crhold(&afs_osi_cred);	/* don't let it evaporate, since it is static */
	afs_osicred_initialized = 1;
    }
    AFS_GUNLOCK();
    afile = kmalloc(sizeof(struct osi_file), GFP_NOFS);
    if (!afile) {
	osi_Panic("osi_UFSOpen: Failed to allocate %d bytes for osi_file.\n",
		  (int)sizeof(struct osi_file));
    }
    memset(afile, 0, sizeof(struct osi_file));

    afile->filp = afs_linux_raw_open(ainode);
    if (afile->filp) {
        afile->size = i_size_read(FILE_INODE(afile->filp));
    }
    AFS_GLOCK();

    if (!afile->filp) {
        osi_FreeLargeSpace(afile);
        return NULL;
    }

    afile->offset = 0;
    afile->proc = (int (*)())0;
    return (void *)afile;
}
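
Taken together with Examples #1 and #3, osi_UFSOpen / afs_osi_Read / osi_UFSClose form a simple open-read-close round trip over a dcache entry's backing file. A hedged sketch of that pattern, assuming the caller holds the dcache read lock as Example #1 does; the -1 offset asks afs_osi_Read to read from the file's current position, matching its use there.

static afs_int32
example_read_chunk(struct dcache *tdc, char *buf, afs_int32 len)
{
    void *tfile;
    afs_int32 nread;

    tfile = osi_UFSOpen(&tdc->f.inode);     /* NULL if the raw open failed */
    if (!tfile)
        return -1;
    nread = afs_osi_Read(tfile, -1, buf, len);
    osi_UFSClose(tfile);
    return nread;                           /* callers compare this against len */
}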
Example #7
/**
 * @param aname Volume name.
 * @param acell Cell id.
 * @param agood
 * @param areq Request type.
 * @param locktype Type of lock to be used.
 * @return Volume or NULL if failure.
 */
static struct volume *
afs_NewVolumeByName(char *aname, afs_int32 acell, int agood,
		    struct vrequest *areq, afs_int32 locktype)
{
    afs_int32 code, type = 0;
    struct volume *tv, *tv1;
    struct vldbentry *tve;
    struct nvldbentry *ntve;
    struct uvldbentry *utve;
    struct cell *tcell;
    char *tbuffer, *ve;
    struct afs_conn *tconn;
    struct vrequest treq;
    struct rx_connection *rxconn;

    if (strlen(aname) > VL_MAXNAMELEN)	/* Invalid volume name */
	return NULL;

    tcell = afs_GetCell(acell, READ_LOCK);
    if (!tcell) {
	return NULL;
    }

    /* allow null request if we don't care about ENODEV/ETIMEDOUT distinction */
    if (!areq)
	areq = &treq;


    afs_Trace2(afs_iclSetp, CM_TRACE_GETVOL, ICL_TYPE_STRING, aname,
	       ICL_TYPE_POINTER, aname);
    tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    tve = (struct vldbentry *)(tbuffer + 1024);
    ntve = (struct nvldbentry *)tve;
    utve = (struct uvldbentry *)tve;
    afs_InitReq(&treq, afs_osi_credp);	/* *must* be unauth for vldb */
    do {
	tconn =
	    afs_ConnByMHosts(tcell->cellHosts, tcell->vlport, tcell->cellNum,
			     &treq, SHARED_LOCK, 0, &rxconn);
	if (tconn) {
	    if (tconn->srvr->server->flags & SNO_LHOSTS) {
		type = 0;
		RX_AFS_GUNLOCK();
		code = VL_GetEntryByNameO(rxconn, aname, tve);
		RX_AFS_GLOCK();
	    } else if (tconn->srvr->server->flags & SYES_LHOSTS) {
		type = 1;
		RX_AFS_GUNLOCK();
		code = VL_GetEntryByNameN(rxconn, aname, ntve);
		RX_AFS_GLOCK();
	    } else {
		type = 2;
		RX_AFS_GUNLOCK();
		code = VL_GetEntryByNameU(rxconn, aname, utve);
		RX_AFS_GLOCK();
		if (!(tconn->srvr->server->flags & SVLSRV_UUID)) {
		    if (code == RXGEN_OPCODE) {
			type = 1;
			RX_AFS_GUNLOCK();
			code = VL_GetEntryByNameN(rxconn, aname, ntve);
			RX_AFS_GLOCK();
			if (code == RXGEN_OPCODE) {
			    type = 0;
			    tconn->srvr->server->flags |= SNO_LHOSTS;
			    RX_AFS_GUNLOCK();
			    code = VL_GetEntryByNameO(rxconn, aname, tve);
			    RX_AFS_GLOCK();
			} else if (!code)
			    tconn->srvr->server->flags |= SYES_LHOSTS;
		    } else if (!code)
			tconn->srvr->server->flags |= SVLSRV_UUID;
		}
		lastnvcode = code;
	    }
	} else
	    code = -1;
    } while (afs_Analyze(tconn, rxconn, code, NULL, &treq, -1,	/* no op code for this */
			 SHARED_LOCK, tcell));

    if (code) {
	/* If the client has yet to contact this cell and contact failed due
	 * to network errors, mark the VLDB servers as back up.
	 * That the client tried and failed can be determined from the
	 * fact that there was a downtime incident, but CHasVolRef is not set.
	 */
    /* RT 48959 - unclear if this should really go */
#if 0
	if (areq->networkError && !(tcell->states & CHasVolRef)) {
	    int i;
	    struct server *sp;
	    struct srvAddr *sap;
	    for (i = 0; i < AFS_MAXCELLHOSTS; i++) {
		if ((sp = tcell->cellHosts[i]) == NULL)
		    break;
		for (sap = sp->addr; sap; sap = sap->next_sa)
		    afs_MarkServerUpOrDown(sap, 0);
	    }
	}
#endif
	afs_CopyError(&treq, areq);
	osi_FreeLargeSpace(tbuffer);
	afs_PutCell(tcell, READ_LOCK);
	return NULL;
    }
    /*
     * Check to see if this cell has not yet referenced a volume.  If
     * it hasn't, it's just about to change its status, and we need to mark
     * this fact down. Note that it is remotely possible that afs_SetupVolume
     * could fail and we would still not have a volume reference.
     */
    if (!(tcell->states & CHasVolRef)) {
	tcell->states |= CHasVolRef;
	afs_stats_cmperf.numCellsContacted++;
    }
    /* First time a volume in this cell has been referenced */
    if (type == 2)
	ve = (char *)utve;
    else if (type == 1)
	ve = (char *)ntve;
    else
	ve = (char *)tve;
    tv = afs_SetupVolume(0, aname, ve, tcell, agood, type, &treq);
    if ((agood == 3) && tv && tv->backVol) {
	/*
	 * This means that very soon we'll ask for the BK volume so
	 * we'll prefetch it (well we did already.)
	 */
	tv1 =
	    afs_SetupVolume(tv->backVol, (char *)0, ve, tcell, 0, type, &treq);
	if (tv1) {
	    tv1->refCount--;
	}
    }
    if ((agood >= 2) && tv && tv->roVol) {
	/*
	 * This means that very soon we'll ask for the RO volume so
	 * we'll prefetch it (well we did already.)
	 */
	tv1 = afs_SetupVolume(tv->roVol, NULL, ve, tcell, 0, type, &treq);
	if (tv1) {
	    tv1->refCount--;
	}
    }
    osi_FreeLargeSpace(tbuffer);
    afs_PutCell(tcell, READ_LOCK);
    return tv;

}				/*afs_NewVolumeByName */
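
A hedged caller sketch for the lookup above: agood of 1 requests no RO/BK prefetch, and a NULL request is explicitly allowed per the comment in the function. afs_PutVolume is assumed to be the matching reference release; it does not appear in this example.

static int
example_volume_exists(char *volname, afs_int32 cell)
{
    struct volume *tv;

    tv = afs_NewVolumeByName(volname, cell, 1 /* agood */, NULL, READ_LOCK);
    if (!tv)
        return 0;                   /* bad name, unknown cell, or VLDB failure */
    afs_PutVolume(tv, READ_LOCK);   /* assumed release call (not shown above) */
    return 1;
}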
Example #8
int
afs_StoreAllSegments(struct vcache *avc, struct vrequest *areq,
		     int sync)
{
    struct dcache *tdc;
    afs_int32 code = 0;
    afs_int32 index;
    afs_int32 origCBs, foreign = 0;
    int hash;
    afs_hyper_t newDV, oldDV;	/* DV when we start, and finish, respectively */
    struct dcache **dcList;
    unsigned int i, j, minj, moredata, high, off;
    afs_size_t maxStoredLength;	/* highest offset we've written to server. */
    int safety, marineronce = 0;

    AFS_STATCNT(afs_StoreAllSegments);

    hset(oldDV, avc->f.m.DataVersion);
    hset(newDV, avc->f.m.DataVersion);
    hash = DVHash(&avc->f.fid);
    foreign = (avc->f.states & CForeign);
    dcList = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
#if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
    /* In the AIX vm implementation we need to do the vm_writep even
     * in the memcache case, since that's where we adjust the file's size
     * and finish flushing partial vm pages.
     */
    if ((cacheDiskType != AFS_FCACHE_TYPE_MEM) ||
	(sync & AFS_VMSYNC_INVAL) || (sync & AFS_VMSYNC) ||
	(sync & AFS_LASTSTORE))
#endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
    {
	/* If we're not diskless, reading a file may stress the VM
	 * system enough to cause a pageout, and this vnode would be
	 * locked when the pageout occurs.  We can prevent this problem
	 * by making sure all dirty pages are already flushed.  We don't
	 * do this when diskless because reading a diskless (i.e.
	 * memory-resident) chunk doesn't require using new VM, and we
	 * also don't want to dump more dirty data into a diskless cache,
	 * since they're smaller, and we might exceed its available
	 * space.
	 */
#if	defined(AFS_SUN5_ENV)
	if (sync & AFS_VMSYNC_INVAL)	/* invalidate VM pages */
	    osi_VM_TryToSmush(avc, CRED(), 1);
	else
#endif
	    osi_VM_StoreAllSegments(avc);
    }
    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
	/* This will probably make someone sad ... */
	/*printf("Net down in afs_StoreSegments\n");*/
	return ENETDOWN;
    }
    ConvertWToSLock(&avc->lock);

    /*
     * Subsequent code expects a sorted list, and it expects all the
     * chunks in the list to be contiguous, so we need a sort and a
     * while loop in here, too - but this will work for a first pass...
     * 92.10.05 - OK, there's a sort in here now.  It's kind of a modified
     *            bin sort, I guess.  Chunk numbers start with 0
     *
     * - Have to get a write lock on xdcache because GetDSlot might need it (if
     *   the chunk doesn't have a dcache struct).
     *   This seems like overkill in most cases.
     * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
     *   xdcache, then relock xdcache and try to use index.  It is done
     *   a lot elsewhere in the CM, but I'm not buying that argument.
     * - should be able to check IFDataMod without doing the GetDSlot (just
     *   hold afs_xdcache).  That way, it's easy to do this without the
     *   writelock on afs_xdcache, and we save unnecessary disk
     *   operations. I don't think that works, 'cuz the next pointers
     *   are still on disk.
     */
    origCBs = afs_allCBs;

    maxStoredLength = 0;
    minj = 0;

    do {
	memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
	high = 0;
	moredata = FALSE;

	/* lock and start over from beginning of hash chain
	 * in order to avoid a race condition. */
	ObtainWriteLock(&afs_xdcache, 284);
	index = afs_dvhashTbl[hash];

	for (j = 0; index != NULLIDX;) {
	    if ((afs_indexFlags[index] & IFDataMod)
		&& (afs_indexUnique[index] == avc->f.fid.Fid.Unique)) {
		tdc = afs_GetValidDSlot(index);	/* refcount+1. */
		if (!tdc) {
		    ReleaseWriteLock(&afs_xdcache);
		    code = EIO;
		    goto done;
		}
		ReleaseReadLock(&tdc->tlock);
		if (!FidCmp(&tdc->f.fid, &avc->f.fid) && tdc->f.chunk >= minj) {
		    off = tdc->f.chunk - minj;
		    if (off < NCHUNKSATONCE) {
			if (dcList[off])
			    osi_Panic("dclist slot already in use!");
			if (afs_mariner && !marineronce) {
			    /* first chunk only */
			    afs_MarinerLog("store$Storing", avc);
			    marineronce++;
			}
			dcList[off] = tdc;
			if (off > high)
			    high = off;
			j++;
			/* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
			 * can't grab here, due to lock ordering with afs_xdcache.
			 * So, disable this shortcut for now.  -- kolya 2001-10-13
			 */
			/* shortcut: big win for little files */
			/* tlen -= tdc->f.chunkBytes;
			 * if (tlen <= 0)
			 *    break;
			 */
		    } else {
			moredata = TRUE;
			afs_PutDCache(tdc);
			if (j == NCHUNKSATONCE)
			    break;
		    }
		} else {
		    afs_PutDCache(tdc);
		}
	    }
	    index = afs_dvnextTbl[index];
	}
	ReleaseWriteLock(&afs_xdcache);

	/* this guy writes chunks, puts back dcache structs, and bumps newDV */
	/* "moredata" just says "there are more dirty chunks yet to come".
	 */
	if (j) {
	    code =
		afs_CacheStoreVCache(dcList, avc, areq, sync,
				   minj, high, moredata,
				   &newDV, &maxStoredLength);
	    /* Release any zero-length dcache entries in our interval
	     * that we locked but didn't store back above.
	     */
	    for (j = 0; j <= high; j++) {
		tdc = dcList[j];
		if (tdc) {
		    osi_Assert(tdc->f.chunkBytes == 0);
		    ReleaseSharedLock(&tdc->lock);
		    afs_PutDCache(tdc);
		}
	    }
	}
	/* if (j) */
	minj += NCHUNKSATONCE;
    } while (!code && moredata);

 done:
    UpgradeSToWLock(&avc->lock, 29);

    /* send a trivial truncation store if did nothing else */
    if (code == 0) {
	/*
	 * Call StoreMini if we haven't written enough data to extend the
	 * file at the fileserver to the client's notion of the file length.
	 */
	if ((avc->f.truncPos != AFS_NOTRUNC)
	    || ((avc->f.states & CExtendedFile)
		&& (maxStoredLength < avc->f.m.Length))) {
	    code = afs_StoreMini(avc, areq);
	    if (code == 0)
		hadd32(newDV, 1);	/* just bumped here, too */
	}
	avc->f.states &= ~CExtendedFile;
    }

    /*
     * Finally, turn off DWriting, turn on DFEntryMod,
     * update f.versionNo.
     * A lot of this could be integrated into the loop above
     */
    if (!code) {
	afs_hyper_t h_unset;
	hones(h_unset);

	minj = 0;

	do {
	    moredata = FALSE;
	    memset(dcList, 0,
		   NCHUNKSATONCE * sizeof(struct dcache *));

	    /* overkill, but it gets the lock in case GetDSlot needs it */
	    ObtainWriteLock(&afs_xdcache, 285);

	    for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
		 index != NULLIDX && safety < afs_cacheFiles + 2;
	         index = afs_dvnextTbl[index]) {

		if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
		    tdc = afs_GetValidDSlot(index);
		    if (!tdc) {
			/* This is okay, since manipulating the dcaches at this
			 * point is best-effort. We only get a dcache here to
			 * increment the dv and turn off DWriting. If we were
			 * supposed to do that for a dcache, but could not
			 * due to an I/O error, it just means the dv won't
			 * be updated, so we won't be able to use that cached
			 * chunk in the future. That's inefficient, but not
			 * an error. */
			continue;
		    }
		    ReleaseReadLock(&tdc->tlock);

		    if (!FidCmp(&tdc->f.fid, &avc->f.fid)
			&& tdc->f.chunk >= minj) {
			off = tdc->f.chunk - minj;
			if (off < NCHUNKSATONCE) {
			    /* this is the file, and the correct chunk range */
			    if (j >= NCHUNKSATONCE)
				osi_Panic
				    ("Too many dcache entries in range\n");
			    dcList[j++] = tdc;
			} else {
			    moredata = TRUE;
			    afs_PutDCache(tdc);
			    if (j == NCHUNKSATONCE)
				break;
			}
		    } else {
			afs_PutDCache(tdc);
		    }
		}
	    }
	    ReleaseWriteLock(&afs_xdcache);

	    for (i = 0; i < j; i++) {
		/* Iterate over the dcache entries we collected above */
		tdc = dcList[i];
		ObtainSharedLock(&tdc->lock, 677);

		/* was code here to clear IFDataMod, but it should only be done
		 * in storedcache and storealldcache.
		 */
		/* Only increase DV if we had up-to-date data to start with.
		 * Otherwise, we could be falsely upgrading an old chunk
		 * (that we never read) into one labelled with the current
		 * DV #.  Also note that we check that no intervening stores
		 * occurred, otherwise we might mislabel cache information
		 * for a chunk that we didn't store this time
		 */
		/* Don't update the version number if it's not yet set. */
		if (!hsame(tdc->f.versionNo, h_unset)
		    && hcmp(tdc->f.versionNo, oldDV) >= 0) {

		    if ((!(afs_dvhack || foreign)
			 && hsame(avc->f.m.DataVersion, newDV))
			|| ((afs_dvhack || foreign)
			    && (origCBs == afs_allCBs))) {
			/* no error, this is the DV */

			UpgradeSToWLock(&tdc->lock, 678);
			hset(tdc->f.versionNo, avc->f.m.DataVersion);
			tdc->dflags |= DFEntryMod;
			/* DWriting may not have gotten cleared above, if all
			 * we did was a StoreMini */
			tdc->f.states &= ~DWriting;
			ConvertWToSLock(&tdc->lock);
		    }
		}

		ReleaseSharedLock(&tdc->lock);
		afs_PutDCache(tdc);
	    }

	    minj += NCHUNKSATONCE;

	} while (moredata);
    }

    if (code) {
	/*
	 * Invalidate chunks after an error for ccores files since
	 * afs_inactive won't be called for these and they won't be
	 * invalidated. Also discard data if it's a permanent error from the
	 * fileserver.
	 */
	if (areq->permWriteError || (avc->f.states & CCore)) {
	    afs_InvalidateAllSegments(avc);
	}
    }
    afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->f.m.Length, ICL_TYPE_INT32, code);
    /* would like a Trace5, but it doesn't exist... */
    afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
	       avc->lock.excl_locked);
    afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
	       avc->lock.readers_reading, ICL_TYPE_INT32,
	       avc->lock.num_waiting);

    /*
     * Finally, if updated DataVersion matches newDV, we did all of the
     * stores.  If mapDV indicates that the page cache was flushed up
     * to when we started the store, then we can relabel them as flushed
     * as recently as newDV.
     * Turn off CDirty bit because the stored data is now in sync with server.
     */
    if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
	if ((!(afs_dvhack || foreign) && hsame(avc->f.m.DataVersion, newDV))
	    || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
	    hset(avc->mapDV, newDV);
	    avc->f.states &= ~CDirty;
	}
    }
    osi_FreeLargeSpace(dcList);

    /* If not the final write a temporary error is ok. */
    if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
	code = 0;

    return code;

}				/*afs_StoreAllSegments (new 03/02/94) */
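
afs_StoreAllSegments expects avc->lock to be write-held on entry (note the ConvertWToSLock near the top and the UpgradeSToWLock at done:), and it returns with that write lock still held. A hedged caller sketch; the lock-site number 900 is arbitrary, and only AFS_LASTSTORE of the sync flags referenced above is passed here.

static afs_int32
example_flush_file(struct vcache *avc, struct vrequest *areq)
{
    afs_int32 code;

    ObtainWriteLock(&avc->lock, 900);   /* caller must hold the write lock */
    code = afs_StoreAllSegments(avc, areq, AFS_LASTSTORE);
    ReleaseWriteLock(&avc->lock);
    return code;                        /* 0, ENETDOWN, or a store error */
}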
Example #9
/* called with the GLOCK held */
int
afs_syscall_pioctl(char *path, unsigned int com, caddr_t cmarg, int follow)
{
    cred_t *credp = crref();	/* don't free until done! */
    struct afs_ioctl data;
    struct clientcred ccred;
    struct rmtbulk idata, odata;
    short in_size, out_size;
    afs_int32 code = 0, pag, err;
    gid_t g0, g1;
    char *abspath, *pathbuf = 0;

    AFS_STATCNT(afs_syscall_pioctl);
    if (follow)
	follow = 1;		/* compat. with old venus */
    code = copyin_afs_ioctl(cmarg, &data);
    if (code) goto out;

    if ((com & 0xff) == 90) {
	/* PSetClientContext, in any space */
	code = EINVAL;
	goto out;
    }

    /* Special handling for a few pioctls */
    switch (com & 0xffff) {
	case (0x5600 |  3): /* VIOCSETTOK */
	    code = afspag_PSetTokens(data.in, data.in_size, &credp);
	    if (code) goto out;
	    break;

	case (0x5600 |  9): /* VIOCUNLOG */
	case (0x5600 | 21): /* VIOCUNPAG */
	    code = afspag_PUnlog(data.in, data.in_size, &credp);
	    if (code) goto out;
	    break;

	case (0x5600 | 38): /* VIOC_AFS_SYSNAME */
	    code = afspag_PSetSysName(data.in, data.in_size, &credp);
	    if (code) goto out;
	    break;
    }

    /* Set up credentials */
    memset(&ccred, 0, sizeof(ccred));
    pag = PagInCred(credp);
    ccred.uid = afs_cr_uid(credp);
    if (pag != NOPAG) {
	 afs_get_groups_from_pag(pag, &g0, &g1);
	 ccred.group0 = g0;
	 ccred.group1 = g1;
    }

    /*
     * Copy the path and convert to absolute, if one was given.
     * NB: We can only use osi_AllocLargeSpace here as long as
     * RMTSYS_MAXPATHLEN is less than AFS_LRALLOCSIZ.
     */
    if (path) {
	pathbuf = osi_AllocLargeSpace(RMTSYS_MAXPATHLEN);
	if (!pathbuf) {
	    code = ENOMEM;
	    goto out;
	}
	code = osi_abspath(path, pathbuf, RMTSYS_MAXPATHLEN, 0, &abspath);
	if (code)
	    goto out_path;
    } else {
	abspath = NIL_PATHP;
    }

    /* Allocate, copy, and convert incoming data */
    idata.rmtbulk_len = in_size = data.in_size;
    if (in_size  < 0 || in_size  > MAXBUFFERLEN) {
	code = EINVAL;
	goto out_path;
    }
    if (in_size > AFS_LRALLOCSIZ)
	 idata.rmtbulk_val = osi_Alloc(in_size);
    else
	 idata.rmtbulk_val = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    if (!idata.rmtbulk_val) {
	code = ENOMEM;
	goto out_path;
    }
    if (in_size) {
	AFS_COPYIN(data.in, idata.rmtbulk_val, in_size, code);
	if (code)
	    goto out_idata;
	inparam_conversion(com, idata.rmtbulk_val, in_size, 0);
    }

    /* Allocate space for outgoing data */
    odata.rmtbulk_len = out_size = data.out_size;
    if (out_size < 0 || out_size > MAXBUFFERLEN) {
	code = EINVAL;
	goto out_idata;
    }
    if (out_size > AFS_LRALLOCSIZ)
	 odata.rmtbulk_val = osi_Alloc(out_size);
    else
	 odata.rmtbulk_val = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    if (!odata.rmtbulk_val) {
	code = ENOMEM;
	goto out_idata;
    }

    AFS_GUNLOCK();
    code = RMTSYS_Pioctl(rmtsys_conn, &ccred, abspath, com, follow,
			 &idata, &odata, &err);
    AFS_GLOCK();
    if (code)
	goto out_odata;

    /* Convert and copy out the result */
    if (odata.rmtbulk_len > out_size) {
	code = E2BIG;
	goto out_odata;
    }
    if (odata.rmtbulk_len) {
	outparam_conversion(com, odata.rmtbulk_val, odata.rmtbulk_len, 1);
	AFS_COPYOUT(odata.rmtbulk_val, data.out, odata.rmtbulk_len, code);
    }
    if (!code)
	code = err;

out_odata:
    if (out_size > AFS_LRALLOCSIZ)
	osi_Free(odata.rmtbulk_val, out_size);
    else
	osi_FreeLargeSpace(odata.rmtbulk_val);

out_idata:
    if (in_size > AFS_LRALLOCSIZ)
	osi_Free(idata.rmtbulk_val, in_size);
    else
	osi_FreeLargeSpace(idata.rmtbulk_val);

out_path:
    if (path)
	osi_FreeLargeSpace(pathbuf);

out:
    crfree(credp);
#if defined(KERNEL_HAVE_UERROR)
    if (!getuerror())
	setuerror(code);
    return (getuerror());
#else
    return (code);
#endif
}
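
The dispatch near the top of afs_syscall_pioctl keys on the low bits of com, with commands encoded as 0x5600 | opcode. A small hedged helper that mirrors that decoding, handy when tracing which pioctl arrived; the names are the ones the example itself associates with each opcode.

static const char *
example_pioctl_name(unsigned int com)
{
    switch (com & 0xffff) {
    case (0x5600 |  3): return "VIOCSETTOK";
    case (0x5600 |  9): return "VIOCUNLOG";
    case (0x5600 | 21): return "VIOCUNPAG";
    case (0x5600 | 38): return "VIOC_AFS_SYSNAME";
    default:            return "other";
    }
}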