Example #1
/* ignores all remaining multiRx calls */
void
multi_Finalize_Ignore(struct multi_handle *mh)
{
    int i;
    int nCalls = mh->nConns;
    for (i = 0; i < nCalls; i++) {
	struct rx_call *call = mh->calls[i];
	if (call)
	    rx_EndCall(call, 0);
    }
#ifdef RX_ENABLE_LOCKS
    MUTEX_DESTROY(&mh->lock);
    CV_DESTROY(&mh->cv);
#endif /* RX_ENABLE_LOCKS */
    osi_Free(mh->calls, sizeof(struct rx_call *) * nCalls);
    osi_Free(mh->ready, sizeof(short *) * nCalls);
    osi_Free(mh, sizeof(struct multi_handle));
}
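With osi_Free the caller supplies the block size, so every free must repeat the exact size expression used at allocation time; the allocator keeps no record of its own. Below is a hypothetical multi_Init-style counterpart to the finalizer above — a sketch rather than the real initializer — showing each osi_Alloc mirroring an osi_Free from Example #1 (including the odd-looking sizeof(short *) used for the ready array, which must simply match whatever expression the allocation used):

/* Sketch of the allocation side this finalizer assumes; field names
 * follow the code above, everything else is illustrative. */
struct multi_handle *
multi_Init_sketch(int nConns)
{
    struct multi_handle *mh = osi_Alloc(sizeof(struct multi_handle));

    mh->nConns = nConns;
    mh->calls = osi_Alloc(sizeof(struct rx_call *) * nConns);
    mh->ready = osi_Alloc(sizeof(short *) * nConns);	/* matches the free */
#ifdef RX_ENABLE_LOCKS
    MUTEX_INIT(&mh->lock, "multi_handle lock", MUTEX_DEFAULT, 0);
    CV_INIT(&mh->cv, "multi_handle cv", CV_DEFAULT, 0);
#endif
    return mh;
}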
Example #2
static void
shutdown_rxevent(void)
{
    struct xfreelist *xp, *nxp;

    initialized = 0;
#if	defined(AFS_AIX32_ENV) && defined(KERNEL)
    /* Everything is freed in afs_osinet.c */
#else
    while ((xp = xfreemallocs) != NULL) {
	nxp = xp->next;
	osi_Free((char *) xp, sizeof(struct rxevent) * rxevent_allocUnit);
	xfreemallocs = nxp;	/* advance the head, or this loop never terminates */
    }
#endif
}
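The loop above generalizes to any singly linked free list: capture the next pointer before freeing the node, then advance the list head. A standalone sketch of the idiom in plain C (hypothetical node type, standard malloc/free):

#include <stdlib.h>

struct node {
    struct node *next;
};

/* Free a whole list without touching freed memory: save next first,
 * free the node, then advance the head. */
static void
free_list(struct node **head)
{
    struct node *xp, *nxp;

    while ((xp = *head) != NULL) {
	nxp = xp->next;		/* read before the node is freed */
	free(xp);
	*head = nxp;		/* advance, or the loop never terminates */
    }
}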
Example #3
static long
DoClient(int index, opaque rock)
{
    struct client *c = (struct client *)rock;
    long code = 0;
    int i;
    u_long n, inc_n;

    n = 95678;
    for (i = 0; i < c->fastCalls[index]; i++) {
	code = RXKST_Fast(c->conn, n, &inc_n);
	if (code)
	    return (code);
	if (n + 1 != inc_n)
	    return RXKST_INCFAILED;
	n++;
    }

    for (i = 0; i < c->slowCalls[index]; i++) {
	u_long ntime;
	u_long now;
	code = RXKST_Slow(c->conn, 1, &ntime);
	if (code)
	    return (code);
	now = FT_ApproxTime();
	if ((ntime < now - maxSkew) || (ntime > now + maxSkew))
	    return RXKST_TIMESKEW;
    }

    if (c->copiousCalls[index] > 0) {
	u_long buflen = 10000;
	char *buf = osi_Alloc(buflen);
	for (i = 0; i < c->copiousCalls[index]; i++) {
	    code = Copious(c, buf, buflen);
	    if (code)
		break;
	}
	osi_Free(buf, buflen);
	if (code)
	    return code;
    }
    return 0;
}
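The copious-call block above demonstrates a clean buffer lifetime: allocate once before the loop, break (never return) on error so the single osi_Free always runs, then propagate the saved code. The same shape in standalone C, with a hypothetical do_call standing in for the RPC:

#include <stdlib.h>

/* Hypothetical RPC stand-in; returns 0 on success. */
static int
do_call(char *buf, size_t len)
{
    (void)buf;
    (void)len;
    return 0;
}

static int
run_copious(int nCalls)
{
    size_t buflen = 10000;
    char *buf = malloc(buflen);
    int i, code = 0;

    if (buf == NULL)
	return -1;
    for (i = 0; i < nCalls; i++) {
	code = do_call(buf, buflen);
	if (code)
	    break;		/* break, don't return: buf must be freed */
    }
    free(buf);
    return code;
}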
Example #4
/* afs_put_super
 * Called from unmount to release super_block. */
static void
afs_put_super(struct super_block *sbp)
{
    AFS_GLOCK();
    AFS_STATCNT(afs_unmount);

    afs_globalVFS = 0;
    afs_globalVp = 0;

    osi_linux_free_inode_pages();	/* invalidate and release remaining AFS inodes. */
    afs_shutdown();
    mntput(afs_cacheMnt);

    osi_linux_verify_alloced_memory();
#if defined(HAVE_LINUX_BDI_INIT)
    bdi_destroy(afs_backing_dev_info);
#endif
    osi_Free(afs_backing_dev_info, sizeof(struct backing_dev_info));
    AFS_GUNLOCK();

    sbp->s_dev = 0;
    module_put(THIS_MODULE);
}
Example #5
/* Cannot have static linkage--called from BPrefetch (afs_daemons) */
afs_int32
afs_PrefetchNoCache(struct vcache *avc,
		    afs_ucred_t *acred,
		    struct nocache_read_request *bparms)
{
    struct uio *auio;
#ifndef UKERNEL
    struct iovec *iovecp;
#endif
    struct vrequest *areq;
    afs_int32 code = 0;
    struct rx_connection *rxconn;
#ifdef AFS_64BIT_CLIENT
    afs_int32 length_hi, bytes, locked;
#endif

    struct afs_conn *tc;
    struct rx_call *tcall;
    struct tlocal1 {
	struct AFSVolSync tsync;
	struct AFSFetchStatus OutStatus;
	struct AFSCallBack CallBack;
    };
    struct tlocal1 *tcallspec;

    auio = bparms->auio;
    areq = bparms->areq;
#ifndef UKERNEL
    iovecp = auio->uio_iov;
#endif

    tcallspec = osi_Alloc(sizeof(struct tlocal1));
    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK /* ignored */, &rxconn);
	if (tc) {
	    avc->callback = tc->parent->srvr->server;
	    tcall = rx_NewCall(rxconn);
#ifdef AFS_64BIT_CLIENT
	    if (!afs_serverHasNo64Bit(tc)) {
		code = StartRXAFS_FetchData64(tcall,
					      (struct AFSFid *) &avc->f.fid.Fid,
					      auio->uio_offset,
					      bparms->length);
		if (code == 0) {
		    COND_GUNLOCK(locked);
		    bytes = rx_Read(tcall, (char *)&length_hi,
				    sizeof(afs_int32));
		    COND_RE_GLOCK(locked);

		    if (bytes != sizeof(afs_int32)) {
			length_hi = 0;
			code = rx_Error(tcall);
			COND_GUNLOCK(locked);
			code = rx_EndCall(tcall, code);
			COND_RE_GLOCK(locked);
			tcall = NULL;
		    }
		}
	    } /* afs_serverHasNo64Bit */
	    if (code == RXGEN_OPCODE || afs_serverHasNo64Bit(tc)) {
		if (auio->uio_offset > 0x7FFFFFFF) {
		    code = EFBIG;
		} else {
		    afs_int32 pos;
		    pos = auio->uio_offset;
		    COND_GUNLOCK(locked);
		    if (!tcall)
			tcall = rx_NewCall(rxconn);
		    code = StartRXAFS_FetchData(tcall,
					(struct AFSFid *) &avc->f.fid.Fid,
					pos, bparms->length);
		    COND_RE_GLOCK(locked);
		}
		afs_serverSetNo64Bit(tc);
	    }
#else
	    code = StartRXAFS_FetchData(tcall,
			                (struct AFSFid *) &avc->f.fid.Fid,
					auio->uio_offset, bparms->length);
#endif
	    if (code == 0) {
		code = afs_NoCacheFetchProc(tcall, avc, auio,
				            1 /* release_pages */,
					    bparms->length);
	    } else {
		afs_warn("BYPASS: StartRXAFS_FetchData failed: %d\n", code);
		unlock_and_release_pages(auio);
		goto done;
	    }
	    if (code == 0) {
		code = EndRXAFS_FetchData(tcall, &tcallspec->OutStatus,
					  &tcallspec->CallBack,
					  &tcallspec->tsync);
	    } else {
		afs_warn("BYPASS: NoCacheFetchProc failed: %d\n", code);
	    }
	    code = rx_EndCall(tcall, code);
	} else {
	    afs_warn("BYPASS: No connection.\n");
	    code = -1;
	    unlock_and_release_pages(auio);
	    goto done;
	}
    } while (afs_Analyze(tc, rxconn, code, &avc->f.fid, areq,
						 AFS_STATS_FS_RPCIDX_FETCHDATA,
						 SHARED_LOCK,0));
done:
    /*
     * Copy appropriate fields into vcache
     */

    if (!code)
	afs_ProcessFS(avc, &tcallspec->OutStatus, areq);

    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(tcallspec, sizeof(struct tlocal1));
    osi_Free(bparms, sizeof(struct nocache_read_request));
#ifndef UKERNEL
    /* in UKERNEL, the "pages" are passed in */
    osi_Free(iovecp, auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(auio, sizeof(struct uio));
#endif
    return code;
}
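The 64-bit negotiation in afs_PrefetchNoCache follows a common Rx idiom: attempt the 64-bit RPC first, and on RXGEN_OPCODE (the server does not implement that opcode) fall back to the 32-bit variant, caching the result so later calls skip the probe. A minimal sketch of that shape, with hypothetical fetch64/fetch32 helpers and a module-local flag standing in for StartRXAFS_FetchData64/StartRXAFS_FetchData and afs_serverHasNo64Bit/afs_serverSetNo64Bit:

#include <errno.h>

#define RXGEN_OPCODE (-455)	/* Rx error: server lacks this opcode */

/* Hypothetical stand-ins for the real RPC stubs and server flag. */
static int no64;
static int fetch64(long long off, long long len) { (void)off; (void)len; return RXGEN_OPCODE; }
static int fetch32(int off, int len) { (void)off; (void)len; return 0; }

static int
fetch_with_fallback(long long offset, long long length)
{
    int code = 0;

    if (!no64)
	code = fetch64(offset, length);
    if (no64 || code == RXGEN_OPCODE) {
	if (offset > 0x7FFFFFFF)
	    return EFBIG;	/* offset not expressible in 32 bits */
	code = fetch32((int)offset, (int)length);
	no64 = 1;		/* remember; don't probe this server again */
    }
    return code;
}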
Example #6
/* dispatch a no-cache read request */
afs_int32
afs_ReadNoCache(struct vcache *avc,
		struct nocache_read_request *bparms,
		afs_ucred_t *acred)
{
    afs_int32 code;
    afs_int32 bcnt;
    struct brequest *breq;
    struct vrequest *areq;

    /* the receiver will free this */
    areq = osi_Alloc(sizeof(struct vrequest));

    if (avc->vc_error) {
	code = EIO;
	afs_warn("afs_ReadNoCache VCache Error!\n");
	goto cleanup;
    }
    if ((code = afs_InitReq(areq, acred))) {
	afs_warn("afs_ReadNoCache afs_InitReq error!\n");
	goto cleanup;
    }

    AFS_GLOCK();
    code = afs_VerifyVCache(avc, areq);
    AFS_GUNLOCK();

    if (code) {
	code = afs_CheckCode(code, areq, 11);	/* failed to get it */
	afs_warn("afs_ReadNoCache Failed to verify VCache!\n");
	goto cleanup;
    }

    bparms->areq = areq;

    /* and queue this one */
    bcnt = 1;
    AFS_GLOCK();
    while(bcnt < 20) {
	breq = afs_BQueue(BOP_FETCH_NOCACHE, avc, B_DONTWAIT, 0, acred, 1, 1,
			  bparms, (void *)0, (void *)0);
	if(breq != 0) {
	    code = 0;
	    break;
	}
	afs_osi_Wait(10 * bcnt, 0, 0);
	bcnt++;
    }
    AFS_GUNLOCK();

    if(!breq) {
    	code = EBUSY;
	goto cleanup;
    }

    return code;

cleanup:
    /* If there's a problem before we queue the request, we need to
     * do everything that would normally happen when the request was
     * processed, like unlocking the pages and freeing memory.
     */
    unlock_and_release_pages(bparms->auio);
    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(bparms->auio->uio_iov,
	     bparms->auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(bparms->auio, sizeof(struct uio));
    osi_Free(bparms, sizeof(struct nocache_read_request));
    return code;
}
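Queueing in afs_ReadNoCache retries a full background queue with a growing sleep instead of failing outright; the counter bounds both the attempts and the total wait. The same shape in standalone C (hypothetical try_enqueue; sleep stands in for afs_osi_Wait):

#include <errno.h>
#include <unistd.h>

/* Hypothetical stand-in: returns a nonzero handle on success. */
static void *
try_enqueue(void)
{
    return (void *)0;
}

/* Retry up to 19 times, waiting a little longer each round
 * (afs_osi_Wait(10 * bcnt, 0, 0) in the original). */
static int
enqueue_with_backoff(void)
{
    int bcnt;

    for (bcnt = 1; bcnt < 20; bcnt++) {
	if (try_enqueue() != 0)
	    return 0;		/* queued */
	sleep(1);		/* grows with bcnt in the original */
    }
    return EBUSY;		/* queue stayed full; give up */
}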
Example #7
/*
 * afs_TruncateAllSegments
 *
 * Description:
 *	Truncate a cache file.
 *
 * Parameters:
 *	avc  : Ptr to vcache entry to truncate.
 *	alen : Number of bytes to make the file.
 *	areq : Ptr to request structure.
 *
 * Environment:
 *	Called with avc write-locked; in VFS40 systems, pvnLock is also
 *	held.
 */
int
afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
			struct vrequest *areq, afs_ucred_t *acred)
{
    struct dcache *tdc;
    afs_int32 code;
    afs_int32 index;
    afs_size_t newSize;

    int dcCount, dcPos;
    struct dcache **tdcArray = NULL;

    AFS_STATCNT(afs_TruncateAllSegments);
    avc->f.m.Date = osi_Time();
    afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
    if (alen >= avc->f.m.Length) {
	/*
	 * Special speedup since Sun's vm extends the file this way;
	 * we've never written to the file thus we can just set the new
	 * length and avoid the needless calls below.
	 * Also used for ftruncate calls which can extend the file.
	 * To completely minimize the possible extra StoreMini RPC, we really
	 * should keep the ExtendedPos as well and clear this flag if we
	 * truncate below that value before we store the file back.
	 */
	avc->f.states |= CExtendedFile;
	avc->f.m.Length = alen;
	return 0;
    }
#if	(defined(AFS_SUN5_ENV))

    /* Zero unused portion of last page */
    osi_VM_PreTruncate(avc, alen, acred);

#endif

#if	(defined(AFS_SUN5_ENV))
    ObtainWriteLock(&avc->vlock, 546);
    avc->activeV++;		/* Block new getpages */
    ReleaseWriteLock(&avc->vlock);
#endif

    ReleaseWriteLock(&avc->lock);
    AFS_GUNLOCK();

    /* Flush pages beyond end-of-file. */
    osi_VM_Truncate(avc, alen, acred);

    AFS_GLOCK();
    ObtainWriteLock(&avc->lock, 79);

    avc->f.m.Length = alen;

    if (alen < avc->f.truncPos)
	avc->f.truncPos = alen;
    code = DVHash(&avc->f.fid);

    /* block out others from screwing with this table */
    ObtainWriteLock(&afs_xdcache, 287);

    dcCount = 0;
    for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		ReleaseWriteLock(&afs_xdcache);
		code = EIO;
		goto done;
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid))
		dcCount++;
	    afs_PutDCache(tdc);
	}
	index = afs_dvnextTbl[index];
    }

    /* Now allocate space where we can save those dcache entries, and
     * do a second pass over them..  Since we're holding xdcache, it
     * shouldn't be changing.
     */
    tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
    dcPos = 0;

    for (index = afs_dvhashTbl[code]; index != NULLIDX; index = afs_dvnextTbl[index]) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		/* make sure we put back all of the tdcArray members before
		 * bailing out */
		/* remember, the last valid tdc is at dcPos-1, so start at
		 * dcPos-1, not at dcPos itself. */
		for (dcPos = dcPos - 1; dcPos >= 0; dcPos--) {
		    tdc = tdcArray[dcPos];
		    afs_PutDCache(tdc);
		}
		code = EIO;
		goto done;
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		/* same file, and modified, we'll store it back */
		if (dcPos < dcCount) {
		    tdcArray[dcPos++] = tdc;
		} else {
		    afs_PutDCache(tdc);
		}
	    } else {
		afs_PutDCache(tdc);
	    }
	}
    }

    ReleaseWriteLock(&afs_xdcache);

    /* Now we loop over the array of dcache entries and truncate them */
    for (index = 0; index < dcPos; index++) {
	struct osi_file *tfile;

	tdc = tdcArray[index];

	newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
	if (newSize < 0)
	    newSize = 0;
	ObtainSharedLock(&tdc->lock, 672);
	if (newSize < tdc->f.chunkBytes && newSize < MAX_AFS_UINT32) {
	    UpgradeSToWLock(&tdc->lock, 673);
	    tdc->f.states |= DWriting;
	    tfile = afs_CFileOpen(&tdc->f.inode);
	    afs_CFileTruncate(tfile, (afs_int32)newSize);
	    afs_CFileClose(tfile);
	    afs_AdjustSize(tdc, (afs_int32)newSize);
	    if (alen < tdc->validPos) {
                if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
                    tdc->validPos = 0;
                else
                    tdc->validPos = alen;
            }
	    ConvertWToSLock(&tdc->lock);
	}
	ReleaseSharedLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    code = 0;

 done:
    if (tdcArray) {
	osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
    }
#if	(defined(AFS_SUN5_ENV))
    ObtainWriteLock(&avc->vlock, 547);
    if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
	avc->vstates &= ~VRevokeWait;
	afs_osi_Wakeup((char *)&avc->vstates);
    }
    ReleaseWriteLock(&avc->vlock);
#endif

    return code;
}
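afs_TruncateAllSegments and the invalidation routines below share a two-pass scheme: hold the hash-chain lock, count matching entries, allocate an array of exactly that size, re-walk the chain collecting referenced entries, then drop the lock before the slow per-entry work. A compact sketch of the idiom with a hypothetical chain/refcount API (all names invented for illustration):

#include <stdlib.h>

struct entry { struct entry *next; int match; };

/* Hypothetical single-chain cache; stubs keep the sketch compilable. */
static struct entry *chain;
static void lock_table(void) { }
static void unlock_table(void) { }
static void entry_hold(struct entry *e) { (void)e; }
static void entry_release(struct entry *e) { (void)e; }
static void process(struct entry *e) { (void)e; }	/* slow work, done unlocked */

static int
collect_then_process(void)
{
    struct entry *e, **arr;
    int count = 0, pos = 0, i;

    lock_table();
    for (e = chain; e; e = e->next)	/* pass 1: count */
	if (e->match)
	    count++;
    arr = malloc(count * sizeof(*arr));
    if (arr == NULL && count > 0) {
	unlock_table();
	return -1;
    }
    for (e = chain; e && pos < count; e = e->next)	/* pass 2: collect */
	if (e->match) {
	    entry_hold(e);	/* pin it so it survives the unlock */
	    arr[pos++] = e;
	}
    unlock_table();		/* chain may change now; our refs cannot vanish */

    for (i = 0; i < pos; i++) {
	process(arr[i]);
	entry_release(arr[i]);
    }
    free(arr);
    return 0;
}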
Example #8
int
afs_InvalidateAllSegments(struct vcache *avc)
{
    struct dcache *tdc;
    afs_int32 hash;
    afs_int32 index;
    struct dcache **dcList;
    int i, dcListMax, dcListCount;

    AFS_STATCNT(afs_InvalidateAllSegments);
    afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    hash = DVHash(&avc->f.fid);
    avc->f.truncPos = AFS_NOTRUNC;	/* don't truncate later */
    avc->f.states &= ~CExtendedFile;	/* not any more */
    ObtainWriteLock(&afs_xcbhash, 459);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* mark status information as bad, too */
    ReleaseWriteLock(&afs_xcbhash);
    if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
	osi_dnlc_purgedp(avc);
    /* Blow away pages; for now, only for Solaris */
#if	(defined(AFS_SUN5_ENV))
    if (WriteLocked(&avc->lock))
	osi_ReleaseVM(avc, (afs_ucred_t *)0);
#endif
    /*
     * Block out others from screwing with this table; is a read lock
     * sufficient?
     */
    ObtainWriteLock(&afs_xdcache, 286);
    dcListMax = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		/* In the case of fatal errors during stores, we MUST
		 * invalidate all of the relevant chunks. Otherwise, the chunks
		 * will be left with the 'new' data that was never successfully
		 * written to the server, but the DV in the dcache is still the
		 * old DV. So, we may indefinitely serve data to applications
		 * that is not actually in the file on the fileserver. If we
		 * cannot afs_GetValidDSlot the appropriate entries, currently
		 * there is no way to ensure the dcache is invalidated. So for
		 * now, to avoid risking serving bad data from the cache, panic
		 * instead. */
		osi_Panic("afs_InvalidateAllSegments tdc count");
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid))
		dcListMax++;
	    afs_PutDCache(tdc);
	}
	index = afs_dvnextTbl[index];
    }

    dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
    dcListCount = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		/* We cannot proceed after getting this error; we risk serving
		 * incorrect data to applications. So panic instead. See the
		 * above comment next to the previous afs_GetValidDSlot call
		 * for details. */
		osi_Panic("afs_InvalidateAllSegments tdc store");
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		/* same file? we'll zap it */
		if (afs_indexFlags[index] & IFDataMod) {
		    afs_stats_cmperf.cacheCurrDirtyChunks--;
		    /* don't write it back */
		    afs_indexFlags[index] &= ~IFDataMod;
		}
		afs_indexFlags[index] &= ~IFAnyPages;
		if (dcListCount < dcListMax)
		    dcList[dcListCount++] = tdc;
		else
		    afs_PutDCache(tdc);
	    } else {
		afs_PutDCache(tdc);
	    }
	}
	index = afs_dvnextTbl[index];
    }
    ReleaseWriteLock(&afs_xdcache);

    for (i = 0; i < dcListCount; i++) {
	tdc = dcList[i];

	ObtainWriteLock(&tdc->lock, 679);
	ZapDCE(tdc);
	if (vType(avc) == VDIR)
	    DZap(tdc);
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    osi_Free(dcList, dcListMax * sizeof(struct dcache *));

    return 0;
}
Example #9
static long
CallSimultaneously(u_int threads, opaque rock, long (*proc)(int, opaque))
{
    long code;
    int i;
#ifdef AFS_PTHREAD_ENV
    pthread_once(&workerOnce, WorkerInit);
#endif

    workers = 0;
    for (i = 0; i < threads; i++) {
	struct worker *w;
#ifdef AFS_PTHREAD_ENV
	pthread_t pid;
#else
	PROCESS pid;
#endif
	assert(i < MAX_CTHREADS);
	w = (struct worker *)osi_Alloc(sizeof(struct worker));
	memset(w, 0, sizeof(*w));
	w->next = workers;
	workers = w;
	w->index = i;
	w->exitCode = RXKST_PROCESSRUNNING;
	w->rock = rock;
	w->proc = proc;
#ifdef AFS_PTHREAD_ENV
	{
	    pthread_attr_t tattr;

	    code = pthread_attr_init(&tattr);
	    if (code) {
		afs_com_err(whoami, code,
			"can't pthread_attr_init worker process");
		return code;
	    }

	    code =
		pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED);
	    if (code) {
		afs_com_err(whoami, code,
			"can't pthread_attr_setdetachstate worker process");
		return code;
	    }

	    code = pthread_create(&pid, &tattr, DoWorker, (void *)w);
	}
#else
	code =
	    LWP_CreateProcess(DoWorker, 16000, LWP_NORMAL_PRIORITY,
			      (opaque) w, "Worker Process", &pid);
#endif
	if (code) {
	    afs_com_err(whoami, code, "can't create worker process");
	    return code;
	}
    }
    code = 0;			/* last non-zero code encountered */
#ifdef AFS_PTHREAD_ENV
    pthread_mutex_lock(&workerLock);
#endif
    while (workers) {
	struct worker *w, *prevW, *nextW;
	prevW = 0;
	for (w = workers; w; w = nextW) {
	    nextW = w->next;
	    if (w->exitCode != RXKST_PROCESSRUNNING) {
		if (w->exitCode) {
		    if (code == 0)
			code = w->exitCode;
		}
		if (prevW)
		    prevW->next = w->next;
		else
		    workers = w->next;
		osi_Free(w, sizeof(*w));
		continue;	/* don't bump prevW */
	    }
	    prevW = w;
	}
#ifdef AFS_PTHREAD_ENV
	if (workers)
	    pthread_cond_wait(&workerCV, &workerLock);
#else
	if (workers)
	    LWP_WaitProcess(&workers);
#endif
    }
#ifdef AFS_PTHREAD_ENV
    pthread_mutex_unlock(&workerLock);
#endif
    return code;
}
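CallSimultaneously reaps its detached workers by rescanning a shared list under a mutex and sleeping on a condition variable that exiting workers signal. A self-contained pthread sketch of the same reap loop (the worker body is hypothetical; the real code records exit codes in struct worker):

#include <pthread.h>
#include <stdlib.h>

struct worker {
    struct worker *next;
    int done;			/* analogue of exitCode != RXKST_PROCESSRUNNING */
};

static pthread_mutex_t workerLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t workerCV = PTHREAD_COND_INITIALIZER;
static struct worker *workers;

static void *
worker_main(void *arg)
{
    struct worker *w = arg;

    /* ... real work happens here ... */
    pthread_mutex_lock(&workerLock);
    w->done = 1;
    pthread_cond_broadcast(&workerCV);	/* wake the reaper */
    pthread_mutex_unlock(&workerLock);
    return NULL;
}

/* Remove finished workers; sleep on the CV while any still run. */
static void
reap_workers(void)
{
    pthread_mutex_lock(&workerLock);
    while (workers) {
	struct worker *w, *prevW = NULL, *nextW;

	for (w = workers; w; w = nextW) {
	    nextW = w->next;
	    if (w->done) {
		if (prevW)
		    prevW->next = w->next;
		else
		    workers = w->next;
		free(w);
		continue;	/* don't advance prevW past a removed node */
	    }
	    prevW = w;
	}
	if (workers)
	    pthread_cond_wait(&workerCV, &workerLock);
    }
    pthread_mutex_unlock(&workerLock);
}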
Example #10
int
afs_InvalidateAllSegments(struct vcache *avc)
{
    struct dcache *tdc;
    afs_int32 hash;
    afs_int32 index;
    struct dcache **dcList;
    int i, dcListMax, dcListCount;

    AFS_STATCNT(afs_InvalidateAllSegments);
    afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    hash = DVHash(&avc->f.fid);
    avc->f.truncPos = AFS_NOTRUNC;	/* don't truncate later */
    avc->f.states &= ~CExtendedFile;	/* not any more */
    ObtainWriteLock(&afs_xcbhash, 459);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* mark status information as bad, too */
    ReleaseWriteLock(&afs_xcbhash);
    if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
	osi_dnlc_purgedp(avc);
    /* Blow away pages; for now, only for Solaris */
#if	(defined(AFS_SUN5_ENV))
    if (WriteLocked(&avc->lock))
	osi_ReleaseVM(avc, (afs_ucred_t *)0);
#endif
    /*
     * Block out others from screwing with this table; is a read lock
     * sufficient?
     */
    ObtainWriteLock(&afs_xdcache, 286);
    dcListMax = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetDSlot(index, 0);
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid))
		dcListMax++;
	    afs_PutDCache(tdc);
	}
	index = afs_dvnextTbl[index];
    }

    dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
    dcListCount = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetDSlot(index, 0);
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		/* same file? we'll zap it */
		if (afs_indexFlags[index] & IFDataMod) {
		    afs_stats_cmperf.cacheCurrDirtyChunks--;
		    /* don't write it back */
		    afs_indexFlags[index] &= ~IFDataMod;
		}
		afs_indexFlags[index] &= ~IFAnyPages;
		if (dcListCount < dcListMax)
		    dcList[dcListCount++] = tdc;
		else
		    afs_PutDCache(tdc);
	    } else {
		afs_PutDCache(tdc);
	    }
	}
	index = afs_dvnextTbl[index];
    }
    ReleaseWriteLock(&afs_xdcache);

    for (i = 0; i < dcListCount; i++) {
	tdc = dcList[i];

	ObtainWriteLock(&tdc->lock, 679);
	ZapDCE(tdc);
	if (vType(avc) == VDIR)
	    DZap(tdc);
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    osi_Free(dcList, dcListMax * sizeof(struct dcache *));

    return 0;
}
Example #11
static int
extractPioctlToken(struct tokenJar *token,
		   struct token_opaque *opaque) {
    XDR xdrs;
    struct ktc_tokenUnion *pioctlToken;
    int code;

    memset(opaque, 0, sizeof(token_opaque));

    pioctlToken = osi_Alloc(sizeof(struct ktc_tokenUnion));
    if (pioctlToken == NULL)
	return ENOMEM;

    pioctlToken->at_type = token->type;

    switch (token->type) {
      case RX_SECIDX_KAD:
	code = rxkad_extractTokenForPioctl(token, pioctlToken);
	break;
      default:
	code = EINVAL;
    }

    if (code)
	goto out;

    xdrlen_create(&xdrs);
    if (!xdr_ktc_tokenUnion(&xdrs, pioctlToken)) {
	code = EINVAL;
	xdr_destroy(&xdrs);
	goto out;
    }

    opaque->token_opaque_len = xdr_getpos(&xdrs);
    xdr_destroy(&xdrs);

    opaque->token_opaque_val = osi_Alloc(opaque->token_opaque_len);
    if (opaque->token_opaque_val == NULL) {
	code = ENOMEM;
	goto out;
    }

    xdrmem_create(&xdrs,
		  opaque->token_opaque_val,
		  opaque->token_opaque_len,
		  XDR_ENCODE);
    if (!xdr_ktc_tokenUnion(&xdrs, pioctlToken)) {
	code = EINVAL;
	xdr_destroy(&xdrs);
	goto out;
    }
    xdr_destroy(&xdrs);

out:
    xdr_free((xdrproc_t) xdr_ktc_tokenUnion, &pioctlToken);
    osi_Free(pioctlToken, sizeof(struct ktc_tokenUnion));

    if (code != 0) {
	osi_Free(opaque->token_opaque_val, opaque->token_opaque_len);
	opaque->token_opaque_val = NULL;
	opaque->token_opaque_len = 0;
    }
    return code;
}
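extractPioctlToken serializes the union twice: a length-only XDR stream (xdrlen_create) measures the encoding, then a memory stream (xdrmem_create) writes into a buffer of exactly that size. The same measure-allocate-encode pattern in portable C, using snprintf's NULL/0 sizing mode as a stand-in for the length-only pass:

#include <stdio.h>
#include <stdlib.h>

/* Encode value into a freshly sized buffer: pass 1 measures with no
 * output buffer, pass 2 writes into an allocation of exactly that size. */
static char *
encode_sized(int value, size_t *lenp)
{
    int n = snprintf(NULL, 0, "token:%d", value);	/* measure */
    char *buf;

    if (n < 0)
	return NULL;
    buf = malloc(n + 1);
    if (buf == NULL)
	return NULL;
    snprintf(buf, n + 1, "token:%d", value);		/* encode */
    *lenp = (size_t)n;
    return buf;
}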
Example #12
/* called with the GLOCK held */
int
afs_syscall_pioctl(char *path, unsigned int com, caddr_t cmarg, int follow)
{
    cred_t *credp = crref();	/* don't free until done! */
    struct afs_ioctl data;
    struct clientcred ccred;
    struct rmtbulk idata, odata;
    short in_size, out_size;
    afs_int32 code = 0, pag, err;
    gid_t g0, g1;
    char *abspath, *pathbuf = 0;

    AFS_STATCNT(afs_syscall_pioctl);
    if (follow)
	follow = 1;		/* compat. with old venus */
    code = copyin_afs_ioctl(cmarg, &data);
    if (code) goto out;

    if ((com & 0xff) == 90) {
	/* PSetClientContext, in any space */
	code = EINVAL;
	goto out;
    }

    /* Special handling for a few pioctls */
    switch (com & 0xffff) {
	case (0x5600 |  3): /* VIOCSETTOK */
	    code = afspag_PSetTokens(data.in, data.in_size, &credp);
	    if (code) goto out;
	    break;

	case (0x5600 |  9): /* VIOCUNLOG */
	case (0x5600 | 21): /* VIOCUNPAG */
	    code = afspag_PUnlog(data.in, data.in_size, &credp);
	    if (code) goto out;
	    break;

	case (0x5600 | 38): /* VIOC_AFS_SYSNAME */
	    code = afspag_PSetSysName(data.in, data.in_size, &credp);
	    if (code) goto out;
	    break;
    }

    /* Set up credentials */
    memset(&ccred, 0, sizeof(ccred));
    pag = PagInCred(credp);
    ccred.uid = afs_cr_uid(credp);
    if (pag != NOPAG) {
	 afs_get_groups_from_pag(pag, &g0, &g1);
	 ccred.group0 = g0;
	 ccred.group1 = g1;
    }

    /*
     * Copy the path and convert to absolute, if one was given.
     * NB: We can only use osi_AllocLargeSpace here as long as
     * RMTSYS_MAXPATHLEN is less than AFS_LRALLOCSIZ.
     */
    if (path) {
	pathbuf = osi_AllocLargeSpace(RMTSYS_MAXPATHLEN);
	if (!pathbuf) {
	    code = ENOMEM;
	    goto out;
	}
	code = osi_abspath(path, pathbuf, RMTSYS_MAXPATHLEN, 0, &abspath);
	if (code)
	    goto out_path;
    } else {
	abspath = NIL_PATHP;
    }

    /* Allocate, copy, and convert incoming data */
    idata.rmtbulk_len = in_size = data.in_size;
    if (in_size  < 0 || in_size  > MAXBUFFERLEN) {
	code = EINVAL;
	goto out_path;
    }
    if (in_size > AFS_LRALLOCSIZ)
	 idata.rmtbulk_val = osi_Alloc(in_size);
    else
	 idata.rmtbulk_val = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    if (!idata.rmtbulk_val) {
	code = ENOMEM;
	goto out_path;
    }
    if (in_size) {
	AFS_COPYIN(data.in, idata.rmtbulk_val, in_size, code);
	if (code)
	    goto out_idata;
	inparam_conversion(com, idata.rmtbulk_val, in_size, 0);
    }

    /* Allocate space for outgoing data */
    odata.rmtbulk_len = out_size = data.out_size;
    if (out_size < 0 || out_size > MAXBUFFERLEN) {
	code = EINVAL;
	goto out_idata;
    }
    if (out_size > AFS_LRALLOCSIZ)
	 odata.rmtbulk_val = osi_Alloc(out_size);
    else
	 odata.rmtbulk_val = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    if (!odata.rmtbulk_val) {
	code = ENOMEM;
	goto out_idata;
    }

    AFS_GUNLOCK();
    code = RMTSYS_Pioctl(rmtsys_conn, &ccred, abspath, com, follow,
			 &idata, &odata, &err);
    AFS_GLOCK();
    if (code)
	goto out_odata;

    /* Convert and copy out the result */
    if (odata.rmtbulk_len > out_size) {
	code = E2BIG;
	goto out_odata;
    }
    if (odata.rmtbulk_len) {
	outparam_conversion(com, odata.rmtbulk_val, odata.rmtbulk_len, 1);
	AFS_COPYOUT(odata.rmtbulk_val, data.out, odata.rmtbulk_len, code);
    }
    if (!code)
	code = err;

out_odata:
    if (out_size > AFS_LRALLOCSIZ)
	osi_Free(odata.rmtbulk_val, out_size);
    else
	osi_FreeLargeSpace(odata.rmtbulk_val);

out_idata:
    if (in_size > AFS_LRALLOCSIZ)
	osi_Free(idata.rmtbulk_val, in_size);
    else
	osi_FreeLargeSpace(idata.rmtbulk_val);

out_path:
    if (path)
	osi_FreeLargeSpace(pathbuf);

out:
    crfree(credp);
#if defined(KERNEL_HAVE_UERROR)
    if (!getuerror())
	setuerror(code);
    return (getuerror());
#else
    return (code);
#endif
}
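The pioctl path chooses an allocator by size: buffers up to AFS_LRALLOCSIZ come from the pre-sized large-space pool, bigger ones from the general allocator, and each free repeats the same threshold test so a block returns to the pool it came from. A standalone sketch of that pairing (pool functions and the constant are invented for illustration):

#include <stdlib.h>

#define POOL_BLOCK 4096		/* stand-in for AFS_LRALLOCSIZ */

/* Hypothetical fixed-size pool; a real one would recycle blocks. */
static void *pool_alloc(void) { return malloc(POOL_BLOCK); }
static void pool_free(void *p) { free(p); }

static void *
buf_alloc(size_t n)
{
    /* Small requests take a whole pool block; large ones hit the heap. */
    return (n > POOL_BLOCK) ? malloc(n) : pool_alloc();
}

static void
buf_free(void *p, size_t n)
{
    /* Re-test with the same threshold, or a pool block would be handed
     * to the wrong deallocator. */
    if (n > POOL_BLOCK)
	free(p);
    else
	pool_free(p);
}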