Example #1
struct rx_securityClass *
rxkad_NewServerSecurityObject(rxkad_level level, void *get_key_rock,
			      int (*get_key) (void *get_key_rock, int kvno,
					      struct ktc_encryptionKey *
					      serverKey),
			      int (*user_ok) (char *name, char *instance,
					      char *cell, afs_int32 kvno))
{
    struct rx_securityClass *tsc;
    struct rxkad_sprivate *tsp;
    int size;

    if (!get_key)
	return 0;

    size = sizeof(struct rx_securityClass);
    tsc = (struct rx_securityClass *)osi_Alloc(size);
    memset(tsc, 0, size);
    tsc->refCount = 1;		/* caller has one reference */
    tsc->ops = &rxkad_server_ops;
    size = sizeof(struct rxkad_sprivate);
    tsp = (struct rxkad_sprivate *)osi_Alloc(size);
    memset(tsp, 0, size);
    tsc->privateData = (char *)tsp;

    tsp->type |= rxkad_server;	/* so can identify later */
    tsp->level = level;		/* level of encryption */
    tsp->get_key_rock = get_key_rock;
    tsp->get_key = get_key;	/* to get server ticket */
    tsp->user_ok = user_ok;	/* to inform server of client id. */
    init_random_int32();

    INC_RXKAD_STATS(serverObjects);
    return tsc;
}
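
For orientation, here is a hedged sketch of how a server typically consumes the object built above: the security class goes into a vector indexed by security index and is handed to rx_NewService. The service id, the ExecuteRequest-style dispatcher, the get_key callback, and the header paths are assumptions standing in for the caller's own definitions, not part of the code above.

#include <rx/rx.h>
#include <rx/rx_null.h>
#include <rx/rxkad.h>

/* Hypothetical placeholders for the caller's own definitions. */
#define SAMPLE_SERVICE_ID 123
extern afs_int32 SampleExecuteRequest(struct rx_call *acall);

static int
sample_get_key(void *rock, int kvno, struct ktc_encryptionKey *key)
{
    /* look up the server's key for this kvno; return 0 on success */
    return 0;
}

void
StartSampleService(void)
{
    struct rx_securityClass *sc[3];
    struct rx_service *svc;

    sc[0] = rxnull_NewServerSecurityObject();	/* unauthenticated calls */
    sc[1] = NULL;				/* security index 1 is historically unused */
    sc[2] = rxkad_NewServerSecurityObject(rxkad_clear, NULL,
					  sample_get_key, NULL);
    svc = rx_NewService(0 /* default port */, SAMPLE_SERVICE_ID, "sample",
			sc, 3, SampleExecuteRequest);
    if (svc)
	rx_StartServer(0);	/* 0: return to the caller instead of donating this thread */
}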
Example #2
struct multi_handle *
multi_Init(struct rx_connection **conns, int nConns)
{
    struct rx_call **calls;
    short *ready;
    struct multi_handle *mh;
    int i;

    /*
     * Note: all structures that are possibly referenced by other
     * processes must be allocated.  In some kernels variables allocated on
     * a process stack will not be accessible to other processes
     */

    calls = (struct rx_call **)osi_Alloc(sizeof(struct rx_call *) * nConns);
    ready = (short *)osi_Alloc(sizeof(short *) * nConns);
    mh = (struct multi_handle *)osi_Alloc(sizeof(struct multi_handle));
    if (!calls || !ready || !mh)
	osi_Panic("multi_Rx: no mem\n");
    memset(mh, 0, sizeof(struct multi_handle));
    mh->calls = calls;
    mh->nextReady = mh->firstNotReady = mh->ready = ready;
    mh->nReady = 0;
    mh->nConns = nConns;

#ifdef RX_ENABLE_LOCKS
    MUTEX_INIT(&mh->lock, "rx_multi_lock", MUTEX_DEFAULT, 0);
    CV_INIT(&mh->cv, "rx_multi_cv", CV_DEFAULT, 0);
#endif /* RX_ENABLE_LOCKS */
    for (i = 0; i < nConns; i++) {
	struct rx_call *call;
	call = mh->calls[i] = rx_NewCall(conns[i]);
	rx_SetArrivalProc(call, multi_Ready, (void *) mh, i);
    }
    return mh;
}
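
Callers in the tree do not normally invoke multi_Init directly; the multi_Rx macros wrap it. Below is a simplified, hand-written sketch of that driving loop, under the assumption that multi_Select returns the index of the next completed call (or a negative value when none remain) and that multi_Finalize ends the calls and frees the handle; StartMyCall and HandleMyResult are hypothetical stand-ins for the rxgen-generated per-RPC start and end routines.

/* Simplified sketch of the loop the multi_Rx macros expand into (not the
 * literal expansion). StartMyCall and HandleMyResult are hypothetical. */
static void
BroadcastSketch(struct rx_connection **conns, int nConns)
{
    struct multi_handle *mh = multi_Init(conns, nConns);
    int i;

    for (i = 0; i < nConns; i++)
	StartMyCall(mh->calls[i]);	/* queue the request on every call */

    while ((i = multi_Select(mh)) >= 0)	/* wait for the next call to complete */
	HandleMyResult(mh->calls[i], i);

    multi_Finalize(mh);			/* ends remaining calls and frees mh */
}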
Example #3
static long
DoClient(int index, opaque rock)
{
    struct client *c = (struct client *)rock;
    long code = 0;
    int i;
    u_long n, inc_n;

    n = 95678;
    for (i = 0; i < c->fastCalls[index]; i++) {
	code = RXKST_Fast(c->conn, n, &inc_n);
	if (code)
	    return (code);
	if (n + 1 != inc_n)
	    return RXKST_INCFAILED;
	n++;
    }

    for (i = 0; i < c->slowCalls[index]; i++) {
	u_long ntime;
	u_long now;
	code = RXKST_Slow(c->conn, 1, &ntime);
	if (code)
	    return (code);
	now = FT_ApproxTime();
	if ((ntime < now - maxSkew) || (ntime > now + maxSkew))
	    return RXKST_TIMESKEW;
    }

    if (c->copiousCalls[index] > 0) {
	u_long buflen = 10000;
	char *buf = osi_Alloc(buflen);
	for (i = 0; i < c->copiousCalls[index]; i++) {
	    code = Copious(c, buf, buflen);
	    if (code)
		break;
	}
	osi_Free(buf, buflen);
	if (code)
	    return code;
    }
    return 0;
}
Example #4
/* Cannot have static linkage--called from BPrefetch (afs_daemons) */
afs_int32
afs_PrefetchNoCache(struct vcache *avc,
		    afs_ucred_t *acred,
		    struct nocache_read_request *bparms)
{
    struct uio *auio;
#ifndef UKERNEL
    struct iovec *iovecp;
#endif
    struct vrequest *areq;
    afs_int32 code = 0;
    struct rx_connection *rxconn;
#ifdef AFS_64BIT_CLIENT
    afs_int32 length_hi, bytes, locked;
#endif

    struct afs_conn *tc;
    struct rx_call *tcall;
    struct tlocal1 {
	struct AFSVolSync tsync;
	struct AFSFetchStatus OutStatus;
	struct AFSCallBack CallBack;
    };
    struct tlocal1 *tcallspec;

    auio = bparms->auio;
    areq = bparms->areq;
#ifndef UKERNEL
    iovecp = auio->uio_iov;
#endif

    tcallspec = osi_Alloc(sizeof(struct tlocal1));
    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK /* ignored */, &rxconn);
	if (tc) {
	    avc->callback = tc->parent->srvr->server;
	    tcall = rx_NewCall(rxconn);
#ifdef AFS_64BIT_CLIENT
	    if (!afs_serverHasNo64Bit(tc)) {
		code = StartRXAFS_FetchData64(tcall,
					      (struct AFSFid *) &avc->f.fid.Fid,
					      auio->uio_offset,
					      bparms->length);
		if (code == 0) {
		    COND_GUNLOCK(locked);
		    bytes = rx_Read(tcall, (char *)&length_hi,
				    sizeof(afs_int32));
		    COND_RE_GLOCK(locked);

		    if (bytes != sizeof(afs_int32)) {
			length_hi = 0;
			code = rx_Error(tcall);
			COND_GUNLOCK(locked);
			code = rx_EndCall(tcall, code);
			COND_RE_GLOCK(locked);
			tcall = NULL;
		    }
		}
	    } /* afs_serverHasNo64Bit */
	    if (code == RXGEN_OPCODE || afs_serverHasNo64Bit(tc)) {
		if (auio->uio_offset > 0x7FFFFFFF) {
		    code = EFBIG;
		} else {
		    afs_int32 pos;
		    pos = auio->uio_offset;
		    COND_GUNLOCK(locked);
		    if (!tcall)
			tcall = rx_NewCall(rxconn);
		    code = StartRXAFS_FetchData(tcall,
					(struct AFSFid *) &avc->f.fid.Fid,
					pos, bparms->length);
		    COND_RE_GLOCK(locked);
		}
		afs_serverSetNo64Bit(tc);
	    }
#else
	    code = StartRXAFS_FetchData(tcall,
			                (struct AFSFid *) &avc->f.fid.Fid,
					auio->uio_offset, bparms->length);
#endif
	    if (code == 0) {
		code = afs_NoCacheFetchProc(tcall, avc, auio,
				            1 /* release_pages */,
					    bparms->length);
	    } else {
		afs_warn("BYPASS: StartRXAFS_FetchData failed: %d\n", code);
		unlock_and_release_pages(auio);
		goto done;
	    }
	    if (code == 0) {
		code = EndRXAFS_FetchData(tcall, &tcallspec->OutStatus,
					  &tcallspec->CallBack,
					  &tcallspec->tsync);
	    } else {
		afs_warn("BYPASS: NoCacheFetchProc failed: %d\n", code);
	    }
	    code = rx_EndCall(tcall, code);
	} else {
	    afs_warn("BYPASS: No connection.\n");
	    code = -1;
	    unlock_and_release_pages(auio);
	    goto done;
	}
    } while (afs_Analyze(tc, rxconn, code, &avc->f.fid, areq,
						 AFS_STATS_FS_RPCIDX_FETCHDATA,
						 SHARED_LOCK,0));
done:
    /*
     * Copy appropriate fields into vcache
     */

    if (!code)
	afs_ProcessFS(avc, &tcallspec->OutStatus, areq);

    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(tcallspec, sizeof(struct tlocal1));
    osi_Free(bparms, sizeof(struct nocache_read_request));
#ifndef UKERNEL
    /* in UKERNEL, the "pages" are passed in */
    osi_Free(iovecp, auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(auio, sizeof(struct uio));
#endif
    return code;
}
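
Stripped of the bypass-cache and 64-bit details, the do/while wrapper above is the cache manager's standard connection-retry idiom; a hedged distillation follows. PerformRPC is a hypothetical placeholder for the Start/read/End sequence, and afs_Analyze is what decides, based on the error, whether to retry against another server.

/* Hedged distillation of the afs_Conn/afs_Analyze retry idiom used above.
 * PerformRPC is a hypothetical stand-in for the real Start/End RPC work. */
static afs_int32
RetryIdiomSketch(struct vcache *avc, struct vrequest *areq)
{
    struct afs_conn *tc;
    struct rx_connection *rxconn;
    struct rx_call *tcall;
    afs_int32 code;

    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
	if (tc) {
	    tcall = rx_NewCall(rxconn);
	    code = PerformRPC(tcall);		/* hypothetical RPC body */
	    code = rx_EndCall(tcall, code);
	} else {
	    code = -1;				/* no usable connection */
	}
    } while (afs_Analyze(tc, rxconn, code, &avc->f.fid, areq,
			 AFS_STATS_FS_RPCIDX_FETCHDATA, SHARED_LOCK, 0));
    return code;
}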
Example #5
/* dispatch a no-cache read request */
afs_int32
afs_ReadNoCache(struct vcache *avc,
		struct nocache_read_request *bparms,
		afs_ucred_t *acred)
{
    afs_int32 code;
    afs_int32 bcnt;
    struct brequest *breq;
    struct vrequest *areq;

    /* the receiver will free this */
    areq = osi_Alloc(sizeof(struct vrequest));

    if (avc->vc_error) {
	code = EIO;
	afs_warn("afs_ReadNoCache VCache Error!\n");
	goto cleanup;
    }
    if ((code = afs_InitReq(areq, acred))) {
	afs_warn("afs_ReadNoCache afs_InitReq error!\n");
	goto cleanup;
    }

    AFS_GLOCK();
    code = afs_VerifyVCache(avc, areq);
    AFS_GUNLOCK();

    if (code) {
	code = afs_CheckCode(code, areq, 11);	/* failed to get it */
	afs_warn("afs_ReadNoCache Failed to verify VCache!\n");
	goto cleanup;
    }

    bparms->areq = areq;

    /* and queue this one */
    bcnt = 1;
    AFS_GLOCK();
    while(bcnt < 20) {
	breq = afs_BQueue(BOP_FETCH_NOCACHE, avc, B_DONTWAIT, 0, acred, 1, 1,
			  bparms, (void *)0, (void *)0);
	if(breq != 0) {
	    code = 0;
	    break;
	}
	afs_osi_Wait(10 * bcnt, 0, 0);
	bcnt++;		/* back off a little longer on each retry */
    }
    AFS_GUNLOCK();

    if(!breq) {
    	code = EBUSY;
	goto cleanup;
    }

    return code;

cleanup:
    /* If there's a problem before we queue the request, we need to
     * do everything that would normally happen when the request was
     * processed, like unlocking the pages and freeing memory.
     */
    unlock_and_release_pages(bparms->auio);
    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(bparms->auio->uio_iov,
	     bparms->auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(bparms->auio, sizeof(struct uio));
    osi_Free(bparms, sizeof(struct nocache_read_request));
    return code;
}
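
A caller is expected to hand afs_ReadNoCache a fully built nocache_read_request; the sketch below shows that setup, assuming the structure carries auio, areq, offset and length members (presumably as declared in afs_bypasscache.h) and that the iovec array is later pointed at the target pages. QueueNoCacheRead and npages are hypothetical. On success, ownership of all three allocations passes to the background daemon, which frees them in afs_PrefetchNoCache above.

/* Hedged caller-side sketch of queueing a bypass-cache read. The iovec
 * contents are schematic; real callers fill them with page addresses. */
static afs_int32
QueueNoCacheRead(struct vcache *avc, afs_size_t offset, afs_size_t length,
		 int npages, afs_ucred_t *credp)
{
    struct nocache_read_request *bparms;
    struct uio *auio;
    struct iovec *iovecp;

    bparms = osi_Alloc(sizeof(struct nocache_read_request));
    auio = osi_Alloc(sizeof(struct uio));
    iovecp = osi_Alloc(npages * sizeof(struct iovec));
    memset(bparms, 0, sizeof(*bparms));
    memset(auio, 0, sizeof(*auio));
    memset(iovecp, 0, npages * sizeof(struct iovec));

    auio->uio_iov = iovecp;		/* caller points these at its pages */
    auio->uio_iovcnt = npages;
    auio->uio_offset = offset;
    auio->uio_resid = length;

    bparms->auio = auio;
    bparms->offset = offset;
    bparms->length = length;
    /* bparms->areq is filled in by afs_ReadNoCache itself */

    return afs_ReadNoCache(avc, bparms, credp);
}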
Example #6
/*
 * afs_TruncateAllSegments
 *
 * Description:
 *	Truncate a cache file.
 *
 * Parameters:
 *	avc  : Ptr to vcache entry to truncate.
 *	alen : Number of bytes to make the file.
 *	areq : Ptr to request structure.
 *
 * Environment:
 *	Called with avc write-locked; in VFS40 systems, pvnLock is also
 *	held.
 */
int
afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
			struct vrequest *areq, afs_ucred_t *acred)
{
    struct dcache *tdc;
    afs_int32 code;
    afs_int32 index;
    afs_size_t newSize;

    int dcCount, dcPos;
    struct dcache **tdcArray = NULL;

    AFS_STATCNT(afs_TruncateAllSegments);
    avc->f.m.Date = osi_Time();
    afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
    if (alen >= avc->f.m.Length) {
	/*
	 * Special speedup since Sun's vm extends the file this way;
	 * we've never written to the file thus we can just set the new
	 * length and avoid the needless calls below.
	 * Also used for ftruncate calls which can extend the file.
	 * To completely minimize the possible extra StoreMini RPC, we really
	 * should keep the ExtendedPos as well and clear this flag if we
	 * truncate below that value before we store the file back.
	 */
	avc->f.states |= CExtendedFile;
	avc->f.m.Length = alen;
	return 0;
    }
#if	(defined(AFS_SUN5_ENV))

    /* Zero unused portion of last page */
    osi_VM_PreTruncate(avc, alen, acred);

#endif

#if	(defined(AFS_SUN5_ENV))
    ObtainWriteLock(&avc->vlock, 546);
    avc->activeV++;		/* Block new getpages */
    ReleaseWriteLock(&avc->vlock);
#endif

    ReleaseWriteLock(&avc->lock);
    AFS_GUNLOCK();

    /* Flush pages beyond end-of-file. */
    osi_VM_Truncate(avc, alen, acred);

    AFS_GLOCK();
    ObtainWriteLock(&avc->lock, 79);

    avc->f.m.Length = alen;

    if (alen < avc->f.truncPos)
	avc->f.truncPos = alen;
    code = DVHash(&avc->f.fid);

    /* block out others from screwing with this table */
    ObtainWriteLock(&afs_xdcache, 287);

    dcCount = 0;
    for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		ReleaseWriteLock(&afs_xdcache);
		code = EIO;
		goto done;
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid))
		dcCount++;
	    afs_PutDCache(tdc);
	}
	index = afs_dvnextTbl[index];
    }

    /* Now allocate space where we can save those dcache entries, and
     * do a second pass over them..  Since we're holding xdcache, it
     * shouldn't be changing.
     */
    tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
    dcPos = 0;

    for (index = afs_dvhashTbl[code]; index != NULLIDX; index = afs_dvnextTbl[index]) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		/* make sure we put back all of the tdcArray members before
		 * bailing out */
		/* remember, the last valid tdc is at dcPos-1, so start at
		 * dcPos-1, not at dcPos itself. */
		for (dcPos = dcPos - 1; dcPos >= 0; dcPos--) {
		    tdc = tdcArray[dcPos];
		    afs_PutDCache(tdc);
		}
		code = EIO;
		goto done;
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		/* same file, and modified, we'll store it back */
		if (dcPos < dcCount) {
		    tdcArray[dcPos++] = tdc;
		} else {
		    afs_PutDCache(tdc);
		}
	    } else {
		afs_PutDCache(tdc);
	    }
	}
    }

    ReleaseWriteLock(&afs_xdcache);

    /* Now we loop over the array of dcache entries and truncate them */
    for (index = 0; index < dcPos; index++) {
	struct osi_file *tfile;

	tdc = tdcArray[index];

	newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
	if (newSize < 0)
	    newSize = 0;
	ObtainSharedLock(&tdc->lock, 672);
	if (newSize < tdc->f.chunkBytes && newSize < MAX_AFS_UINT32) {
	    UpgradeSToWLock(&tdc->lock, 673);
	    tdc->f.states |= DWriting;
	    tfile = afs_CFileOpen(&tdc->f.inode);
	    afs_CFileTruncate(tfile, (afs_int32)newSize);
	    afs_CFileClose(tfile);
	    afs_AdjustSize(tdc, (afs_int32)newSize);
	    if (alen < tdc->validPos) {
                if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
                    tdc->validPos = 0;
                else
                    tdc->validPos = alen;
            }
	    ConvertWToSLock(&tdc->lock);
	}
	ReleaseSharedLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    code = 0;

 done:
    if (tdcArray) {
	osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
    }
#if	(defined(AFS_SUN5_ENV))
    ObtainWriteLock(&avc->vlock, 547);
    if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
	avc->vstates &= ~VRevokeWait;
	afs_osi_Wakeup((char *)&avc->vstates);
    }
    ReleaseWriteLock(&avc->vlock);
#endif

    return code;
}
Example #7
int
afs_InvalidateAllSegments(struct vcache *avc)
{
    struct dcache *tdc;
    afs_int32 hash;
    afs_int32 index;
    struct dcache **dcList;
    int i, dcListMax, dcListCount;

    AFS_STATCNT(afs_InvalidateAllSegments);
    afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    hash = DVHash(&avc->f.fid);
    avc->f.truncPos = AFS_NOTRUNC;	/* don't truncate later */
    avc->f.states &= ~CExtendedFile;	/* not any more */
    ObtainWriteLock(&afs_xcbhash, 459);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* mark status information as bad, too */
    ReleaseWriteLock(&afs_xcbhash);
    if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
	osi_dnlc_purgedp(avc);
    /* Blow away pages; for now, only for Solaris */
#if	(defined(AFS_SUN5_ENV))
    if (WriteLocked(&avc->lock))
	osi_ReleaseVM(avc, (afs_ucred_t *)0);
#endif
    /*
     * Block out others from screwing with this table; is a read lock
     * sufficient?
     */
    ObtainWriteLock(&afs_xdcache, 286);
    dcListMax = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		/* In the case of fatal errors during stores, we MUST
		 * invalidate all of the relevant chunks. Otherwise, the chunks
		 * will be left with the 'new' data that was never successfully
		 * written to the server, but the DV in the dcache is still the
		 * old DV. So, we may indefinitely serve data to applications
		 * that is not actually in the file on the fileserver. If we
		 * cannot afs_GetValidDSlot the appropriate entries, currently
		 * there is no way to ensure the dcache is invalidated. So for
		 * now, to avoid risking serving bad data from the cache, panic
		 * instead. */
		osi_Panic("afs_InvalidateAllSegments tdc count");
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid))
		dcListMax++;
	    afs_PutDCache(tdc);
	}
	index = afs_dvnextTbl[index];
    }

    dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
    dcListCount = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		/* We cannot proceed after getting this error; we risk serving
		 * incorrect data to applications. So panic instead. See the
		 * above comment next to the previous afs_GetValidDSlot call
		 * for details. */
		osi_Panic("afs_InvalidateAllSegments tdc store");
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		/* same file? we'll zap it */
		if (afs_indexFlags[index] & IFDataMod) {
		    afs_stats_cmperf.cacheCurrDirtyChunks--;
		    /* don't write it back */
		    afs_indexFlags[index] &= ~IFDataMod;
		}
		afs_indexFlags[index] &= ~IFAnyPages;
		if (dcListCount < dcListMax)
		    dcList[dcListCount++] = tdc;
		else
		    afs_PutDCache(tdc);
	    } else {
		afs_PutDCache(tdc);
	    }
	}
	index = afs_dvnextTbl[index];
    }
    ReleaseWriteLock(&afs_xdcache);

    for (i = 0; i < dcListCount; i++) {
	tdc = dcList[i];

	ObtainWriteLock(&tdc->lock, 679);
	ZapDCE(tdc);
	if (vType(avc) == VDIR)
	    DZap(tdc);
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    osi_Free(dcList, dcListMax * sizeof(struct dcache *));

    return 0;
}
Example #8
/* XXX this does some copying of data in and out of the packet, but I'll bet it
 * could just do it in place, especially if I used rx_Pullup...
 */
int
rxkad_CheckResponse(struct rx_securityClass *aobj,
		    struct rx_connection *aconn, struct rx_packet *apacket)
{
    struct rxkad_sconn *sconn;
    struct rxkad_sprivate *tsp;
    struct ktc_encryptionKey serverKey;
    struct rxkad_oldChallengeResponse oldr;	/* response format */
    struct rxkad_v2ChallengeResponse v2r;
    afs_int32 tlen;		/* ticket len */
    afs_int32 kvno;		/* key version of ticket */
    char tix[MAXKTCTICKETLEN];
    afs_int32 incChallengeID;
    rxkad_level level;
    int code;
    /* ticket contents */
    struct ktc_principal client;
    struct ktc_encryptionKey sessionkey;
    afs_int32 host;
    afs_uint32 start;
    afs_uint32 end;
    unsigned int pos;
    struct rxkad_serverinfo *rock;

    sconn = (struct rxkad_sconn *)aconn->securityData;
    tsp = (struct rxkad_sprivate *)aobj->privateData;

    if (sconn->cksumSeen) {
	/* expect v2 response, leave fields in v2r in network order for cksum
	 * computation which follows decryption. */
	if (rx_GetDataSize(apacket) < sizeof(v2r))
	    return RXKADPACKETSHORT;
	rx_packetread(apacket, 0, sizeof(v2r), &v2r);
	pos = sizeof(v2r);
	/* version == 2 */
	/* ignore spare */
	kvno = ntohl(v2r.kvno);
	tlen = ntohl(v2r.ticketLen);
	if (rx_GetDataSize(apacket) < sizeof(v2r) + tlen)
	    return RXKADPACKETSHORT;
    } else {
	/* expect old format response */
	if (rx_GetDataSize(apacket) < sizeof(oldr))
	    return RXKADPACKETSHORT;
	rx_packetread(apacket, 0, sizeof(oldr), &oldr);
	pos = sizeof(oldr);

	kvno = ntohl(oldr.kvno);
	tlen = ntohl(oldr.ticketLen);
	if (rx_GetDataSize(apacket) != sizeof(oldr) + tlen)
	    return RXKADPACKETSHORT;
    }
    if ((tlen < MINKTCTICKETLEN) || (tlen > MAXKTCTICKETLEN))
	return RXKADTICKETLEN;

    rx_packetread(apacket, pos, tlen, tix);	/* get ticket */

    /*
     * We allow the ticket to be optionally decoded by an alternate
     * ticket decoder, if the function variable
     * rxkad_AlternateTicketDecoder is set. That function should
     * return a code of -1 if it wants the ticket to be decoded by
     * the standard decoder.
     */
    if (rxkad_AlternateTicketDecoder) {
	code =
	    rxkad_AlternateTicketDecoder(kvno, tix, tlen, client.name,
					 client.instance, client.cell,
					 &sessionkey, &host, &start, &end);
	if (code && code != -1) {
	    return code;
	}
    } else {
	code = -1;		/* No alternate ticket decoder present */
    }

    /*
     * If the alternate decoder is not present, or returns -1, then
     * assume the ticket is of the default style.
     */
    if (code == -1 && ((kvno == RXKAD_TKT_TYPE_KERBEROS_V5)
	|| (kvno == RXKAD_TKT_TYPE_KERBEROS_V5_ENCPART_ONLY))) {
	code =
	    tkt_DecodeTicket5(tix, tlen, tsp->get_key, tsp->get_key_rock,
			      kvno, client.name, client.instance, client.cell,
			      &sessionkey, &host, &start, &end, 
			      tsp->flags & RXS_CONFIG_FLAGS_DISABLE_DOTCHECK);
	if (code)
	    return code;
    }

    /*
     * If the alternate decoder/kerberos 5 decoder is not present, or
     * returns -1, then assume the ticket is of the default style.
     */
    if (code == -1) {
	/* get ticket's key */
	code = (*tsp->get_key) (tsp->get_key_rock, kvno, &serverKey);
	if (code)
	    return RXKADUNKNOWNKEY;	/* invalid kvno */
	code =
	    tkt_DecodeTicket(tix, tlen, &serverKey, client.name,
			     client.instance, client.cell, &sessionkey, &host,
			     &start, &end);
	if (code)
	    return code;
    }
    code = tkt_CheckTimes(start, end, time(0));
    if (code == 0) 
	return RXKADNOAUTH;
    else if (code == -1)
	return RXKADEXPIRED;
    else if (code < -1)
	return RXKADBADTICKET;

    code = fc_keysched(&sessionkey, sconn->keysched);
    if (code)
	return RXKADBADKEY;
    memcpy(sconn->ivec, &sessionkey, sizeof(sconn->ivec));

    if (sconn->cksumSeen) {
	/* using v2 response */
	afs_uint32 cksum;	/* observed cksum */
	struct rxkad_endpoint endpoint;	/* connections endpoint */
	int i;
	afs_uint32 xor[2];

	memcpy(xor, sconn->ivec, 2 * sizeof(afs_int32));
	fc_cbc_encrypt(&v2r.encrypted, &v2r.encrypted, sizeof(v2r.encrypted),
		       sconn->keysched, xor, DECRYPT);
	cksum = rxkad_CksumChallengeResponse(&v2r);
	if (cksum != v2r.encrypted.endpoint.cksum)
	    return RXKADSEALEDINCON;
	(void)rxkad_SetupEndpoint(aconn, &endpoint);
	v2r.encrypted.endpoint.cksum = 0;
	if (memcmp(&endpoint, &v2r.encrypted.endpoint, sizeof(endpoint)) != 0)
	    return RXKADSEALEDINCON;
	for (i = 0; i < RX_MAXCALLS; i++) {
	    v2r.encrypted.callNumbers[i] =
		ntohl(v2r.encrypted.callNumbers[i]);
	    if (v2r.encrypted.callNumbers[i] < 0)
		return RXKADSEALEDINCON;
	}

	(void)rxi_SetCallNumberVector(aconn, v2r.encrypted.callNumbers);
	incChallengeID = ntohl(v2r.encrypted.incChallengeID);
	level = ntohl(v2r.encrypted.level);
    } else {
	/* expect old format response */
	fc_ecb_encrypt(&oldr.encrypted, &oldr.encrypted, sconn->keysched,
		       DECRYPT);
	incChallengeID = ntohl(oldr.encrypted.incChallengeID);
	level = ntohl(oldr.encrypted.level);
    }
    if (incChallengeID != sconn->challengeID + 1)
	return RXKADOUTOFSEQUENCE;	/* replay attempt */
    if ((level < sconn->level) || (level > rxkad_crypt))
	return RXKADLEVELFAIL;
    sconn->level = level;
    rxkad_SetLevel(aconn, sconn->level);
    INC_RXKAD_STATS(responses[rxkad_LevelIndex(sconn->level)]);
    /* now compute endpoint-specific info used for computing 16 bit checksum */
    rxkad_DeriveXORInfo(aconn, sconn->keysched, sconn->ivec, sconn->preSeq);

    /* otherwise things are ok */
    sconn->expirationTime = end;
    sconn->authenticated = 1;

    if (tsp->user_ok) {
	code = tsp->user_ok(client.name, client.instance, client.cell, kvno);
	if (code)
	    return RXKADNOAUTH;
    } else {			/* save the info for later retrieval */
	int size = sizeof(struct rxkad_serverinfo);
	rock = (struct rxkad_serverinfo *)osi_Alloc(size);
	memset(rock, 0, size);
	rock->kvno = kvno;
	memcpy(&rock->client, &client, sizeof(rock->client));
	sconn->rock = rock;
    }
    return 0;
}
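
When no user_ok callback is registered, the identity saved in sconn->rock above is retrieved later through rxkad_GetServerInfo; a hedged sketch of that retrieval follows, assuming the usual signature (connection in; level, expiry, name, instance, cell and kvno out). WhoIsCalling and its policy are hypothetical.

/* Hedged sketch: recovering the authenticated identity that CheckResponse
 * stored for this connection. Buffer sizes follow the MAXKTC* constants. */
static int
WhoIsCalling(struct rx_call *acall)
{
    struct rx_connection *tconn = rx_ConnectionOf(acall);
    char name[MAXKTCNAMELEN], inst[MAXKTCNAMELEN], cell[MAXKTCREALMLEN];
    afs_int32 kvno;
    afs_uint32 expires;
    rxkad_level level;
    int code;

    if (rx_SecurityClassOf(tconn) != RX_SECIDX_KAD)
	return -1;		/* not an rxkad-authenticated connection */

    code = rxkad_GetServerInfo(tconn, &level, &expires, name, inst, cell, &kvno);
    if (code)
	return code;
    /* name/inst/cell now hold the client principal from the ticket */
    return 0;
}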
Example #9
/* Add the indicated event (function, arg) at the specified clock time */
struct rxevent *
rxevent_Post(struct clock * when, void (*func)(), void *arg, void *arg1)
/* when - When event should happen, in clock (clock.h) units */
{
    struct rxevent *ev, *qe, *qpr;

#ifdef RXDEBUG
    if (Log) {
	struct clock now;

	clock_GetTime(&now);
	fprintf(Log, "%ld.%ld: rxevent_Post(%ld.%ld, %p, %p)\n",
		now.sec, now.usec, when->sec, when->usec, func, arg);
    }
#endif
#if defined(AFS_SGIMP_ENV)
    ASSERT(osi_rxislocked());
#endif

    /*
     * If we're short on free event entries, create a block of new ones and
     * add them to the free queue
     */
    if (queue_IsEmpty(&rxevent_free)) {
	int i;

#if	defined(AFS_AIX32_ENV) && defined(KERNEL)
	ev = (struct rxevent *) rxi_Alloc(sizeof(struct rxevent));
	queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
	ev = (struct rxevent *) osi_Alloc(sizeof(struct rxevent) *
					  rxevent_allocUnit);
	xsp = xfreemallocs;
	xfreemallocs = (struct xfreelist *) ev;
	xfreemallocs->next = xsp;
	for (i = 0; i < rxevent_allocUnit; i++)
	    queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
    }
    /* Grab and initialize a new rxevent structure */
    ev = queue_First(&rxevent_free, rxevent);
    queue_Remove(ev);
    rxevent_nFree--;

    /* Record user defined event state */
    ev->eventTime = *when;
    ev->func = func;
    ev->arg = arg;
    ev->arg1 = arg1;
    rxevent_nPosted += 1;	       /* Rather than ++, to shut high-C up
				        * regarding never-set variables */

    /*
     * Locate a slot for the new entry.  The queue is ordered by time, and we
     * assume that a new entry is likely to be greater than a majority of the
     * entries already on the queue (unless there's very few entries on the
     * queue), so we scan it backwards
     */
    for (queue_ScanBackwards(&rxevent_queue, qe, qpr, rxevent)) {
	if (clock_Ge(when, &qe->eventTime)) {
	    queue_InsertAfter(qe, ev);
	    return ev;
	}
    }
    /* The event is to expire earlier than any existing events */
    queue_Prepend(&rxevent_queue, ev);
    if (rxevent_ScheduledEarlierEvent)
	(*rxevent_ScheduledEarlierEvent) ();	/* Notify our external
						 * scheduler */
    return ev;
}
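
A hedged usage sketch for the interface above: schedule a handler to run roughly 30 seconds from now. RetryHandler and ScheduleRetry are hypothetical; the handler is written with the (event, arg, arg1) shape this older rxevent code uses when firing events, which is an assumption here, and the returned handle can later be passed to rxevent_Cancel if the caller changes its mind.

/* Hedged usage sketch for rxevent_Post as declared above. */
static void
RetryHandler(struct rxevent *ev, void *arg, void *arg1)
{
    /* deferred work goes here; invoked from the event loop */
}

static void
ScheduleRetry(void *arg)
{
    struct clock when;
    struct rxevent *ev;

    clock_GetTime(&when);	/* absolute time base */
    when.sec += 30;		/* fire about 30 seconds from now */
    ev = rxevent_Post(&when, (void (*)())RetryHandler, arg, NULL);
    (void)ev;			/* keep this handle if cancellation may be needed */
}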
Example #10
int
main(int argc, char **argv)
{
    char *hostname;
    struct hostent *hostent;
    afs_uint32 host;
    int logstdout = 0;
    struct rx_connection *conn;
    struct rx_call *call;
    struct rx_peer *peer;
    int err = 0;
    int nCalls = 1, nBytes = 1;
    int bufferSize = 4000000;
    char *buffer;
    char *sendFile = 0;
    int setFD = 0;
    int jumbo = 0;

#if !defined(AFS_NT40_ENV) && !defined(AFS_LINUX20_ENV)
    setlinebuf(stdout);
    rxi_syscallp = test_syscall;
#endif


    argv++;
    argc--;
    while (argc && **argv == '-') {
	if (strcmp(*argv, "-silent") == 0)
	    print = 0;
	if (strcmp(*argv, "-jumbo") == 0)
	    jumbo = 1;
	else if (strcmp(*argv, "-nc") == 0)
	    nCalls = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-nb") == 0)
	    nBytes = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-np") == 0)
	    rx_nPackets = atoi(*++argv), argc--;
	else if (!strcmp(*argv, "-nsf"))
	    rxi_nSendFrags = atoi(*++argv), argc--;
	else if (!strcmp(*argv, "-nrf"))
	    rxi_nRecvFrags = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-twind") == 0)
	    rx_initSendWindow = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-rwind") == 0)
	    rx_initReceiveWindow = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-rxlog") == 0)
	    rxlog = 1;
	else if (strcmp(*argv, "-logstdout") == 0)
	    logstdout = 1;
	else if (strcmp(*argv, "-eventlog") == 0)
	    eventlog = 1;
	else if (strcmp(*argv, "-drop") == 0) {
#ifdef RXDEBUG
	    rx_intentionallyDroppedPacketsPer100 = atoi(*++argv), argc--;
#else
            fprintf(stderr, "ERROR: Compiled without RXDEBUG\n");
#endif
        }
	else if (strcmp(*argv, "-burst") == 0) {
	    burst = atoi(*++argv), argc--;
	    burstTime.sec = atoi(*++argv), argc--;
	    burstTime.usec = atoi(*++argv), argc--;
	} else if (strcmp(*argv, "-retry") == 0) {
	    retryTime.sec = atoi(*++argv), argc--;
	    retryTime.usec = atoi(*++argv), argc--;
	} else if (strcmp(*argv, "-timeout") == 0)
	    timeout = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-fill") == 0)
	    fillPackets++;
	else if (strcmp(*argv, "-file") == 0)
	    sendFile = *++argv, argc--;
	else if (strcmp(*argv, "-timereadvs") == 0)
	    timeReadvs = 1;
	else if (strcmp(*argv, "-wait") == 0) {
	    /* Wait time between calls--to test lastack code */
	    waitTime.sec = atoi(*++argv), argc--;
	    waitTime.usec = atoi(*++argv), argc--;
	} else if (strcmp(*argv, "-compute") == 0) {
	    /* Simulated "compute" time for each call--to test acknowledgement protocol.  This is simulated by doing an iomgr_select:  imperfect, admittedly. */
	    computeTime.sec = atoi(*++argv), argc--;
	    computeTime.usec = atoi(*++argv), argc--;
	} else if (strcmp(*argv, "-fd") == 0) {
	    /* Open at least this many fd's. */
	    setFD = atoi(*++argv), argc--;
	} else {
	    err = 1;
	    break;
	}
	argv++, argc--;
    }
    if (err || argc != 1)
	Quit("usage: rx_ctest [-silent] [-rxlog] [-eventlog] [-nc NCALLS] [-np NPACKETS] hostname");
    hostname = *argv++, argc--;

    if (rxlog || eventlog) {
	if (logstdout)
	    debugFile = stdout;
	else
	    debugFile = fopen("rx_ctest.db", "w");
	if (debugFile == NULL)
	    Quit("Couldn't open rx_ctest.db");
	if (rxlog)
	    rx_debugFile = debugFile;
	if (eventlog)
	    rxevent_debugFile = debugFile;
    }

    signal(SIGINT, intSignal);	/*Changed to sigquit since dbx is broken right now */
#ifndef AFS_NT40_ENV
    signal(SIGQUIT, quitSignal);
#endif

#ifdef AFS_NT40_ENV
    if (afs_winsockInit() < 0) {
	printf("Can't initialize winsock.\n");
	exit(1);
    }
    rx_EnableHotThread();
#endif

    rx_SetUdpBufSize(256 * 1024);

    if (!jumbo)
        rx_SetNoJumbo();

    hostent = gethostbyname(hostname);
    if (!hostent)
	Abort("host %s not found", hostname);
    if (hostent->h_length != 4)
	Abort("host address is disagreeable length (%d)", hostent->h_length);
    memcpy((char *)&host, hostent->h_addr, sizeof(host));
    if (setFD > 0)
	OpenFD(setFD);
    if (rx_Init(0) != 0) {
	printf("RX failed to initialize, exiting.\n");
	exit(2);
    }
    if (setFD > 0) {
	printf("rx_socket=%d\n", rx_socket);
    }

    printf("Using %d packet buffers\n", rx_nPackets);

    conn =
	rx_NewConnection(host, htons(2500), 3,
			 rxnull_NewClientSecurityObject(), 0);

    if (!conn)
	Abort("unable to make a new connection");

    /* Set initial parameters.  This is (currently) not the approved interface */
    peer = rx_PeerOf(conn);
    if (burst)
	peer->burstSize = peer->burst = burst;
    if (!clock_IsZero(&burstTime))
	peer->burstWait = burstTime;
    if (!clock_IsZero(&retryTime))
	peer->rtt = _8THMSEC(&retryTime);
    if (sendFile)
	SendFile(sendFile, conn);
    else {
	buffer = (char *)osi_Alloc(bufferSize);
	while (nCalls--) {
	    struct clock startTime;
	    struct timeval t;
	    int nbytes;
	    int nSent;
	    int bytesSent = 0;
	    int bytesRead = 0;
	    call = rx_NewCall(conn);
	    if (!call)
		Abort("unable to make a new call");

	    clock_GetTime(&startTime);
	    for (bytesSent = 0; bytesSent < nBytes; bytesSent += nSent) {
		int tryCount;
		tryCount =
		    (bufferSize >
		     nBytes - bytesSent) ? nBytes - bytesSent : bufferSize;
		nSent = rx_Write(call, buffer, tryCount);
		if (nSent == 0)
		    break;

	    }
	    for (bytesRead = 0; (nbytes = rx_Read(call, buffer, bufferSize));
		 bytesRead += nbytes) {
	    };
	    if (print)
		printf("Received %d characters in response\n", bytesRead);
	    err = rx_EndCall(call, 0);
	    if (err)
		printf("Error %d returned from rpc call\n", err);
	    else {
		struct clock totalTime;
		float elapsedTime;
		clock_GetTime(&totalTime);
		clock_Sub(&totalTime, &startTime);
		elapsedTime = clock_Float(&totalTime);
		fprintf(stderr,
			"Sent %d bytes in %0.3f seconds:  %0.0f bytes per second\n",
			bytesSent, elapsedTime, bytesSent / elapsedTime);
	    }
	    if (!clock_IsZero(&computeTime)) {
		t.tv_sec = computeTime.sec;
		t.tv_usec = computeTime.usec;
		if (select(0, 0, 0, 0, &t) != 0)
		    Quit("Select didn't return 0");
	    }
	    if (!clock_IsZero(&waitTime)) {
		struct timeval t;
		t.tv_sec = waitTime.sec;
		t.tv_usec = waitTime.usec;
#ifdef AFS_PTHREAD_ENV
		select(0, 0, 0, 0, &t);
#else
		IOMGR_Sleep(t.tv_sec);
#endif
	    }
            if (debugFile)
                rx_PrintPeerStats(debugFile, rx_PeerOf(conn));
            rx_PrintPeerStats(stdout, rx_PeerOf(conn));
	}
    }
    Quit("testclient: done!\n");
    return 0;
}
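
For readers skimming the harness above, here is a hedged distillation of the core client-side call flow with the option parsing and timing removed; the port (htons(2500)), service id (3) and null security object mirror the test, while OneCallSketch and its request/reply buffers are hypothetical.

/* Hedged distillation of the Rx client call flow used in main() above. */
static int
OneCallSketch(afs_uint32 host, char *request, int reqlen,
	      char *reply, int replylen)
{
    struct rx_connection *conn;
    struct rx_call *call;
    int nread, code;

    if (rx_Init(0) != 0)
	return -1;				/* could not initialize Rx */
    conn = rx_NewConnection(host, htons(2500), 3,
			    rxnull_NewClientSecurityObject(), 0);
    call = rx_NewCall(conn);
    rx_Write(call, request, reqlen);		/* a short write surfaces as a call error */
    nread = rx_Read(call, reply, replylen);	/* drain whatever the server sends back */
    code = rx_EndCall(call, 0);
    rx_DestroyConnection(conn);
    return code ? code : nread;
}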
Example #11
int
SendFile(char *file, struct rx_connection *conn)
{
    struct rx_call *call;
    int fd;
    struct stat status;
    int blockSize, bytesLeft;
    char *buf;
    int nbytes;
    int err;
    struct clock startTime;
    int receivedStore = 0;
    struct clock totalReadvDelay;
    int nReadvs;
    int code;
#ifdef	AFS_AIX_ENV
#include <sys/statfs.h>
    struct statfs tstatfs;
#endif

    if (timeReadvs) {
	nReadvs = 0;
	clock_Zero(&totalReadvDelay);
    }
    fd = open(file, O_RDONLY, 0);
    if (fd < 0)
	Abort("Couldn't open %s\n", file);
    fstat(fd, &status);
#ifdef AFS_NT40_ENV
    blockSize = 1024;
#else
#ifdef	AFS_AIX_ENV
/* Unfortunately in AIX valuable fields such as st_blksize are gone from the stat structure!! */
    fstatfs(fd, &tstatfs);
    blockSize = tstatfs.f_bsize;
#else
    blockSize = status.st_blksize;
#endif
#endif
    buf = (char *)osi_Alloc(blockSize);
    bytesLeft = status.st_size;
    clock_GetTime(&startTime);
    call = rx_NewCall(conn);
    while (bytesLeft) {
	if (!receivedStore && rx_GetRemoteStatus(call) == 79) {
	    receivedStore = 1;
	    fprintf(stderr,
		    "Remote status indicates file accepted (\"received store\")\n");
	}
	nbytes = (bytesLeft > blockSize ? blockSize : bytesLeft);
	errno = 0;
	code = read(fd, buf, nbytes);
	if (code != nbytes) {
	    Abort("Only read %d bytes of %d, errno=%d\n", code, nbytes,
		  errno);
	}
	code = rx_Write(call, buf, nbytes);
	if (code != nbytes) {
	    Abort("Only wrote %d bytes of %d\n", code, nbytes);
	}
	bytesLeft -= nbytes;
    }
    while ((nbytes = rx_Read(call, buf, blockSize)) > 0) {	/* read a full buffer; sizeof(buf) is only the pointer size */
	char *p = buf;
	while (nbytes--) {
	    putchar(*p);
	    p++;
	}
    }
    if ((err = rx_EndCall(call, 0)) != 0) {
	fprintf(stderr, "rx_Endcall returned error %d\n", err);
    } else {
	struct clock totalTime;
	float elapsedTime;
	clock_GetTime(&totalTime);
	clock_Sub(&totalTime, &startTime);
	elapsedTime = totalTime.sec + totalTime.usec / 1e6;
	fprintf(stderr,
		"Sent %d bytes in %0.3f seconds:  %0.0f bytes per second\n",
		(int) status.st_size, elapsedTime, status.st_size / elapsedTime);
	if (timeReadvs) {
	    float delay = clock_Float(&totalReadvDelay) / nReadvs;
	    fprintf(stderr, "%d readvs, average delay of %0.4f seconds\n",
		    nReadvs, delay);
	}
    }
    close(fd);

    return(0);
}
Example #12
static long
CallSimultaneously(u_int threads, opaque rock, long (*proc)(int, opaque))
{
    long code;
    int i;
#ifdef AFS_PTHREAD_ENV
    pthread_once(&workerOnce, WorkerInit);
#endif

    workers = 0;
    for (i = 0; i < threads; i++) {
	struct worker *w;
#ifdef AFS_PTHREAD_ENV
	pthread_t pid;
#else
	PROCESS pid;
#endif
	assert(i < MAX_CTHREADS);
	w = (struct worker *)osi_Alloc(sizeof(struct worker));
	memset(w, 0, sizeof(*w));
	w->next = workers;
	workers = w;
	w->index = i;
	w->exitCode = RXKST_PROCESSRUNNING;
	w->rock = rock;
	w->proc = proc;
#ifdef AFS_PTHREAD_ENV
	{
	    pthread_attr_t tattr;

	    code = pthread_attr_init(&tattr);
	    if (code) {
		afs_com_err(whoami, code,
			"can't pthread_attr_init worker process");
		return code;
	    }

	    code =
		pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED);
	    if (code) {
		afs_com_err(whoami, code,
			"can't pthread_attr_setdetachstate worker process");
		return code;
	    }

	    code = pthread_create(&pid, &tattr, DoWorker, (void *)w);
	}
#else
	code =
	    LWP_CreateProcess(DoWorker, 16000, LWP_NORMAL_PRIORITY,
			      (opaque) w, "Worker Process", &pid);
#endif
	if (code) {
	    afs_com_err(whoami, code, "can't create worker process");
	    return code;
	}
    }
    code = 0;			/* last non-zero code encountered */
#ifdef AFS_PTHREAD_ENV
    pthread_mutex_lock(&workerLock);
#endif
    while (workers) {
	struct worker *w, *prevW, *nextW;
	prevW = 0;
	for (w = workers; w; w = nextW) {
	    nextW = w->next;
	    if (w->exitCode != RXKST_PROCESSRUNNING) {
		if (w->exitCode) {
		    if (code == 0)
			code = w->exitCode;
		}
		if (prevW)
		    prevW->next = w->next;
		else
		    workers = w->next;
		osi_Free(w, sizeof(*w));
		continue;	/* don't bump prevW */
	    }
	    prevW = w;
	}
#ifdef AFS_PTHREAD_ENV
	if (workers)
	    pthread_cond_wait(&workerCV, &workerLock);
#else
	if (workers)
	    LWP_WaitProcess(&workers);
#endif
    }
#ifdef AFS_PTHREAD_ENV
    pthread_mutex_unlock(&workerLock);
#endif
    return code;
}
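
The collector loop above assumes each worker eventually replaces RXKST_PROCESSRUNNING in w->exitCode and wakes it; below is a hedged sketch of the kind of worker body that satisfies that contract, pthread variant only. The real DoWorker lives elsewhere in this test, so treat this as an illustration rather than its source.

#ifdef AFS_PTHREAD_ENV
/* Hedged sketch of a worker body matching the contract CallSimultaneously
 * relies on: run the caller's proc, publish the exit code, wake the collector. */
static void *
DoWorkerSketch(void *rock)
{
    struct worker *w = rock;
    long code;

    code = (*w->proc)(w->index, w->rock);	/* run the actual test body */

    pthread_mutex_lock(&workerLock);
    w->exitCode = code;				/* anything but RXKST_PROCESSRUNNING means "done" */
    pthread_cond_broadcast(&workerCV);		/* wake the collector loop */
    pthread_mutex_unlock(&workerLock);
    return NULL;
}
#endif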
Example #13
int
afs_InvalidateAllSegments(struct vcache *avc)
{
    struct dcache *tdc;
    afs_int32 hash;
    afs_int32 index;
    struct dcache **dcList;
    int i, dcListMax, dcListCount;

    AFS_STATCNT(afs_InvalidateAllSegments);
    afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    hash = DVHash(&avc->f.fid);
    avc->f.truncPos = AFS_NOTRUNC;	/* don't truncate later */
    avc->f.states &= ~CExtendedFile;	/* not any more */
    ObtainWriteLock(&afs_xcbhash, 459);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* mark status information as bad, too */
    ReleaseWriteLock(&afs_xcbhash);
    if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
	osi_dnlc_purgedp(avc);
    /* Blow away pages; for now, only for Solaris */
#if	(defined(AFS_SUN5_ENV))
    if (WriteLocked(&avc->lock))
	osi_ReleaseVM(avc, (afs_ucred_t *)0);
#endif
    /*
     * Block out others from screwing with this table; is a read lock
     * sufficient?
     */
    ObtainWriteLock(&afs_xdcache, 286);
    dcListMax = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetDSlot(index, 0);
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid))
		dcListMax++;
	    afs_PutDCache(tdc);
	}
	index = afs_dvnextTbl[index];
    }

    dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
    dcListCount = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetDSlot(index, 0);
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		/* same file? we'll zap it */
		if (afs_indexFlags[index] & IFDataMod) {
		    afs_stats_cmperf.cacheCurrDirtyChunks--;
		    /* don't write it back */
		    afs_indexFlags[index] &= ~IFDataMod;
		}
		afs_indexFlags[index] &= ~IFAnyPages;
		if (dcListCount < dcListMax)
		    dcList[dcListCount++] = tdc;
		else
		    afs_PutDCache(tdc);
	    } else {
		afs_PutDCache(tdc);
	    }
	}
	index = afs_dvnextTbl[index];
    }
    ReleaseWriteLock(&afs_xdcache);

    for (i = 0; i < dcListCount; i++) {
	tdc = dcList[i];

	ObtainWriteLock(&tdc->lock, 679);
	ZapDCE(tdc);
	if (vType(avc) == VDIR)
	    DZap(tdc);
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    osi_Free(dcList, dcListMax * sizeof(struct dcache *));

    return 0;
}
Example #14
int
afs_fill_super(struct super_block *sb, void *data, int silent)
{
    int code = 0;

    AFS_GLOCK();
    if (afs_was_mounted) {
	printf
	    ("You must reload the AFS kernel extensions before remounting AFS.\n");
	AFS_GUNLOCK();
	return -EINVAL;
    }
    afs_was_mounted = 1;

    /* Set basics of super_block */
    __module_get(THIS_MODULE);

    afs_globalVFS = sb;
    sb->s_flags |= MS_NOATIME;
    sb->s_blocksize = 1024;
    sb->s_blocksize_bits = 10;
    sb->s_magic = AFS_VFSMAGIC;
    sb->s_op = &afs_sops;	/* Super block (vfs) ops */
    /* used for inodes backing_dev_info field, also */
    afs_backing_dev_info = osi_Alloc(sizeof(struct backing_dev_info));
#if defined(HAVE_LINUX_BDI_INIT)
    bdi_init(afs_backing_dev_info);
#endif
#if defined(STRUCT_BACKING_DEV_INFO_HAS_NAME)
    afs_backing_dev_info->name = "openafs";
#endif
    afs_backing_dev_info->ra_pages = 32;
#if defined (STRUCT_SUPER_BLOCK_HAS_S_BDI)
    sb->s_bdi = afs_backing_dev_info;
    /* The name specified here will appear in the flushing thread name - flush-afs */
    bdi_register(afs_backing_dev_info, NULL, "afs");
#endif
#if !defined(AFS_NONFSTRANS)
    sb->s_export_op = &afs_export_ops;
#endif
#if defined(MAX_NON_LFS)
#ifdef AFS_64BIT_CLIENT
#if !defined(MAX_LFS_FILESIZE)
#if BITS_PER_LONG==32
#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 
#elif BITS_PER_LONG==64
#define MAX_LFS_FILESIZE 0x7fffffffffffffff
#endif
#endif
    sb->s_maxbytes = MAX_LFS_FILESIZE;
#else
    sb->s_maxbytes = MAX_NON_LFS;
#endif
#endif
    code = afs_root(sb);
    if (code) {
	afs_globalVFS = NULL;
	osi_linux_free_inode_pages();
        module_put(THIS_MODULE);
    }

    AFS_GUNLOCK();
    return code ? -EINVAL : 0;
}
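
For context, a hedged sketch of how a fill_super routine like the one above is typically wired into the Linux VFS on kernels that use the .mount interface (older kernels use get_sb_nodev instead). The names with a _sketch suffix are hypothetical approximations of what the OpenAFS Linux glue does, not copies of it.

/* Hedged VFS wiring sketch for afs_fill_super (kernel-version dependent). */
static struct dentry *
afs_mount_sketch(struct file_system_type *fs_type, int flags,
		 const char *dev_name, void *data)
{
    return mount_nodev(fs_type, flags, data, afs_fill_super);
}

static struct file_system_type afs_fs_type_sketch = {
    .owner   = THIS_MODULE,
    .name    = "afs",
    .mount   = afs_mount_sketch,
    .kill_sb = kill_anon_super,
};
/* registered at module init with register_filesystem(&afs_fs_type_sketch) */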
Example #15
static int
extractPioctlToken(struct tokenJar *token,
		   struct token_opaque *opaque) {
    XDR xdrs;
    struct ktc_tokenUnion *pioctlToken;
    int code;

    memset(opaque, 0, sizeof(token_opaque));

    pioctlToken = osi_Alloc(sizeof(struct ktc_tokenUnion));
    if (pioctlToken == NULL)
	return ENOMEM;

    pioctlToken->at_type = token->type;

    switch (token->type) {
      case RX_SECIDX_KAD:
	code = rxkad_extractTokenForPioctl(token, pioctlToken);
	break;
      default:
	code = EINVAL;
    }

    if (code)
	goto out;

    xdrlen_create(&xdrs);
    if (!xdr_ktc_tokenUnion(&xdrs, pioctlToken)) {
	code = EINVAL;
	xdr_destroy(&xdrs);
	goto out;
    }

    opaque->token_opaque_len = xdr_getpos(&xdrs);
    xdr_destroy(&xdrs);

    opaque->token_opaque_val = osi_Alloc(opaque->token_opaque_len);
    if (opaque->token_opaque_val == NULL) {
	code = ENOMEM;
	goto out;
    }

    xdrmem_create(&xdrs,
		  opaque->token_opaque_val,
		  opaque->token_opaque_len,
		  XDR_ENCODE);
    if (!xdr_ktc_tokenUnion(&xdrs, pioctlToken)) {
	code = EINVAL;
	xdr_destroy(&xdrs);
	goto out;
    }
    xdr_destroy(&xdrs);

out:
    xdr_free((xdrproc_t) xdr_ktc_tokenUnion, &pioctlToken);
    osi_Free(pioctlToken, sizeof(struct ktc_tokenUnion));

    if (code != 0) {
	osi_Free(opaque->token_opaque_val, opaque->token_opaque_len);
	opaque->token_opaque_val = NULL;
	opaque->token_opaque_len = 0;
    }
    return code;
}
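
The natural counterpart, shown as a hedged sketch, is decoding such an opaque back into a ktc_tokenUnion: the same xdr_ktc_tokenUnion routine drives both directions, this time through xdrmem_create in XDR_DECODE mode. decodePioctlToken is a hypothetical name.

/* Hedged sketch of the reverse path: opaque bytes back into a token union.
 * Callers release anything the decode allocated with
 * xdr_free((xdrproc_t) xdr_ktc_tokenUnion, token) when finished. */
static int
decodePioctlToken(struct token_opaque *opaque, struct ktc_tokenUnion *token)
{
    XDR xdrs;

    memset(token, 0, sizeof(*token));
    xdrmem_create(&xdrs, opaque->token_opaque_val, opaque->token_opaque_len,
		  XDR_DECODE);
    if (!xdr_ktc_tokenUnion(&xdrs, token)) {
	xdr_destroy(&xdrs);
	return EINVAL;
    }
    xdr_destroy(&xdrs);
    return 0;
}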
Example #16
/* called with the GLOCK held */
int
afs_syscall_pioctl(char *path, unsigned int com, caddr_t cmarg, int follow)
{
    cred_t *credp = crref();	/* don't free until done! */
    struct afs_ioctl data;
    struct clientcred ccred;
    struct rmtbulk idata, odata;
    short in_size, out_size;
    afs_int32 code = 0, pag, err;
    gid_t g0, g1;
    char *abspath, *pathbuf = 0;

    AFS_STATCNT(afs_syscall_pioctl);
    if (follow)
	follow = 1;		/* compat. with old venus */
    code = copyin_afs_ioctl(cmarg, &data);
    if (code) goto out;

    if ((com & 0xff) == 90) {
	/* PSetClientContext, in any space */
	code = EINVAL;
	goto out;
    }

    /* Special handling for a few pioctls */
    switch (com & 0xffff) {
	case (0x5600 |  3): /* VIOCSETTOK */
	    code = afspag_PSetTokens(data.in, data.in_size, &credp);
	    if (code) goto out;
	    break;

	case (0x5600 |  9): /* VIOCUNLOG */
	case (0x5600 | 21): /* VIOCUNPAG */
	    code = afspag_PUnlog(data.in, data.in_size, &credp);
	    if (code) goto out;
	    break;

	case (0x5600 | 38): /* VIOC_AFS_SYSNAME */
	    code = afspag_PSetSysName(data.in, data.in_size, &credp);
	    if (code) goto out;
	    break;
    }

    /* Set up credentials */
    memset(&ccred, 0, sizeof(ccred));
    pag = PagInCred(credp);
    ccred.uid = afs_cr_uid(credp);
    if (pag != NOPAG) {
	 afs_get_groups_from_pag(pag, &g0, &g1);
	 ccred.group0 = g0;
	 ccred.group1 = g1;
    }

    /*
     * Copy the path and convert to absolute, if one was given.
     * NB: We can only use osi_AllocLargeSpace here as long as
     * RMTSYS_MAXPATHLEN is less than AFS_LRALLOCSIZ.
     */
    if (path) {
	pathbuf = osi_AllocLargeSpace(RMTSYS_MAXPATHLEN);
	if (!pathbuf) {
	    code = ENOMEM;
	    goto out;
	}
	code = osi_abspath(path, pathbuf, RMTSYS_MAXPATHLEN, 0, &abspath);
	if (code)
	    goto out_path;
    } else {
	abspath = NIL_PATHP;
    }

    /* Allocate, copy, and convert incoming data */
    idata.rmtbulk_len = in_size = data.in_size;
    if (in_size  < 0 || in_size  > MAXBUFFERLEN) {
	code = EINVAL;
	goto out_path;
    }
    if (in_size > AFS_LRALLOCSIZ)
	 idata.rmtbulk_val = osi_Alloc(in_size);
    else
	 idata.rmtbulk_val = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    if (!idata.rmtbulk_val) {
	code = ENOMEM;
	goto out_path;
    }
    if (in_size) {
	AFS_COPYIN(data.in, idata.rmtbulk_val, in_size, code);
	if (code)
	    goto out_idata;
	inparam_conversion(com, idata.rmtbulk_val, in_size, 0);
    }

    /* Allocate space for outgoing data */
    odata.rmtbulk_len = out_size = data.out_size;
    if (out_size < 0 || out_size > MAXBUFFERLEN) {
	code = EINVAL;
	goto out_idata;
    }
    if (out_size > AFS_LRALLOCSIZ)
	 odata.rmtbulk_val = osi_Alloc(out_size);
    else
	 odata.rmtbulk_val = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    if (!odata.rmtbulk_val) {
	code = ENOMEM;
	goto out_idata;
    }

    AFS_GUNLOCK();
    code = RMTSYS_Pioctl(rmtsys_conn, &ccred, abspath, com, follow,
			 &idata, &odata, &err);
    AFS_GLOCK();
    if (code)
	goto out_odata;

    /* Convert and copy out the result */
    if (odata.rmtbulk_len > out_size) {
	code = E2BIG;
	goto out_odata;
    }
    if (odata.rmtbulk_len) {
	outparam_conversion(com, odata.rmtbulk_val, odata.rmtbulk_len, 1);
	AFS_COPYOUT(odata.rmtbulk_val, data.out, odata.rmtbulk_len, code);
    }
    if (!code)
	code = err;

out_odata:
    if (out_size > AFS_LRALLOCSIZ)
	osi_Free(odata.rmtbulk_val, out_size);
    else
	osi_FreeLargeSpace(odata.rmtbulk_val);

out_idata:
    if (in_size > AFS_LRALLOCSIZ)
	osi_Free(idata.rmtbulk_val, in_size);
    else
	osi_FreeLargeSpace(idata.rmtbulk_val);

out_path:
    if (path)
	osi_FreeLargeSpace(pathbuf);

out:
    crfree(credp);
#if defined(KERNEL_HAVE_UERROR)
    if (!getuerror())
	setuerror(code);
    return (getuerror());
#else
    return (code);
#endif
}