Example #1
int
main(int argc, char **argv)
{
    struct clock clock_old = {0, 0};
    struct clock first;

    clock_Init ();

    clock_NewTime ();
    clock_GetTime (&first);

    for (;;) {
	struct clock now;

	clock_NewTime ();
	clock_GetTime (&now);
	if (now.sec < clock_old.sec ||
	    (now.sec == clock_old.sec && now.usec < clock_old.usec))
	    abort ();
	/* printf ("%6ld.%6ld\r", now.sec, now.usec); */
	if (now.sec > first.sec + 10)
	    break;
    }
    return 0;
}
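
The test above checks monotonicity by comparing the sec fields first and the usec fields only on a tie. A minimal helper sketch of that comparison, assuming only the sec/usec layout of struct clock visible in these examples (the clock_Lt macro in rx_clock.h performs the same test):

/* Sketch only: "a < b" on (sec, usec) pairs, mirroring the inline check above. */
static int
clock_is_earlier(const struct clock *a, const struct clock *b)
{
    return a->sec < b->sec || (a->sec == b->sec && a->usec < b->usec);
}
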
Example #2
/*
 * Process all events that have expired relative to the current clock time
 * (which is not re-evaluated unless clock_NewTime has been called).
 * The relative time to the next event is returned in the output parameter
 * next and the function returns 1.  If there is no next event,
 * the function returns 0.
 */
int
rxevent_RaiseEvents(struct clock * next)
{
    struct rxevent *qe;
    struct clock now;

#ifdef RXDEBUG
    if (Log) {
	clock_GetTime(&now);
	fprintf(Log, "rxevent_RaiseEvents(%ld.%ld)\n", now.sec, now.usec);
    }
#endif

    /*
     * Events are sorted by time, so only scan until an event is found that
     * has not yet timed out
     */
    while (queue_IsNotEmpty(&rxevent_queue)) {
	clock_GetTime(&now);
	qe = queue_First(&rxevent_queue, rxevent);
	if (clock_Lt(&now, &qe->eventTime)) {
	    *next = qe->eventTime;
	    clock_Sub(next, &now);
	    return 1;
	}
	queue_Remove(qe);
	rxevent_nPosted--;
	qe->func(qe, qe->arg, qe->arg1);
	queue_Append(&rxevent_free, qe);
	rxevent_nFree++;
    }
    return 0;
}
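
The return convention matters for the caller: on 1, *next holds the relative time until the next event; on 0, *next is left untouched, so callers preload a default. Example #4 below is the full pthread caller; a condensed sketch of the pattern, with a hypothetical sleep_for_clock() standing in for the real condition-variable wait:

/* Condensed caller sketch (see Example #4 for the real version). */
static void
run_events_once(void)
{
    struct clock next;

    next.sec = 30;              /* default wait when nothing is scheduled */
    next.usec = 0;
    (void) rxevent_RaiseEvents(&next);
    sleep_for_clock(&next);     /* hypothetical; Example #4 uses CV_TIMEDWAIT */
}
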
Example #3
/*
 * Called from rx_Init()
 */
void
rxi_InitializeThreadSupport(void)
{
	/* listeners_started must only be reset if
	 * the listener thread terminates */
	/* listeners_started = 0; */
    clock_GetTime(&rxi_clockNow);
}
Example #4
/*
 * The event handling process.
 */
static void *
event_handler(void *argp)
{
    unsigned long rx_pthread_n_event_expired = 0;
    unsigned long rx_pthread_n_event_waits = 0;
    long rx_pthread_n_event_woken = 0;
    unsigned long rx_pthread_n_event_error = 0;
    struct timespec rx_pthread_next_event_time = { 0, 0 };
    int error;

    MUTEX_ENTER(&event_handler_mutex);

    for (;;) {
	struct clock cv;
	struct clock next;

	MUTEX_EXIT(&event_handler_mutex);

	next.sec = 30;		/* Time to sleep if there are no events scheduled */
	next.usec = 0;
	clock_GetTime(&cv);
	rxevent_RaiseEvents(&next);

	MUTEX_ENTER(&event_handler_mutex);
	if (rx_pthread_event_rescheduled) {
	    rx_pthread_event_rescheduled = 0;
	    continue;
	}

	clock_Add(&cv, &next);
	rx_pthread_next_event_time.tv_sec = cv.sec;
	rx_pthread_next_event_time.tv_nsec = cv.usec * 1000;
	rx_pthread_n_event_waits++;
	error = CV_TIMEDWAIT(&rx_event_handler_cond, &event_handler_mutex, &rx_pthread_next_event_time);
        if (error == 0) {
	    rx_pthread_n_event_woken++;
        }
#ifdef AFS_NT40_ENV
        else if (error == ETIMEDOUT) {
	    rx_pthread_n_event_expired++;
	} else {
            rx_pthread_n_event_error++;
        }
#else
        else if (errno == ETIMEDOUT) {
            rx_pthread_n_event_expired++;
        } else {
            rx_pthread_n_event_error++;
        }
#endif
	rx_pthread_event_rescheduled = 0;
    }
    /* never reached: the loop above runs for the life of the process */
    return NULL;
}
Example #5
/* allocate a new connection ID in place */
int
rxkad_AllocCID(struct rx_securityClass *aobj, struct rx_connection *aconn)
{
    struct rxkad_cprivate *tcp;
    struct rxkad_cidgen tgen;
    static afs_int32 counter = 0;

    LOCK_CUID;
    if (Cuid[0] == 0) {
	afs_uint32 xor[2];
	tgen.ipAddr = rxi_getaddr();	/* comes back in net order */
	clock_GetTime(&tgen.time);	/* changes time1 and time2 */
	tgen.time.sec = htonl(tgen.time.sec);
	tgen.time.usec = htonl(tgen.time.usec);
	tgen.counter = htonl(counter);
	counter++;
#ifdef KERNEL
	tgen.random1 = afs_random() & 0x7fffffff;	/* was "80000" */
	tgen.random2 = afs_random() & 0x7fffffff;	/* was "htonl(100)" */
#else
	tgen.random1 = htonl(getpid());
	tgen.random2 = htonl(100);
#endif
	if (aobj) {
	    /* block is ready for encryption with session key, let's go for it. */
	    tcp = (struct rxkad_cprivate *)aobj->privateData;
	    memcpy((void *)xor, (void *)tcp->ivec, 2 * sizeof(afs_int32));
	    fc_cbc_encrypt((char *)&tgen, (char *)&tgen, sizeof(tgen),
			   tcp->keysched, xor, ENCRYPT);
	} else {
	    /* Create a session key so that we can encrypt it */

	}
	memcpy((void *)Cuid,
	       ((char *)&tgen) + sizeof(tgen) - ENCRYPTIONBLOCKSIZE,
	       ENCRYPTIONBLOCKSIZE);
	Cuid[0] = (Cuid[0] & ~0x40000000) | 0x80000000;
	Cuid[1] &= RX_CIDMASK;
	rx_SetEpoch(Cuid[0]);	/* for future rxnull connections */
	rxkad_EpochWasSet++;
    }

    if (!aconn) {
	UNLOCK_CUID;
	return 0;
    }
    aconn->epoch = Cuid[0];
    aconn->cid = Cuid[1];
    Cuid[1] += 1 << RX_CIDSHIFT;
    UNLOCK_CUID;
    return 0;
}
Example #6
int
main(int argc, char **argv)
{
    struct clock clock_old = {0, 0};

    clock_Init ();
    for (;;) {
	struct clock now;

	clock_NewTime ();
	clock_GetTime (&now);
	if (now.sec < clock_old.sec ||
	    (now.sec == clock_old.sec && now.usec < clock_old.usec))
	    abort ();
	printf ("%6ld.%6ld\r", now.sec, now.usec);
    }
    return 0;
}
Example #7
/*
 * Cancel an event by moving it from the event queue to the free list.
 * Warning, the event must be on the event queue! If not, this should core
 * dump (reference through 0).  This routine should be called using the macro
 * event_Cancel, which checks for a null event and also nulls the caller's
 * event pointer after cancelling the event.
 */
void
rxevent_Cancel_1(struct rxevent * ev)
{
#ifdef RXDEBUG
    if (Log) {
	struct clock now;

	clock_GetTime(&now);
	fprintf(Log, "%ld.%ld: rxevent_Cancel_1(%ld.%ld, %p, %p)\n",
		now.sec, now.usec, ev->eventTime.sec, ev->eventTime.usec,
		ev->func, ev->arg);
    }
#endif
    /*
     * Append it to the free list (rather than prepending) to keep
     * the free list hot so nothing pages out
     */
#if defined(AFS_SGIMP_ENV)
    ASSERT(osi_rxislocked());
#endif
    queue_MoveAppend(&rxevent_free, ev);
    rxevent_nPosted--;
    rxevent_nFree++;
}
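
The header comment refers callers to an event_Cancel macro that skips NULL events and clears the caller's pointer; the macro itself is not part of this excerpt. A sketch of the shape the comment describes, purely for illustration:

/* Hypothetical wrapper matching the comment above: pass the address of the
 * caller's event pointer; it is cleared after the event is cancelled. */
#define event_Cancel(evpp)             \
    do {                               \
        if (*(evpp) != NULL) {         \
            rxevent_Cancel_1(*(evpp)); \
            *(evpp) = NULL;            \
        }                              \
    } while (0)
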
Example #8
File: rx_trace.c  Project: hwr/openafs
void
rxi_calltrace(unsigned int event, struct rx_call *call)
{
    struct clock now;
    struct rx_trace rxtinfo;

    if (!rxi_tracename[0])
	return;

    if (rxi_logfd < 0) {
	rxi_logfd = open(rxi_tracename, O_WRONLY | O_CREAT | O_TRUNC, 0777);
	if (rxi_logfd < 0)
	    rxi_tracename[0] = '\0';
    }
    clock_GetTime(&now);

    rxtinfo.event = event;
    rxtinfo.now = now.sec * 1000 + now.usec / 1000;
    rxtinfo.cid = call->conn->cid;
    rxtinfo.call = *(call->callNumber);
    rxtinfo.qlen = rx_atomic_read(&rx_nWaiting);
    rxtinfo.servicetime = 0;
    rxtinfo.waittime = 0;

    switch (event) {
    case RX_CALL_END:
	clock_Sub(&now, &(call->traceStart));
	rxtinfo.servicetime = now.sec * 10000 + now.usec / 100;
	if (call->traceWait.sec) {
	    now = call->traceStart;
	    clock_Sub(&now, &(call->traceWait));
	    rxtinfo.waittime = now.sec * 10000 + now.usec / 100;
	} else
	    rxtinfo.waittime = 0;
	call->traceWait.sec = call->traceWait.usec = call->traceStart.sec =
	    call->traceStart.usec = 0;
	break;

    case RX_CALL_START:
	call->traceStart = now;
	if (call->traceWait.sec) {
	    clock_Sub(&now, &(call->traceWait));
	    rxtinfo.waittime = now.sec * 10000 + now.usec / 100;
	} else
	    rxtinfo.waittime = 0;
	break;

    case RX_TRACE_DROP:
	if (call->traceWait.sec) {
	    clock_Sub(&now, &(call->traceWait));
	    rxtinfo.waittime = now.sec * 10000 + now.usec / 100;
	} else
	    rxtinfo.waittime = 0;
	break;

    case RX_CALL_ARRIVAL:
	call->traceWait = now;
	/* fall through */
    default:
	break;
    }

    memcpy(rxi_tracebuf + rxi_tracepos, &rxtinfo, sizeof(struct rx_trace));
    rxi_tracepos += sizeof(struct rx_trace);
    if (rxi_tracepos >= (4096 - sizeof(struct rx_trace)))
	rxi_flushtrace();
}
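
The trace record stores times in coarser units than struct clock: rxtinfo.now is in milliseconds, while servicetime and waittime are in units of 100 microseconds. The millisecond conversion used above, factored into a helper for clarity:

/* Conversion used for rxtinfo.now above: e.g. {2, 500000} (2.5 s) -> 2500 ms. */
static unsigned long
clock_to_msec(const struct clock *c)
{
    return c->sec * 1000 + c->usec / 1000;
}
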
Example #9
/* Add the indicated event (function, arg) at the specified clock time */
struct rxevent *
rxevent_Post(struct clock * when, void (*func)(), void *arg, void *arg1)
/* when - When event should happen, in clock (clock.h) units */
{
    struct rxevent *ev, *qe, *qpr;
    struct xfreelist *xsp;

#ifdef RXDEBUG
    if (Log) {
	struct clock now;

	clock_GetTime(&now);
	fprintf(Log, "%ld.%ld: rxevent_Post(%ld.%ld, %p, %p)\n",
		now.sec, now.usec, when->sec, when->usec, func, arg);
    }
#endif
#if defined(AFS_SGIMP_ENV)
    ASSERT(osi_rxislocked());
#endif

    /*
     * If we're short on free event entries, create a block of new ones and
     * add them to the free queue
     */
    if (queue_IsEmpty(&rxevent_free)) {
	int i;

#if	defined(AFS_AIX32_ENV) && defined(KERNEL)
	ev = (struct rxevent *) rxi_Alloc(sizeof(struct rxevent));
	queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
	ev = (struct rxevent *) osi_Alloc(sizeof(struct rxevent) *
					  rxevent_allocUnit);
	xsp = xfreemallocs;
	xfreemallocs = (struct xfreelist *) ev;
	xfreemallocs->next = xsp;
	for (i = 0; i < rxevent_allocUnit; i++)
	    queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
    }
    /* Grab and initialize a new rxevent structure */
    ev = queue_First(&rxevent_free, rxevent);
    queue_Remove(ev);
    rxevent_nFree--;

    /* Record user defined event state */
    ev->eventTime = *when;
    ev->func = func;
    ev->arg = arg;
    ev->arg1 = arg1;
    rxevent_nPosted += 1;	       /* Rather than ++, to shut high-C up
				        * regarding never-set variables */

    /*
     * Locate a slot for the new entry.  The queue is ordered by time, and we
     * assume that a new entry is likely to be greater than a majority of the
     * entries already on the queue (unless there's very few entries on the
     * queue), so we scan it backwards
     */
    for (queue_ScanBackwards(&rxevent_queue, qe, qpr, rxevent)) {
	if (clock_Ge(when, &qe->eventTime)) {
	    queue_InsertAfter(qe, ev);
	    return ev;
	}
    }
    /* The event is to expire earlier than any existing events */
    queue_Prepend(&rxevent_queue, ev);
    if (rxevent_ScheduledEarlierEvent)
	(*rxevent_ScheduledEarlierEvent) ();	/* Notify our external
						 * scheduler */
    return ev;
}
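
Since when is an absolute clock value, a typical caller samples the current time with clock_GetTime and adds an offset before posting. A minimal illustrative helper (not part of the library):

/* Illustrative only: schedule func(event, arg, NULL) five seconds from now. */
static struct rxevent *
post_in_five_seconds(void (*func)(), void *arg)
{
    struct clock when;

    clock_GetTime(&when);
    when.sec += 5;
    return rxevent_Post(&when, func, arg, NULL);
}
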
Example #10
int
main(int argc, char **argv)
{
    char *hostname;
    struct hostent *hostent;
    afs_uint32 host;
    int logstdout = 0;
    struct rx_connection *conn;
    struct rx_call *call;
    struct rx_peer *peer;
    int err = 0;
    int nCalls = 1, nBytes = 1;
    int bufferSize = 4000000;
    char *buffer;
    char *sendFile = 0;
    int setFD = 0;
    int jumbo = 0;

#if !defined(AFS_NT40_ENV) && !defined(AFS_LINUX20_ENV)
    setlinebuf(stdout);
    rxi_syscallp = test_syscall;
#endif


    argv++;
    argc--;
    while (argc && **argv == '-') {
	if (strcmp(*argv, "-silent") == 0)
	    print = 0;
	if (strcmp(*argv, "-jumbo") == 0)
	    jumbo = 1;
	else if (strcmp(*argv, "-nc") == 0)
	    nCalls = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-nb") == 0)
	    nBytes = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-np") == 0)
	    rx_nPackets = atoi(*++argv), argc--;
	else if (!strcmp(*argv, "-nsf"))
	    rxi_nSendFrags = atoi(*++argv), argc--;
	else if (!strcmp(*argv, "-nrf"))
	    rxi_nRecvFrags = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-twind") == 0)
	    rx_initSendWindow = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-rwind") == 0)
	    rx_initReceiveWindow = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-rxlog") == 0)
	    rxlog = 1;
	else if (strcmp(*argv, "-logstdout") == 0)
	    logstdout = 1;
	else if (strcmp(*argv, "-eventlog") == 0)
	    eventlog = 1;
	else if (strcmp(*argv, "-drop") == 0) {
#ifdef RXDEBUG
	    rx_intentionallyDroppedPacketsPer100 = atoi(*++argv), argc--;
#else
            fprintf(stderr, "ERROR: Compiled without RXDEBUG\n");
#endif
        }
	else if (strcmp(*argv, "-burst") == 0) {
	    burst = atoi(*++argv), argc--;
	    burstTime.sec = atoi(*++argv), argc--;
	    burstTime.usec = atoi(*++argv), argc--;
	} else if (strcmp(*argv, "-retry") == 0) {
	    retryTime.sec = atoi(*++argv), argc--;
	    retryTime.usec = atoi(*++argv), argc--;
	} else if (strcmp(*argv, "-timeout") == 0)
	    timeout = atoi(*++argv), argc--;
	else if (strcmp(*argv, "-fill") == 0)
	    fillPackets++;
	else if (strcmp(*argv, "-file") == 0)
	    sendFile = *++argv, argc--;
	else if (strcmp(*argv, "-timereadvs") == 0)
	    timeReadvs = 1;
	else if (strcmp(*argv, "-wait") == 0) {
	    /* Wait time between calls--to test lastack code */
	    waitTime.sec = atoi(*++argv), argc--;
	    waitTime.usec = atoi(*++argv), argc--;
	} else if (strcmp(*argv, "-compute") == 0) {
	    /* Simulated "compute" time for each call--to test acknowledgement protocol.  This is simulated by doing an iomgr_select:  imperfect, admittedly. */
	    computeTime.sec = atoi(*++argv), argc--;
	    computeTime.usec = atoi(*++argv), argc--;
	} else if (strcmp(*argv, "-fd") == 0) {
	    /* Open at least this many fd's. */
	    setFD = atoi(*++argv), argc--;
	} else {
	    err = 1;
	    break;
	}
	argv++, argc--;
    }
    if (err || argc != 1)
	Quit("usage: rx_ctest [-silent] [-rxlog] [-eventlog] [-nc NCALLS] [-np NPACKETS] hostname");
    hostname = *argv++, argc--;

    if (rxlog || eventlog) {
	if (logstdout)
	    debugFile = stdout;
	else
	    debugFile = fopen("rx_ctest.db", "w");
	if (debugFile == NULL)
	    Quit("Couldn't open rx_ctest.db");
	if (rxlog)
	    rx_debugFile = debugFile;
	if (eventlog)
	    rxevent_debugFile = debugFile;
    }

    signal(SIGINT, intSignal);	/*Changed to sigquit since dbx is broken right now */
#ifndef AFS_NT40_ENV
    signal(SIGQUIT, quitSignal);
#endif

#ifdef AFS_NT40_ENV
    if (afs_winsockInit() < 0) {
	printf("Can't initialize winsock.\n");
	exit(1);
    }
    rx_EnableHotThread();
#endif

    rx_SetUdpBufSize(256 * 1024);

    if (!jumbo)
        rx_SetNoJumbo();

    hostent = gethostbyname(hostname);
    if (!hostent)
	Abort("host %s not found", hostname);
    if (hostent->h_length != 4)
	Abort("host address is disagreeable length (%d)", hostent->h_length);
    memcpy((char *)&host, hostent->h_addr, sizeof(host));
    if (setFD > 0)
	OpenFD(setFD);
    if (rx_Init(0) != 0) {
	printf("RX failed to initialize, exiting.\n");
	exit(2);
    }
    if (setFD > 0) {
	printf("rx_socket=%d\n", rx_socket);
    }

    printf("Using %d packet buffers\n", rx_nPackets);

    conn =
	rx_NewConnection(host, htons(2500), 3,
			 rxnull_NewClientSecurityObject(), 0);

    if (!conn)
	Abort("unable to make a new connection");

    /* Set initial parameters.  This is (currently) not the approved interface */
    peer = rx_PeerOf(conn);
    if (burst)
	peer->burstSize = peer->burst = burst;
    if (!clock_IsZero(&burstTime))
	peer->burstWait = burstTime;
    if (!clock_IsZero(&retryTime))
	peer->rtt = _8THMSEC(&retryTime);
    if (sendFile)
	SendFile(sendFile, conn);
    else {
	buffer = (char *)osi_Alloc(bufferSize);
	while (nCalls--) {
	    struct clock startTime;
	    struct timeval t;
	    int nbytes;
	    int nSent;
	    int bytesSent = 0;
	    int bytesRead = 0;
	    call = rx_NewCall(conn);
	    if (!call)
		Abort("unable to make a new call");

	    clock_GetTime(&startTime);
	    for (bytesSent = 0; bytesSent < nBytes; bytesSent += nSent) {
		int tryCount;
		tryCount =
		    (bufferSize >
		     nBytes - bytesSent) ? nBytes - bytesSent : bufferSize;
		nSent = rx_Write(call, buffer, tryCount);
		if (nSent == 0)
		    break;

	    }
	    for (bytesRead = 0; (nbytes = rx_Read(call, buffer, bufferSize));
		 bytesRead += nbytes) {
	    };
	    if (print)
		printf("Received %d characters in response\n", bytesRead);
	    err = rx_EndCall(call, 0);
	    if (err)
		printf("Error %d returned from rpc call\n", err);
	    else {
		struct clock totalTime;
		float elapsedTime;
		clock_GetTime(&totalTime);
		clock_Sub(&totalTime, &startTime);
		elapsedTime = clock_Float(&totalTime);
		fprintf(stderr,
			"Sent %d bytes in %0.3f seconds:  %0.0f bytes per second\n",
			bytesSent, elapsedTime, bytesSent / elapsedTime);
	    }
	    if (!clock_IsZero(&computeTime)) {
		t.tv_sec = computeTime.sec;
		t.tv_usec = computeTime.usec;
		if (select(0, 0, 0, 0, &t) != 0)
		    Quit("Select didn't return 0");
	    }
	    if (!clock_IsZero(&waitTime)) {
		struct timeval t;
		t.tv_sec = waitTime.sec;
		t.tv_usec = waitTime.usec;
#ifdef AFS_PTHREAD_ENV
		select(0, 0, 0, 0, &t);
#else
		IOMGR_Sleep(t.tv_sec);
#endif
	    }
            if (debugFile)
                rx_PrintPeerStats(debugFile, rx_PeerOf(conn));
            rx_PrintPeerStats(stdout, rx_PeerOf(conn));
	}
    }
    Quit("testclient: done!\n");
    return 0;
}
Example #11
int
SendFile(char *file, struct rx_connection *conn)
{
    struct rx_call *call;
    int fd;
    struct stat status;
    int blockSize, bytesLeft;
    char *buf;
    int nbytes;
    int err;
    struct clock startTime;
    int receivedStore = 0;
    struct clock totalReadvDelay;
    int nReadvs;
    int code;
#ifdef	AFS_AIX_ENV
#include <sys/statfs.h>
    struct statfs tstatfs;
#endif

    if (timeReadvs) {
	nReadvs = 0;
	clock_Zero(&totalReadvDelay);
    }
    fd = open(file, O_RDONLY, 0);
    if (fd < 0)
	Abort("Couldn't open %s\n", file);
    fstat(fd, &status);
#ifdef AFS_NT40_ENV
    blockSize = 1024;
#else
#ifdef	AFS_AIX_ENV
/* Unfortunately in AIX valuable fields such as st_blksize are gone from the stat structure!! */
    fstatfs(fd, &tstatfs);
    blockSize = tstatfs.f_bsize;
#else
    blockSize = status.st_blksize;
#endif
#endif
    buf = (char *)osi_Alloc(blockSize);
    bytesLeft = status.st_size;
    clock_GetTime(&startTime);
    call = rx_NewCall(conn);
    while (bytesLeft) {
	if (!receivedStore && rx_GetRemoteStatus(call) == 79) {
	    receivedStore = 1;
	    fprintf(stderr,
		    "Remote status indicates file accepted (\"received store\")\n");
	}
	nbytes = (bytesLeft > blockSize ? blockSize : bytesLeft);
	errno = 0;
	code = read(fd, buf, nbytes);
	if (code != nbytes) {
	    Abort("Only read %d bytes of %d, errno=%d\n", code, nbytes,
		  errno);
	}
	code = rx_Write(call, buf, nbytes);
	if (code != nbytes) {
	    Abort("Only wrote %d bytes of %d\n", code, nbytes);
	}
	bytesLeft -= nbytes;
    }
    while ((nbytes = rx_Read(call, buf, blockSize)) > 0) {
	char *p = buf;
	while (nbytes--) {
	    putchar(*p);
	    p++;
	}
    }
    if ((err = rx_EndCall(call, 0)) != 0) {
	fprintf(stderr, "rx_Endcall returned error %d\n", err);
    } else {
	struct clock totalTime;
	float elapsedTime;
	clock_GetTime(&totalTime);
	clock_Sub(&totalTime, &startTime);
	elapsedTime = totalTime.sec + totalTime.usec / 1e6;
	fprintf(stderr,
		"Sent %d bytes in %0.3f seconds:  %0.0f bytes per second\n",
		(int) status.st_size, elapsedTime, status.st_size / elapsedTime);
	if (timeReadvs) {
	    float delay = clock_Float(&totalReadvDelay) / nReadvs;
	    fprintf(stderr, "%d readvs, average delay of %0.4f seconds\n",
		    nReadvs, delay);
	}
    }
    close(fd);

    return(0);
}
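
The elapsed-time computation here is the hand-written form of the clock_Float conversion used in Example #10; presumably both reduce a struct clock to seconds as a floating-point value:

/* Presumed equivalent of clock_Float as used in Example #10. */
static float
clock_to_seconds(const struct clock *c)
{
    return (float)c->sec + (float)c->usec / 1e6f;
}
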
Example #12
/* rxi_ReadProc -- internal version.
 *
 * LOCKS USED -- called at netpri
 */
int
rxi_ReadProc(struct rx_call *call, char *buf,
	     int nbytes)
{
    struct rx_packet *cp = call->currentPacket;
    struct rx_packet *rp;
    int requestCount;
    unsigned int t;

/* XXXX took out clock_NewTime from here.  Was it needed? */
    requestCount = nbytes;

    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }

    do {
	if (call->nLeft == 0) {
	    /* Get next packet */
	    MUTEX_ENTER(&call->lock);
	    for (;;) {
		if (call->error || (call->mode != RX_MODE_RECEIVING)) {
		    if (call->error) {
                        call->mode = RX_MODE_ERROR;
			MUTEX_EXIT(&call->lock);
			return 0;
		    }
		    if (call->mode == RX_MODE_SENDING) {
                        MUTEX_EXIT(&call->lock);
			rxi_FlushWrite(call);
                        MUTEX_ENTER(&call->lock);
			continue;
		    }
		}
		if (queue_IsNotEmpty(&call->rq)) {
		    /* Check that next packet available is next in sequence */
		    rp = queue_First(&call->rq, rx_packet);
		    if (rp->header.seq == call->rnext) {
			afs_int32 error;
			struct rx_connection *conn = call->conn;
			queue_Remove(rp);
#ifdef RX_TRACK_PACKETS
			rp->flags &= ~RX_PKTFLAG_RQ;
#endif
#ifdef RXDEBUG_PACKET
                        call->rqc--;
#endif /* RXDEBUG_PACKET */

			/* RXS_CheckPacket called to undo RXS_PreparePacket's
			 * work.  It may reduce the length of the packet by up
			 * to conn->maxTrailerSize, to reflect the length of the
			 * data + the header. */
			if ((error =
			     RXS_CheckPacket(conn->securityObject, call,
					     rp))) {
			    /* Used to merely shut down the call, but now we
			     * shut down the whole connection since this may
			     * indicate an attempt to hijack it */

			    MUTEX_EXIT(&call->lock);
			    rxi_ConnectionError(conn, error);
			    MUTEX_ENTER(&conn->conn_data_lock);
			    rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
			    MUTEX_EXIT(&conn->conn_data_lock);
			    rxi_FreePacket(rp);

			    return 0;
			}
			call->rnext++;
			cp = call->currentPacket = rp;
#ifdef RX_TRACK_PACKETS
			call->currentPacket->flags |= RX_PKTFLAG_CP;
#endif
			call->curvec = 1;	/* 0th vec is always header */
			/* begin at the beginning [ more or less ], continue
			 * on until the end, then stop. */
			call->curpos =
			    (char *)cp->wirevec[1].iov_base +
			    call->conn->securityHeaderSize;
			call->curlen =
			    cp->wirevec[1].iov_len -
			    call->conn->securityHeaderSize;

			/* Notice that this code works correctly if the data
			 * size is 0 (which it may be--no reply arguments from
			 * server, for example).  This relies heavily on the
			 * fact that the code below immediately frees the packet
			 * (no yields, etc.).  If it didn't, this would be a
			 * problem because a value of zero for call->nLeft
			 * normally means that there is no read packet */
			call->nLeft = cp->length;
			hadd32(call->bytesRcvd, cp->length);

			/* Send a hard ack for every rxi_HardAckRate+1 packets
			 * consumed. Otherwise schedule an event to send
			 * the hard ack later on.
			 */
			call->nHardAcks++;
			if (!(call->flags & RX_CALL_RECEIVE_DONE)) {
			    if (call->nHardAcks > (u_short) rxi_HardAckRate) {
				rxevent_Cancel(call->delayedAckEvent, call,
					       RX_CALL_REFCOUNT_DELAY);
				rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
			    } else {
				struct clock when, now;
				clock_GetTime(&now);
				when = now;
				/* Delay to consolidate ack packets */
				clock_Add(&when, &rx_hardAckDelay);
				if (!call->delayedAckEvent
				    || clock_Gt(&call->delayedAckEvent->
						eventTime, &when)) {
				    rxevent_Cancel(call->delayedAckEvent,
						   call,
						   RX_CALL_REFCOUNT_DELAY);
                                    MUTEX_ENTER(&rx_refcnt_mutex);
				    CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
                                    MUTEX_EXIT(&rx_refcnt_mutex);
                                    call->delayedAckEvent =
				      rxevent_PostNow(&when, &now,
						     rxi_SendDelayedAck, call,
						     0);
				}
			    }
			}
			break;
		    }
		}

                /*
                 * If we reach this point either we have no packets in the
                 * receive queue or the next packet in the queue is not the
                 * one we are looking for.  There is nothing else for us to
                 * do but wait for another packet to arrive.
                 */

		/* Are there ever going to be any more packets? */
		if (call->flags & RX_CALL_RECEIVE_DONE) {
		    MUTEX_EXIT(&call->lock);
		    return requestCount - nbytes;
		}
		/* Wait for in-sequence packet */
		call->flags |= RX_CALL_READER_WAIT;
		clock_NewTime();
		call->startWait = clock_Sec();
		while (call->flags & RX_CALL_READER_WAIT) {
#ifdef	RX_ENABLE_LOCKS
		    CV_WAIT(&call->cv_rq, &call->lock);
#else
		    osi_rxSleep(&call->rq);
#endif
		}
                cp = call->currentPacket;

		call->startWait = 0;
#ifdef RX_ENABLE_LOCKS
		if (call->error) {
		    MUTEX_EXIT(&call->lock);
		    return 0;
		}
#endif /* RX_ENABLE_LOCKS */
	    }
	    MUTEX_EXIT(&call->lock);
	} else
	    /* osi_Assert(cp); */
	    /* MTUXXX  this should be replaced by some error-recovery code before shipping */
	    /* yes, the following block is allowed to be the ELSE clause (or not) */
	    /* It's possible for call->nLeft to be smaller than any particular
	     * iov_len.  Usually, recvmsg doesn't change the iov_len, since it
	     * reflects the size of the buffer.  We have to keep track of the
	     * number of bytes read in the length field of the packet struct.  On
	     * the final portion of a received packet, it's almost certain that
	     * call->nLeft will be smaller than the final buffer. */
	    while (nbytes && cp) {
		t = MIN((int)call->curlen, nbytes);
		t = MIN(t, (int)call->nLeft);
		memcpy(buf, call->curpos, t);
		buf += t;
		nbytes -= t;
		call->curpos += t;
		call->curlen -= t;
		call->nLeft -= t;

		if (!call->nLeft) {
		    /* out of packet.  Get another one. */
#ifdef RX_TRACK_PACKETS
		    call->currentPacket->flags &= ~RX_PKTFLAG_CP;
#endif
		    rxi_FreePacket(cp);
		    cp = call->currentPacket = (struct rx_packet *)0;
		} else if (!call->curlen) {
		    /* need to get another struct iov */
		    if (++call->curvec >= cp->niovecs) {
			/* current packet is exhausted, get ready for another */
			/* don't worry about curvec and stuff, they get set somewhere else */
#ifdef RX_TRACK_PACKETS
			call->currentPacket->flags &= ~RX_PKTFLAG_CP;
#endif
			rxi_FreePacket(cp);
			cp = call->currentPacket = (struct rx_packet *)0;
			call->nLeft = 0;
		    } else {
			call->curpos =
			    (char *)cp->wirevec[call->curvec].iov_base;
			call->curlen = cp->wirevec[call->curvec].iov_len;
		    }
		}
	    }
	if (!nbytes) {
	    /* user buffer is full, return */
	    return requestCount;
	}

    } while (nbytes);

    return requestCount;
}
Example #13
/* rxi_FillReadVec
 *
 * Uses packets in the receive queue to fill in as much of the
 * current iovec as possible. Does not block if it runs out
 * of packets to complete the iovec. Return true if an ack packet
 * was sent, otherwise return false */
int
rxi_FillReadVec(struct rx_call *call, afs_uint32 serial)
{
    int didConsume = 0;
    int didHardAck = 0;
    unsigned int t;
    struct rx_packet *rp;
    struct rx_packet *curp;
    struct iovec *call_iov;
    struct iovec *cur_iov = NULL;

    curp = call->currentPacket;
    if (curp) {
	cur_iov = &curp->wirevec[call->curvec];
    }
    call_iov = &call->iov[call->iovNext];

    while (!call->error && call->iovNBytes && call->iovNext < call->iovMax) {
	if (call->nLeft == 0) {
	    /* Get next packet */
	    if (queue_IsNotEmpty(&call->rq)) {
		/* Check that next packet available is next in sequence */
		rp = queue_First(&call->rq, rx_packet);
		if (rp->header.seq == call->rnext) {
		    afs_int32 error;
		    struct rx_connection *conn = call->conn;
		    queue_Remove(rp);
#ifdef RX_TRACK_PACKETS
		    rp->flags &= ~RX_PKTFLAG_RQ;
#endif
#ifdef RXDEBUG_PACKET
                    call->rqc--;
#endif /* RXDEBUG_PACKET */

		    /* RXS_CheckPacket called to undo RXS_PreparePacket's
		     * work.  It may reduce the length of the packet by up
		     * to conn->maxTrailerSize, to reflect the length of the
		     * data + the header. */
		    if ((error =
			 RXS_CheckPacket(conn->securityObject, call, rp))) {
			/* Used to merely shut down the call, but now we
			 * shut down the whole connection since this may
			 * indicate an attempt to hijack it */

			MUTEX_EXIT(&call->lock);
			rxi_ConnectionError(conn, error);
			MUTEX_ENTER(&conn->conn_data_lock);
			rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
			MUTEX_EXIT(&conn->conn_data_lock);
			rxi_FreePacket(rp);
			MUTEX_ENTER(&call->lock);

			return 1;
		    }
		    call->rnext++;
		    curp = call->currentPacket = rp;
#ifdef RX_TRACK_PACKETS
		    call->currentPacket->flags |= RX_PKTFLAG_CP;
#endif
		    call->curvec = 1;	/* 0th vec is always header */
		    cur_iov = &curp->wirevec[1];
		    /* begin at the beginning [ more or less ], continue
		     * on until the end, then stop. */
		    call->curpos =
			(char *)curp->wirevec[1].iov_base +
			call->conn->securityHeaderSize;
		    call->curlen =
			curp->wirevec[1].iov_len -
			call->conn->securityHeaderSize;

		    /* Notice that this code works correctly if the data
		     * size is 0 (which it may be--no reply arguments from
		     * server, for example).  This relies heavily on the
		     * fact that the code below immediately frees the packet
		     * (no yields, etc.).  If it didn't, this would be a
		     * problem because a value of zero for call->nLeft
		     * normally means that there is no read packet */
		    call->nLeft = curp->length;
		    hadd32(call->bytesRcvd, curp->length);

		    /* Send a hard ack for every rxi_HardAckRate+1 packets
		     * consumed. Otherwise schedule an event to send
		     * the hard ack later on.
		     */
		    call->nHardAcks++;
		    didConsume = 1;
		    continue;
		}
	    }
	    break;
	}

	/* It's possible for call->nLeft to be smaller than any particular
	 * iov_len.  Usually, recvmsg doesn't change the iov_len, since it
	 * reflects the size of the buffer.  We have to keep track of the
	 * number of bytes read in the length field of the packet struct.  On
	 * the final portion of a received packet, it's almost certain that
	 * call->nLeft will be smaller than the final buffer. */
	while (call->iovNBytes && call->iovNext < call->iovMax && curp) {

	    t = MIN((int)call->curlen, call->iovNBytes);
	    t = MIN(t, (int)call->nLeft);
	    call_iov->iov_base = call->curpos;
	    call_iov->iov_len = t;
	    call_iov++;
	    call->iovNext++;
	    call->iovNBytes -= t;
	    call->curpos += t;
	    call->curlen -= t;
	    call->nLeft -= t;

	    if (!call->nLeft) {
		/* out of packet.  Get another one. */
#ifdef RX_TRACK_PACKETS
                curp->flags &= ~RX_PKTFLAG_CP;
                curp->flags |= RX_PKTFLAG_IOVQ;
#endif
		queue_Append(&call->iovq, curp);
#ifdef RXDEBUG_PACKET
                call->iovqc++;
#endif /* RXDEBUG_PACKET */
		curp = call->currentPacket = (struct rx_packet *)0;
	    } else if (!call->curlen) {
		/* need to get another struct iov */
		if (++call->curvec >= curp->niovecs) {
		    /* current packet is exhausted, get ready for another */
		    /* don't worry about curvec and stuff, they get set somewhere else */
#ifdef RX_TRACK_PACKETS
		    curp->flags &= ~RX_PKTFLAG_CP;
		    curp->flags |= RX_PKTFLAG_IOVQ;
#endif
		    queue_Append(&call->iovq, curp);
#ifdef RXDEBUG_PACKET
                    call->iovqc++;
#endif /* RXDEBUG_PACKET */
		    curp = call->currentPacket = (struct rx_packet *)0;
		    call->nLeft = 0;
		} else {
		    cur_iov++;
		    call->curpos = (char *)cur_iov->iov_base;
		    call->curlen = cur_iov->iov_len;
		}
	    }
	}
    }

    /* If we consumed any packets then check whether we need to
     * send a hard ack. */
    if (didConsume && (!(call->flags & RX_CALL_RECEIVE_DONE))) {
	if (call->nHardAcks > (u_short) rxi_HardAckRate) {
	    rxevent_Cancel(call->delayedAckEvent, call,
			   RX_CALL_REFCOUNT_DELAY);
	    rxi_SendAck(call, 0, serial, RX_ACK_DELAY, 0);
	    didHardAck = 1;
	} else {
	    struct clock when, now;
	    clock_GetTime(&now);
	    when = now;
	    /* Delay to consolidate ack packets */
	    clock_Add(&when, &rx_hardAckDelay);
	    if (!call->delayedAckEvent
		|| clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
		rxevent_Cancel(call->delayedAckEvent, call,
			       RX_CALL_REFCOUNT_DELAY);
                MUTEX_ENTER(&rx_refcnt_mutex);
		CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
                MUTEX_EXIT(&rx_refcnt_mutex);
		call->delayedAckEvent =
		    rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
	    }
	}
    }
    return didHardAck;
}
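
This function and rxi_ReadProc (Example #12) schedule the delayed hard ack the same way. Condensed, with the reference counting and locking left out, the pattern is:

/* Condensed sketch of the delayed-ack scheduling in Examples #12 and #13
 * (CALL_HOLD and the refcnt mutex omitted). */
static void
schedule_delayed_ack_sketch(struct rx_call *call)
{
    struct clock when, now;

    clock_GetTime(&now);
    when = now;
    clock_Add(&when, &rx_hardAckDelay);        /* absolute deadline */

    /* Keep an already-pending event unless it would fire later than this. */
    if (!call->delayedAckEvent
        || clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
        rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
        call->delayedAckEvent =
            rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
    }
}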