Example #1
int
rxi_WriteProc(struct rx_call *call, char *buf,
	      int nbytes)
{
    struct rx_connection *conn = call->conn;
    unsigned int t;
    int requestCount = nbytes;

    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (!opr_queue_IsEmpty(&call->app.iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->app.iovq);
    }

    if (call->app.mode != RX_MODE_SENDING) {
	if ((conn->type == RX_SERVER_CONNECTION)
	    && (call->app.mode == RX_MODE_RECEIVING)) {
	    call->app.mode = RX_MODE_SENDING;
	    if (call->app.currentPacket) {
#ifdef RX_TRACK_PACKETS
		call->app.currentPacket->flags &= ~RX_PKTFLAG_CP;
#endif
		rxi_FreePacket(call->app.currentPacket);
		call->app.currentPacket = NULL;
		call->app.nLeft = 0;
		call->app.nFree = 0;
	    }
	} else {
	    return 0;
	}
    }

    /* Loop condition is checked at end, so that a write of 0 bytes
     * will force a packet to be created--especially for the case where
     * there are 0 bytes on the stream, but we must send a packet
     * anyway. */
    do {
	if (call->app.nFree == 0) {
	    MUTEX_ENTER(&call->lock);
            if (call->error)
                call->app.mode = RX_MODE_ERROR;
	    if (!call->error && call->app.currentPacket) {
		clock_NewTime();	/* Bogus:  need new time package */
		/* The 0, below, specifies that it is not the last packet:
		 * there will be others. PrepareSendPacket may
		 * alter the packet length by up to
		 * conn->securityMaxTrailerSize */
		call->app.bytesSent += call->app.currentPacket->length;
		rxi_PrepareSendPacket(call, call->app.currentPacket, 0);
                /* PrepareSendPacket drops the call lock */
                rxi_WaitforTQBusy(call);
#ifdef RX_TRACK_PACKETS
		call->app.currentPacket->flags |= RX_PKTFLAG_TQ;
#endif
		opr_queue_Append(&call->tq,
				 &call->app.currentPacket->entry);
#ifdef RXDEBUG_PACKET
                call->tqc++;
#endif /* RXDEBUG_PACKET */
#ifdef RX_TRACK_PACKETS
                call->app.currentPacket->flags &= ~RX_PKTFLAG_CP;
#endif
                call->app.currentPacket = NULL;

		/* If the call is in recovery, let it exhaust its current
		 * retransmit queue before forcing it to send new packets
		 */
		if (!(call->flags & (RX_CALL_FAST_RECOVER))) {
		    rxi_Start(call, 0);
		}
	    } else if (call->app.currentPacket) {
#ifdef RX_TRACK_PACKETS
		call->app.currentPacket->flags &= ~RX_PKTFLAG_CP;
#endif
		rxi_FreePacket(call->app.currentPacket);
		call->app.currentPacket = NULL;
	    }
	    /* Wait for transmit window to open up */
	    while (!call->error
		   && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
		clock_NewTime();
		call->startWait = clock_Sec();

#ifdef	RX_ENABLE_LOCKS
		CV_WAIT(&call->cv_twind, &call->lock);
#else
		call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
		osi_rxSleep(&call->twind);
#endif

		call->startWait = 0;
#ifdef RX_ENABLE_LOCKS
		if (call->error) {
                    call->app.mode = RX_MODE_ERROR;
		    MUTEX_EXIT(&call->lock);
		    return 0;
		}
#endif /* RX_ENABLE_LOCKS */
	    }
	    if ((call->app.currentPacket = rxi_AllocSendPacket(call, nbytes))) {
#ifdef RX_TRACK_PACKETS
		call->app.currentPacket->flags |= RX_PKTFLAG_CP;
#endif
		call->app.nFree = call->app.currentPacket->length;
		call->app.curvec = 1;	/* 0th vec is always header */
		/* begin at the beginning [ more or less ], continue
		 * on until the end, then stop. */
		call->app.curpos =
		    (char *) call->app.currentPacket->wirevec[1].iov_base +
		    call->conn->securityHeaderSize;
		call->app.curlen =
		    call->app.currentPacket->wirevec[1].iov_len -
		    call->conn->securityHeaderSize;
	    }
	    if (call->error) {
                call->app.mode = RX_MODE_ERROR;
		if (call->app.currentPacket) {
#ifdef RX_TRACK_PACKETS
		    call->app.currentPacket->flags &= ~RX_PKTFLAG_CP;
#endif
		    rxi_FreePacket(call->app.currentPacket);
		    call->app.currentPacket = NULL;
		}
		MUTEX_EXIT(&call->lock);
		return 0;
	    }
	    MUTEX_EXIT(&call->lock);
	}

	if (call->app.currentPacket && (int)call->app.nFree < nbytes) {
	    /* Try to extend the current buffer */
	    int len, mud;
	    len = call->app.currentPacket->length;
	    mud = rx_MaxUserDataSize(call);
	    if (mud > len) {
		int want;
		want = MIN(nbytes - (int)call->app.nFree, mud - len);
		rxi_AllocDataBuf(call->app.currentPacket, want,
				 RX_PACKET_CLASS_SEND_CBUF);
		if (call->app.currentPacket->length > (unsigned)mud)
		    call->app.currentPacket->length = mud;
		call->app.nFree += (call->app.currentPacket->length - len);
	    }
	}

	/* If the remaining bytes fit in the buffer, then store them
	 * and return.  Don't ship a buffer that's full immediately to
	 * the peer--we don't know if it's the last buffer yet */

	if (!call->app.currentPacket) {
	    call->app.nFree = 0;
	}

	while (nbytes && call->app.nFree) {

	    t = MIN((int)call->app.curlen, nbytes);
	    t = MIN((int)call->app.nFree, t);
	    memcpy(call->app.curpos, buf, t);
	    buf += t;
	    nbytes -= t;
	    call->app.curpos += t;
	    call->app.curlen -= (u_short)t;
	    call->app.nFree -= (u_short)t;

	    if (!call->app.curlen) {
		/* need to get another struct iov */
		if (++call->app.curvec >= call->app.currentPacket->niovecs) {
		    /* current packet is full, extend or send it */
		    call->app.nFree = 0;
		} else {
		    call->app.curpos =
			call->app.currentPacket->wirevec[call->app.curvec].iov_base;
		    call->app.curlen =
			call->app.currentPacket->wirevec[call->app.curvec].iov_len;
		}
	    }
	}			/* while bytes to send and room to send them */

	/* might be out of space now */
	if (!nbytes) {
	    return requestCount;
	}
	/* more data to send, so get another packet and keep going */
    } while (nbytes);

    return requestCount - nbytes;
}
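
For context, a minimal caller sketch (not OpenAFS source): the function above is the internal write path that the public rx_Write() wrapper reaches after raising netpri. The helper send_blob() below is hypothetical; rx_Write() and rx_Error() are assumed to behave as declared in rx.h (bytes accepted are returned, 0 on error).

/*
 * Hedged sketch: stream a buffer on a call via the public rx_Write()
 * wrapper, which ends up in rxi_WriteProc().  send_blob() is a
 * hypothetical helper, not part of the library.
 */
#include <rx/rx.h>

static int
send_blob(struct rx_call *call, char *data, int len)
{
    while (len > 0) {
	/* rx_Write() returns the number of bytes accepted, 0 on error */
	int nw = rx_Write(call, data, len);
	if (nw <= 0)
	    return rx_Error(call) ? rx_Error(call) : RX_PROTOCOL_ERROR;
	data += nw;
	len -= nw;
    }
    return 0;
}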
Example #2
/* Flush any buffered data to the stream, switch to read mode
 * (clients) or to EOF mode (servers)
 *
 * LOCKS HELD: called at netpri.
 */
void
rxi_FlushWrite(struct rx_call *call)
{
    struct rx_packet *cp = NULL;

    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }

    if (call->mode == RX_MODE_SENDING) {

	call->mode =
	    (call->conn->type ==
	     RX_CLIENT_CONNECTION ? RX_MODE_RECEIVING : RX_MODE_EOF);

#ifdef RX_KERNEL_TRACE
	{
	    int glockOwner = ISAFS_GLOCK();
	    if (!glockOwner)
		AFS_GLOCK();
	    afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
		       __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
		       call);
	    if (!glockOwner)
		AFS_GUNLOCK();
	}
#endif

        MUTEX_ENTER(&call->lock);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
        rxi_WaitforTQBusy(call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
        if (call->error)
            call->mode = RX_MODE_ERROR;

        cp = call->currentPacket;

	if (cp) {
	    /* cp->length is only supposed to be the user's data */
	    /* cp->length was already set to (then-current)
	     * MaxUserDataSize or less. */
#ifdef RX_TRACK_PACKETS
	    cp->flags &= ~RX_PKTFLAG_CP;
#endif
	    cp->length -= call->nFree;
	    call->currentPacket = (struct rx_packet *)0;
	    call->nFree = 0;
	} else {
	    cp = rxi_AllocSendPacket(call, 0);
	    if (!cp) {
		/* Mode can no longer be MODE_SENDING; drop the call lock
		 * acquired above before bailing out. */
		MUTEX_EXIT(&call->lock);
		return;
	    }
	    cp->length = 0;
	    cp->niovecs = 2;	/* header + space for rxkad stuff */
	    call->nFree = 0;
	}

	/* The 1 specifies that this is the last packet */
	hadd32(call->bytesSent, cp->length);
	rxi_PrepareSendPacket(call, cp, 1);
#ifdef RX_TRACK_PACKETS
	cp->flags |= RX_PKTFLAG_TQ;
#endif
	queue_Append(&call->tq, cp);
#ifdef RXDEBUG_PACKET
        call->tqc++;
#endif /* RXDEBUG_PACKET */
	if (!(call->flags
	      & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
	    rxi_Start(0, call, 0, 0);
	}
        MUTEX_EXIT(&call->lock);
    }
}
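
A sketch of the client-side sequence this function supports: once the request has been written, rx_FlushWrite() pushes the final (possibly short) packet and flips the call into receive mode so the reply can be read. do_simple_rpc() and the fixed-size reply below are illustrative only; rx_NewCall(), rx_Write(), rx_FlushWrite(), rx_Read() and rx_EndCall() are the public wrappers assumed from rx.h/rx_prototypes.h.

/*
 * Hedged usage sketch (not OpenAFS source): write the request, flush,
 * read the reply, end the call.
 */
#include <rx/rx.h>

static afs_int32
do_simple_rpc(struct rx_connection *conn, char *req, int reqlen,
	      char *reply, int replylen)
{
    struct rx_call *call = rx_NewCall(conn);
    afs_int32 code = 0;

    if (rx_Write(call, req, reqlen) != reqlen)
	code = rx_Error(call);

    if (code == 0) {
	/* Push the last (possibly short) packet; on the client side the
	 * call switches to RX_MODE_RECEIVING. */
	rx_FlushWrite(call);
	if (rx_Read(call, reply, replylen) != replylen)
	    code = rx_Error(call);
    }

    /* rx_EndCall() returns the final status for the call */
    return rx_EndCall(call, code);
}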
Example #3
/* rxi_WritevProc -- internal version.
 *
 * Send buffers allocated in rxi_WritevAlloc.
 *
 * LOCKS USED -- called at netpri.
 */
int
rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
{
#ifdef RX_TRACK_PACKETS
    struct opr_queue *cursor;
#endif
    int nextio;
    int requestCount;
    struct opr_queue tmpq;
#ifdef RXDEBUG_PACKET
    u_short tmpqc;
#endif

    requestCount = nbytes;
    nextio = 0;

    MUTEX_ENTER(&call->lock);
    if (call->error) {
        call->app.mode = RX_MODE_ERROR;
    } else if (call->app.mode != RX_MODE_SENDING) {
	call->error = RX_PROTOCOL_ERROR;
    }
    rxi_WaitforTQBusy(call);

    if (call->error) {
        call->app.mode = RX_MODE_ERROR;
	MUTEX_EXIT(&call->lock);
	if (call->app.currentPacket) {
#ifdef RX_TRACK_PACKETS
            call->app.currentPacket->flags &= ~RX_PKTFLAG_CP;
            call->app.currentPacket->flags |= RX_PKTFLAG_IOVQ;
#endif
	    opr_queue_Prepend(&call->app.iovq,
			      &call->app.currentPacket->entry);
#ifdef RXDEBUG_PACKET
            call->iovqc++;
#endif /* RXDEBUG_PACKET */
	    call->app.currentPacket = NULL;
	}
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->app.iovq);
	return 0;
    }

    /* Loop through the I/O vector adjusting packet pointers.
     * Place full packets back onto the iovq once they are ready
     * to send. Set RX_PROTOCOL_ERROR if any problems are found in
     * the iovec. We put the loop condition at the end to ensure that
     * a zero length write will push a short packet. */
    nextio = 0;
    opr_queue_Init(&tmpq);
#ifdef RXDEBUG_PACKET
    tmpqc = 0;
#endif /* RXDEBUG_PACKET */
    do {
	if (call->app.nFree == 0 && call->app.currentPacket) {
	    clock_NewTime();	/* Bogus:  need new time package */
	    /* The 0, below, specifies that it is not the last packet:
	     * there will be others. PrepareSendPacket may
	     * alter the packet length by up to
	     * conn->securityMaxTrailerSize */
	    call->app.bytesSent += call->app.currentPacket->length;
	    rxi_PrepareSendPacket(call, call->app.currentPacket, 0);
            /* PrepareSendPacket drops the call lock */
            rxi_WaitforTQBusy(call);
	    opr_queue_Append(&tmpq, &call->app.currentPacket->entry);
#ifdef RXDEBUG_PACKET
            tmpqc++;
#endif /* RXDEBUG_PACKET */
            call->app.currentPacket = NULL;

	    /* The head of the iovq is now the current packet */
	    if (nbytes) {
		if (opr_queue_IsEmpty(&call->app.iovq)) {
                    MUTEX_EXIT(&call->lock);
		    call->error = RX_PROTOCOL_ERROR;
#ifdef RXDEBUG_PACKET
                    tmpqc -=
#endif /* RXDEBUG_PACKET */
                        rxi_FreePackets(0, &tmpq);
		    return 0;
		}
		call->app.currentPacket
			= opr_queue_First(&call->app.iovq, struct rx_packet,
					  entry);
		opr_queue_Remove(&call->app.currentPacket->entry);
#ifdef RX_TRACK_PACKETS
                call->app.currentPacket->flags &= ~RX_PKTFLAG_IOVQ;
		call->app.currentPacket->flags |= RX_PKTFLAG_CP;
#endif
#ifdef RXDEBUG_PACKET
                call->iovqc--;
#endif /* RXDEBUG_PACKET */
		call->app.nFree = call->app.currentPacket->length;
		call->app.curvec = 1;
		call->app.curpos =
		    (char *) call->app.currentPacket->wirevec[1].iov_base +
		    call->conn->securityHeaderSize;
		call->app.curlen =
		    call->app.currentPacket->wirevec[1].iov_len -
		    call->conn->securityHeaderSize;
	    }
	}

	if (nbytes) {
	    /* The next iovec should point to the current position */
	    if (iov[nextio].iov_base != call->app.curpos
		|| iov[nextio].iov_len > (int)call->app.curlen) {
		call->error = RX_PROTOCOL_ERROR;
                MUTEX_EXIT(&call->lock);
		if (call->app.currentPacket) {
#ifdef RX_TRACK_PACKETS
		    call->app.currentPacket->flags &= ~RX_PKTFLAG_CP;
#endif
                    opr_queue_Prepend(&tmpq,
				      &call->app.currentPacket->entry);
#ifdef RXDEBUG_PACKET
                    tmpqc++;
#endif /* RXDEBUG_PACKET */
                    call->app.currentPacket = NULL;
		}
#ifdef RXDEBUG_PACKET
                tmpqc -=
#endif /* RXDEBUG_PACKET */
                    rxi_FreePackets(0, &tmpq);
		return 0;
	    }
	    nbytes -= iov[nextio].iov_len;
	    call->app.curpos += iov[nextio].iov_len;
	    call->app.curlen -= iov[nextio].iov_len;
	    call->app.nFree -= iov[nextio].iov_len;
	    nextio++;
	    if (call->app.curlen == 0) {
		if (++call->app.curvec > call->app.currentPacket->niovecs) {
		    call->app.nFree = 0;
		} else {
		    call->app.curpos =
			call->app.currentPacket->wirevec[call->app.curvec].iov_base;
		    call->app.curlen =
			call->app.currentPacket->wirevec[call->app.curvec].iov_len;
		}
	    }
	}
    } while (nbytes && nextio < nio);
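
The staging pattern used above (append a finished packet to tmpq, then pull the next packet off iovq with opr_queue_First/opr_queue_Remove; the remainder of the function splices tmpq onto call->tq, as in Example #4) is easier to see in isolation. Below is a minimal sketch of that opr_queue idiom; struct item and stage_and_take_first() are invented for illustration, and the opr/queue.h interface is assumed to match the calls already visible in this example.

/*
 * Standalone sketch of the opr_queue idiom (not OpenAFS source).
 */
#include <stddef.h>
#include <opr/queue.h>

struct item {
    struct opr_queue entry;	/* queue linkage, like rx_packet's 'entry' */
    int value;
};

static struct item *
stage_and_take_first(struct item *a, struct item *b)
{
    struct opr_queue tmpq;
    struct item *first;

    opr_queue_Init(&tmpq);
    opr_queue_Append(&tmpq, &a->entry);	/* stage "finished" items */
    opr_queue_Append(&tmpq, &b->entry);

    if (opr_queue_IsEmpty(&tmpq))
	return NULL;

    /* Pull the head off the queue, as the code above does with iovq */
    first = opr_queue_First(&tmpq, struct item, entry);
    opr_queue_Remove(&first->entry);
    return first;	/* 'a' in this sketch */
}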
Example #4
/* rxi_WritevProc -- internal version.
 *
 * Send buffers allocated in rxi_WritevAlloc.
 *
 * LOCKS USED -- called at netpri.
 */
int
rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
{
    struct rx_packet *cp = NULL;
#ifdef RX_TRACK_PACKETS
    struct rx_packet *p, *np;
#endif
    int nextio;
    int requestCount;
    struct rx_queue tmpq;
#ifdef RXDEBUG_PACKET
    u_short tmpqc;
#endif

    requestCount = nbytes;
    nextio = 0;

    MUTEX_ENTER(&call->lock);
    if (call->error) {
        call->mode = RX_MODE_ERROR;
    } else if (call->mode != RX_MODE_SENDING) {
	call->error = RX_PROTOCOL_ERROR;
    }
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
    rxi_WaitforTQBusy(call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
    cp = call->currentPacket;

    if (call->error) {
        call->mode = RX_MODE_ERROR;
	MUTEX_EXIT(&call->lock);
	if (cp) {
#ifdef RX_TRACK_PACKETS
            cp->flags &= ~RX_PKTFLAG_CP;
            cp->flags |= RX_PKTFLAG_IOVQ;
#endif
	    queue_Prepend(&call->iovq, cp);
#ifdef RXDEBUG_PACKET
            call->iovqc++;
#endif /* RXDEBUG_PACKET */
	    call->currentPacket = (struct rx_packet *)0;
	}
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
	return 0;
    }

    /* Loop through the I/O vector adjusting packet pointers.
     * Place full packets back onto the iovq once they are ready
     * to send. Set RX_PROTOCOL_ERROR if any problems are found in
     * the iovec. We put the loop condition at the end to ensure that
     * a zero length write will push a short packet. */
    nextio = 0;
    queue_Init(&tmpq);
#ifdef RXDEBUG_PACKET
    tmpqc = 0;
#endif /* RXDEBUG_PACKET */
    do {
	if (call->nFree == 0 && cp) {
	    clock_NewTime();	/* Bogus:  need new time package */
	    /* The 0, below, specifies that it is not the last packet:
	     * there will be others. PrepareSendPacket may
	     * alter the packet length by up to
	     * conn->securityMaxTrailerSize */
	    hadd32(call->bytesSent, cp->length);
	    rxi_PrepareSendPacket(call, cp, 0);
	    queue_Append(&tmpq, cp);
#ifdef RXDEBUG_PACKET
            tmpqc++;
#endif /* RXDEBUG_PACKET */
            cp = call->currentPacket = (struct rx_packet *)0;

	    /* The head of the iovq is now the current packet */
	    if (nbytes) {
		if (queue_IsEmpty(&call->iovq)) {
                    MUTEX_EXIT(&call->lock);
		    call->error = RX_PROTOCOL_ERROR;
#ifdef RXDEBUG_PACKET
                    tmpqc -=
#endif /* RXDEBUG_PACKET */
                        rxi_FreePackets(0, &tmpq);
		    return 0;
		}
		cp = queue_First(&call->iovq, rx_packet);
		queue_Remove(cp);
#ifdef RX_TRACK_PACKETS
                cp->flags &= ~RX_PKTFLAG_IOVQ;
#endif
#ifdef RXDEBUG_PACKET
                call->iovqc--;
#endif /* RXDEBUG_PACKET */
#ifdef RX_TRACK_PACKETS
                cp->flags |= RX_PKTFLAG_CP;
#endif
		call->currentPacket = cp;
		call->nFree = cp->length;
		call->curvec = 1;
		call->curpos =
		    (char *)cp->wirevec[1].iov_base +
		    call->conn->securityHeaderSize;
		call->curlen =
		    cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
	    }
	}

	if (nbytes) {
	    /* The next iovec should point to the current position */
	    if (iov[nextio].iov_base != call->curpos
		|| iov[nextio].iov_len > (int)call->curlen) {
		call->error = RX_PROTOCOL_ERROR;
                MUTEX_EXIT(&call->lock);
		if (cp) {
#ifdef RX_TRACK_PACKETS
		    cp->flags &= ~RX_PKTFLAG_CP;
#endif
                    queue_Prepend(&tmpq, cp);
#ifdef RXDEBUG_PACKET
                    tmpqc++;
#endif /* RXDEBUG_PACKET */
                    cp = call->currentPacket = (struct rx_packet *)0;
		}
#ifdef RXDEBUG_PACKET
                tmpqc -=
#endif /* RXDEBUG_PACKET */
                    rxi_FreePackets(0, &tmpq);
		return 0;
	    }
	    nbytes -= iov[nextio].iov_len;
	    call->curpos += iov[nextio].iov_len;
	    call->curlen -= iov[nextio].iov_len;
	    call->nFree -= iov[nextio].iov_len;
	    nextio++;
	    if (call->curlen == 0) {
		if (++call->curvec > cp->niovecs) {
		    call->nFree = 0;
		} else {
		    call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
		    call->curlen = cp->wirevec[call->curvec].iov_len;
		}
	    }
	}
    } while (nbytes && nextio < nio);

    /* Move the packets from the temporary queue onto the transmit queue.
     * We may end up with more than call->twind packets on the queue. */

#ifdef RX_TRACK_PACKETS
    for (queue_Scan(&tmpq, p, np, rx_packet))
    {
        p->flags |= RX_PKTFLAG_TQ;
    }
#endif

    if (call->error)
        call->mode = RX_MODE_ERROR;

    queue_SpliceAppend(&call->tq, &tmpq);

    if (!(call->flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
	rxi_Start(0, call, 0, 0);
    }

    /* Wait for the length of the transmit queue to fall below call->twind */
    while (!call->error && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
	clock_NewTime();
	call->startWait = clock_Sec();
#ifdef	RX_ENABLE_LOCKS
	CV_WAIT(&call->cv_twind, &call->lock);
#else
	call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
	osi_rxSleep(&call->twind);
#endif
	call->startWait = 0;
    }

    /* cp is no longer valid since we may have given up the lock */
    cp = call->currentPacket;

    if (call->error) {
        call->mode = RX_MODE_ERROR;
        call->currentPacket = NULL;
        MUTEX_EXIT(&call->lock);
	if (cp) {
#ifdef RX_TRACK_PACKETS
	    cp->flags &= ~RX_PKTFLAG_CP;
#endif
	    rxi_FreePacket(cp);
	}
	return 0;
    }
    MUTEX_EXIT(&call->lock);

    return requestCount - nbytes;
}