/* Return a thread entry to the cache after closing its NT thread handle */
static void
put_thread(thread_p old)
{
    CloseHandle(old->t_handle);
    pthread_mutex_lock(&cache_Q_mutex);
    queue_Prepend(&cache_Q, old);
    pthread_mutex_unlock(&cache_Q_mutex);
}
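/*
 * Illustrative only, not part of this file: the cache filled above is
 * meant to be drained by a matching lookup when a new thread is needed.
 * The get_thread() name, the struct tag passed to queue_First(), and the
 * NULL-means-allocate convention below are assumptions for illustration,
 * not the actual companion routine.
 */
#if 0
static thread_p
get_thread(void)
{
    thread_p t = NULL;

    pthread_mutex_lock(&cache_Q_mutex);
    if (queue_IsNotEmpty(&cache_Q)) {
	t = queue_First(&cache_Q, thread);	/* reuse a cached entry */
	queue_Remove(t);
    }
    pthread_mutex_unlock(&cache_Q_mutex);
    return t;			/* NULL => caller must allocate a fresh one */
}
#endif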
/* Add the indicated event (function, arg) at the specified clock time */
struct rxevent *
rxevent_Post(struct clock *when, void (*func) (), void *arg, void *arg1)
    /* when - When event should happen, in clock (clock.h) units */
{
    struct rxevent *ev, *qe, *qpr;

#ifdef RXDEBUG
    if (Log) {
	struct clock now;
	clock_GetTime(&now);
	fprintf(Log, "%ld.%ld: rxevent_Post(%ld.%ld, %p, %p)\n",
		now.sec, now.usec, when->sec, when->usec, func, arg);
    }
#endif
#if defined(AFS_SGIMP_ENV)
    ASSERT(osi_rxislocked());
#endif

    /*
     * If we're short on free event entries, create a block of new ones and
     * add them to the free queue
     */
    if (queue_IsEmpty(&rxevent_free)) {
	int i;
#if defined(AFS_AIX32_ENV) && defined(KERNEL)
	ev = (struct rxevent *)rxi_Alloc(sizeof(struct rxevent));
	queue_Append(&rxevent_free, &ev[0]), rxevent_nFree++;
#else
	ev = (struct rxevent *)osi_Alloc(sizeof(struct rxevent)
					 * rxevent_allocUnit);
	xsp = xfreemallocs;
	xfreemallocs = (struct xfreelist *)ev;
	xfreemallocs->next = xsp;
	for (i = 0; i < rxevent_allocUnit; i++)
	    queue_Append(&rxevent_free, &ev[i]), rxevent_nFree++;
#endif
    }

    /* Grab and initialize a new rxevent structure */
    ev = queue_First(&rxevent_free, rxevent);
    queue_Remove(ev);
    rxevent_nFree--;

    /* Record user defined event state */
    ev->eventTime = *when;
    ev->func = func;
    ev->arg = arg;
    ev->arg1 = arg1;
    rxevent_nPosted += 1;	/* Rather than ++, to shut high-C up
				 * regarding never-set variables */

    /*
     * Locate a slot for the new entry.  The queue is ordered by time, and we
     * assume that a new entry is likely to be greater than a majority of the
     * entries already on the queue (unless there are very few entries on the
     * queue), so we scan it backwards
     */
    for (queue_ScanBackwards(&rxevent_queue, qe, qpr, rxevent)) {
	if (clock_Ge(when, &qe->eventTime)) {
	    queue_InsertAfter(qe, ev);
	    return ev;
	}
    }

    /* The event is to expire earlier than any existing events */
    queue_Prepend(&rxevent_queue, ev);
    if (rxevent_ScheduledEarlierEvent)
	(*rxevent_ScheduledEarlierEvent) ();	/* Notify our external
						 * scheduler */
    return ev;
}
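/*
 * Illustrative only: a caller schedules deferred work by posting an event
 * at an absolute clock time; the event layer later invokes the handler as
 * (*func)(event, arg, arg1).  The exampleRetry/example_schedule names and
 * the 500 msec delay below are hypothetical, chosen just to show the
 * clock_GetTime/clock_Addmsec/rxevent_Post pattern.
 */
#if 0
static void
exampleRetry(struct rxevent *event, void *arg, void *arg1)
{
    /* ... perform the deferred work on arg ... */
}

static void
example_schedule(void *arg)
{
    struct clock when;

    clock_GetTime(&when);	/* "now", in clock.h units */
    clock_Addmsec(&when, 500);	/* fire 500 msec from now */
    rxevent_Post(&when, exampleRetry, arg, NULL);
}
#endif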
/* rxi_WritevProc -- internal version.
 *
 * Send buffers allocated in rxi_WritevAlloc.
 *
 * LOCKS USED -- called at netpri.
 */
int
rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
{
    struct rx_packet *cp = NULL;
#ifdef RX_TRACK_PACKETS
    struct rx_packet *p, *np;
#endif
    int nextio;
    int requestCount;
    struct rx_queue tmpq;
#ifdef RXDEBUG_PACKET
    u_short tmpqc;
#endif

    requestCount = nbytes;
    nextio = 0;

    MUTEX_ENTER(&call->lock);
    if (call->error) {
	call->mode = RX_MODE_ERROR;
    } else if (call->mode != RX_MODE_SENDING) {
	call->error = RX_PROTOCOL_ERROR;
    }
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
    rxi_WaitforTQBusy(call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
    cp = call->currentPacket;

    if (call->error) {
	call->mode = RX_MODE_ERROR;
	MUTEX_EXIT(&call->lock);
	if (cp) {
#ifdef RX_TRACK_PACKETS
	    cp->flags &= ~RX_PKTFLAG_CP;
	    cp->flags |= RX_PKTFLAG_IOVQ;
#endif
	    queue_Prepend(&call->iovq, cp);
#ifdef RXDEBUG_PACKET
	    call->iovqc++;
#endif /* RXDEBUG_PACKET */
	    call->currentPacket = (struct rx_packet *)0;
	}
#ifdef RXDEBUG_PACKET
	call->iovqc -=
#endif /* RXDEBUG_PACKET */
	    rxi_FreePackets(0, &call->iovq);
	return 0;
    }

    /* Loop through the I/O vector adjusting packet pointers.
     * Place full packets back onto the iovq once they are ready
     * to send.  Set RX_PROTOCOL_ERROR if any problems are found in
     * the iovec.  We put the loop condition at the end to ensure that
     * a zero length write will push a short packet. */
    nextio = 0;
    queue_Init(&tmpq);
#ifdef RXDEBUG_PACKET
    tmpqc = 0;
#endif /* RXDEBUG_PACKET */
    do {
	if (call->nFree == 0 && cp) {
	    clock_NewTime();	/* Bogus:  need new time package */
	    /* The 0, below, specifies that it is not the last packet:
	     * there will be others. PrepareSendPacket may
	     * alter the packet length by up to
	     * conn->securityMaxTrailerSize */
	    hadd32(call->bytesSent, cp->length);
	    rxi_PrepareSendPacket(call, cp, 0);
	    queue_Append(&tmpq, cp);
#ifdef RXDEBUG_PACKET
	    tmpqc++;
#endif /* RXDEBUG_PACKET */
	    cp = call->currentPacket = (struct rx_packet *)0;

	    /* The head of the iovq is now the current packet */
	    if (nbytes) {
		if (queue_IsEmpty(&call->iovq)) {
		    MUTEX_EXIT(&call->lock);
		    call->error = RX_PROTOCOL_ERROR;
#ifdef RXDEBUG_PACKET
		    tmpqc -=
#endif /* RXDEBUG_PACKET */
			rxi_FreePackets(0, &tmpq);
		    return 0;
		}
		cp = queue_First(&call->iovq, rx_packet);
		queue_Remove(cp);
#ifdef RX_TRACK_PACKETS
		cp->flags &= ~RX_PKTFLAG_IOVQ;
#endif
#ifdef RXDEBUG_PACKET
		call->iovqc--;
#endif /* RXDEBUG_PACKET */
#ifdef RX_TRACK_PACKETS
		cp->flags |= RX_PKTFLAG_CP;
#endif
		call->currentPacket = cp;
		call->nFree = cp->length;
		call->curvec = 1;
		call->curpos =
		    (char *)cp->wirevec[1].iov_base +
		    call->conn->securityHeaderSize;
		call->curlen =
		    cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
	    }
	}

	if (nbytes) {
	    /* The next iovec should point to the current position */
	    if (iov[nextio].iov_base != call->curpos
		|| iov[nextio].iov_len > (int)call->curlen) {
		call->error = RX_PROTOCOL_ERROR;
		MUTEX_EXIT(&call->lock);
		if (cp) {
#ifdef RX_TRACK_PACKETS
		    cp->flags &= ~RX_PKTFLAG_CP;
#endif
		    queue_Prepend(&tmpq, cp);
#ifdef RXDEBUG_PACKET
		    tmpqc++;
#endif /* RXDEBUG_PACKET */
		    cp = call->currentPacket = (struct rx_packet *)0;
		}
#ifdef RXDEBUG_PACKET
		tmpqc -=
#endif /* RXDEBUG_PACKET */
		    rxi_FreePackets(0, &tmpq);
		return 0;
	    }
	    nbytes -= iov[nextio].iov_len;
	    call->curpos += iov[nextio].iov_len;
	    call->curlen -= iov[nextio].iov_len;
	    call->nFree -= iov[nextio].iov_len;
	    nextio++;
	    if (call->curlen == 0) {
		if (++call->curvec > cp->niovecs) {
		    call->nFree = 0;
		} else {
		    call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
		    call->curlen = cp->wirevec[call->curvec].iov_len;
		}
	    }
	}
    } while (nbytes && nextio < nio);

    /* Move the packets from the temporary queue onto the transmit queue.
     * We may end up with more than call->twind packets on the queue. */
#ifdef RX_TRACK_PACKETS
    for (queue_Scan(&tmpq, p, np, rx_packet)) {
	p->flags |= RX_PKTFLAG_TQ;
    }
#endif
    if (call->error)
	call->mode = RX_MODE_ERROR;

    queue_SpliceAppend(&call->tq, &tmpq);

    if (!(call->flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
	rxi_Start(0, call, 0, 0);
    }

    /* Wait for the length of the transmit queue to fall below call->twind */
    while (!call->error && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
	clock_NewTime();
	call->startWait = clock_Sec();
#ifdef RX_ENABLE_LOCKS
	CV_WAIT(&call->cv_twind, &call->lock);
#else
	call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
	osi_rxSleep(&call->twind);
#endif
	call->startWait = 0;
    }

    /* cp is no longer valid since we may have given up the lock */
    cp = call->currentPacket;

    if (call->error) {
	call->mode = RX_MODE_ERROR;
	call->currentPacket = NULL;
	MUTEX_EXIT(&call->lock);
	if (cp) {
#ifdef RX_TRACK_PACKETS
	    cp->flags &= ~RX_PKTFLAG_CP;
#endif
	    rxi_FreePacket(cp);
	}
	return 0;
    }
    MUTEX_EXIT(&call->lock);

    return requestCount - nbytes;
}
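/*
 * Illustrative only: the intended calling pattern pairs rxi_WritevAlloc
 * (which hands back packet buffer space in iov[]) with rxi_WritevProc
 * above (which commits the bytes copied into that space); callers normally
 * reach both through the external rx_Writev* wrappers, which take netpri.
 * The example_writev() name and the elided buffer-filling step below are
 * assumptions for illustration.
 */
#if 0
static int
example_writev(struct rx_call *call, char *data, int len)
{
    struct iovec iov[RX_MAXIOVECS];
    int nio, navail;

    /* Ask rx for up to len bytes of packet buffer space. */
    navail = rxi_WritevAlloc(call, iov, &nio, RX_MAXIOVECS, len);
    if (navail <= 0)
	return 0;

    /* ... copy navail bytes of data into iov[0..nio-1] here ... */

    /* Commit exactly the space that was filled. */
    return rxi_WritevProc(call, iov, nio, navail);
}
#endif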