void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        tr_peerIoRef( io );

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        switch( io->priority )
        {
            case TR_PRI_HIGH:
                tr_ptrArrayAppend( &high,   io );
                /* fall through */
            case TR_PRI_NORMAL:
                tr_ptrArrayAppend( &normal, io );
                /* fall through */
            default:
                tr_ptrArrayAppend( &low,    io );
        }
    }

    /* First phase of IO.  Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others.  Loop through the peers, giving each a
     * small chunk of bandwidth.  Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO.  To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}
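The switch above uses deliberate case fallthrough: a high-priority peerIo is appended to all three arrays, a normal-priority one to two of them, and everything else only to the low array, so when phaseOne() services high, normal, and low in turn, higher-priority peers get more passes at the available bandwidth. Below is a minimal standalone sketch of that bucketing pattern; the names are hypothetical and it is not Transmission code.

#include <stdio.h>

enum prio { PRI_LOW, PRI_NORMAL, PRI_HIGH };

int main( void )
{
    int high = 0, normal = 0, low = 0;
    const enum prio peers[] = { PRI_HIGH, PRI_NORMAL, PRI_LOW };
    size_t i;

    for( i = 0; i < sizeof peers / sizeof *peers; ++i )
    {
        switch( peers[i] )
        {
            case PRI_HIGH:   ++high;   /* fall through */
            case PRI_NORMAL: ++normal; /* fall through */
            default:         ++low;
        }
    }

    /* prints "high 1, normal 2, low 3": the HIGH peer was counted in
     * every bucket, the NORMAL peer in two, the LOW peer in one */
    printf( "high %d, normal %d, low %d\n", high, normal, low );
    return 0;
}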
static void
tr_handshakeFree( tr_handshake * handshake )
{
    if( handshake->io )
        tr_peerIoUnref( handshake->io ); /* balanced by the ref in tr_handshakeNew */

    event_free( handshake->timeout_timer );
    tr_free( handshake );
}
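The comment above points at the ownership rule: the handshake takes one reference on the peerIo when it is created and drops exactly one here, so the peerIo cannot be destroyed while a handshake still uses it. A standalone sketch of that constructor-ref / destructor-unref pairing, with hypothetical names rather than the Transmission API:

#include <stdlib.h>

struct resource { int refcount; };

static void resource_ref( struct resource * r )   { ++r->refcount; }
static void resource_unref( struct resource * r ) { if( --r->refcount == 0 ) free( r ); }

struct owner { struct resource * res; };

static struct owner * owner_new( struct resource * res )
{
    struct owner * o = malloc( sizeof *o );
    o->res = res;
    resource_ref( res );      /* balanced by the unref in owner_free() */
    return o;
}

static void owner_free( struct owner * o )
{
    if( o->res )
        resource_unref( o->res );
    free( o );
}

int main( void )
{
    struct resource * r = calloc( 1, sizeof *r );
    resource_ref( r );                  /* the caller's own reference  */
    struct owner * o = owner_new( r );  /* refcount is now 2           */
    owner_free( o );                    /* refcount back to 1          */
    resource_unref( r );                /* refcount 0: resource freed  */
    return 0;
}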
static void
canReadWrapper( tr_peerIo * io )
{
    tr_bool err = 0;
    tr_bool done = 0;
    tr_session * session;

    dbgmsg( io, "canRead" );

    assert( tr_isPeerIo( io ) );
    assert( tr_isSession( io->session ) );

    tr_peerIoRef( io );

    session = io->session;

    /* try to consume the input buffer */
    if( io->canRead )
    {
        tr_sessionLock( session );

        while( !done && !err )
        {
            size_t piece = 0;
            const size_t oldLen = EVBUFFER_LENGTH( io->inbuf );
            const int ret = io->canRead( io, io->userData, &piece );
            const size_t used = oldLen - EVBUFFER_LENGTH( io->inbuf );

            assert( tr_isPeerIo( io ) );

            if( piece || (piece!=used) )
            {
                const uint64_t now = tr_time_msec( );

                if( piece )
                    tr_bandwidthUsed( &io->bandwidth, TR_DOWN, piece, TRUE, now );

                if( used != piece )
                    tr_bandwidthUsed( &io->bandwidth, TR_DOWN, used - piece, FALSE, now );
            }

            switch( ret )
            {
                case READ_NOW:
                    if( EVBUFFER_LENGTH( io->inbuf ) )
                        continue;
                    done = 1;
                    break;

                case READ_LATER:
                    done = 1;
                    break;

                case READ_ERR:
                    err = 1;
                    break;
            }

            assert( tr_isPeerIo( io ) );
        }

        tr_sessionUnlock( session );
    }

    /* keep the iobuf's excess capacity from growing too large */
    if( EVBUFFER_LENGTH( io->inbuf ) == 0 )
    {
        evbuffer_free( io->inbuf );
        io->inbuf = evbuffer_new( );
    }

    assert( tr_isPeerIo( io ) );
    tr_peerIoUnref( io );
}
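The bookkeeping inside the loop splits what the canRead callback consumed into payload and protocol overhead: the callback reports payload bytes through its `piece` out-parameter, `used` is the total drained from the input buffer, and the difference is charged to the bandwidth object as non-piece traffic. A standalone sketch of that arithmetic with made-up numbers and hypothetical names:

#include <assert.h>
#include <stdio.h>

int main( void )
{
    const size_t oldLen = 100;   /* buffer length before the callback ran */
    const size_t newLen = 60;    /* buffer length after the callback ran  */
    const size_t piece  = 32;    /* payload bytes the callback reported   */

    const size_t used     = oldLen - newLen;  /* total bytes consumed: 40 */
    const size_t overhead = used - piece;     /* protocol overhead:     8 */

    assert( piece <= used );

    /* prints "payload 32, overhead 8"; both amounts would be charged to
     * the download direction, flagged as piece vs. non-piece data */
    printf( "payload %zu, overhead %zu\n", piece, overhead );
    return 0;
}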