void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        tr_peerIoRef( io );

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        /* the fall-throughs are intentional: a high-priority peer lands in
         * all three lists, a normal one in two, a low one in just one, so
         * higher-priority peers get more passes in phase one below */
        switch( io->priority ) {
            case TR_PRI_HIGH:   tr_ptrArrayAppend( &high,   io ); /* fall through */
            case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */
            default:            tr_ptrArrayAppend( &low,    io );
        }
    }

    /* First phase of IO.  Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others.  Loop through the peers, giving each a
     * small chunk of bandwidth.  Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO.  To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}
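/* phaseOne() itself is not shown in this excerpt.  The sketch below is a
 * reconstruction, not the real implementation: it assumes phaseOne() is the
 * same round-robin loop that appears inline in the variant of
 * tr_bandwidthAllocate() further down, minus that variant's time cutoff.
 * The 1024-byte increment is likewise an assumed value; only calls that
 * already appear elsewhere in this excerpt are used. */
static void
phaseOne( tr_ptrArray * peerArray, tr_direction dir )
{
    int i, n;
    struct tr_peerIo ** peers;

    n = tr_ptrArraySize( peerArray );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );

    /* pick a random starting point so the same peers aren't always served first */
    i = n ? tr_cryptoWeakRandInt( n ) : 0;

    while( n > 0 )
    {
        const int increment = 1024; /* assumed chunk size */
        const int byteCount = tr_peerIoFlush( peers[i], dir, increment );

        if( byteCount == increment )
            ++i; /* the peer used its whole chunk; keep it in the rotation */
        else {
            /* peer is done writing for now; swap it out of the active range */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        if( i >= n )
            i = 0;
    }
}

/* For comparison, the variant of tr_bandwidthAllocate() below writes this
 * round-robin loop inline, with a varying increment and a 100-millisecond
 * cutoff per pass, and without the three priority lists. */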
void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      int             period_msec )
{
    int i, n, peerCount;
    tr_ptrArray * tmp;
    struct tr_peerIo ** peers;
    const uint64_t now = tr_date( );
    const uint64_t cutoff = now + 100; /* 1/10th of a second */

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    tmp = tr_ptrArrayNew( );
    allocateBandwidth( b, dir, period_msec, tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayPeek( tmp, &peerCount );

    /* Stop all peers from listening for the socket to be ready for IO.
     * See "Second phase of IO" lower in this function for more info. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, FALSE );

    /* First phase of IO.  Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others.  Loop through the peers, giving each a
     * small chunk of bandwidth.  Keep looping until we run out of bandwidth
     * or peers that can use it */
    n = peerCount;
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
    while( n > 0 && tr_date( ) <= cutoff )
    {
        const int increment = n==1 ? 4096 : 1024;
        const int byteCount = tr_peerIoFlush( peers[i], dir, increment );

        if( byteCount == increment )
            ++i;
        else {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        assert( i <= n );
        if( i == n )
            i = 0;
    }

    /* Second phase of IO.  To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        if( tr_peerIoHasBandwidthLeft( peers[i], dir ) )
            tr_peerIoSetEnabled( peers[i], dir, TRUE );

    /* cleanup */
    tr_ptrArrayFree( tmp, NULL );
}
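/* allocateBandwidth() is only described above, never shown.  What follows is
 * a hedged sketch of a body that would satisfy the comment's two stated
 * purposes, written against the five-argument call in the first variant
 * above.  The tr_bandwidth fields it touches (priority, peer, children,
 * band[dir]) and the tr_priority_t type are assumptions about the
 * surrounding code, not facts taken from this excerpt; only the tr_ptrArray
 * calls appear elsewhere in it. */
static void
allocateBandwidth( tr_bandwidth  * b,
                   tr_priority_t   parent_priority,
                   tr_direction    dir,
                   unsigned int    period_msec,
                   tr_ptrArray   * peer_pool )
{
    /* take the higher of the inherited and local priority, so that a
     * high-priority ancestor lifts its whole subtree (assumed behavior) */
    const tr_priority_t priority = parent_priority > b->priority
                                 ? parent_priority : b->priority;

    /* purpose 1: give b its byte budget for this period (assumed fields;
     * desiredSpeed_Bps is bytes per second, scaled down to the period) */
    if( b->band[dir].isLimited )
        b->band[dir].bytesLeft = ( b->band[dir].desiredSpeed_Bps * period_msec ) / 1000u;

    /* purpose 2: accumulate b's peerIo, if it has one, into the pool */
    if( b->peer != NULL )
    {
        b->peer->priority = priority;
        tr_ptrArrayAppend( peer_pool, b->peer );
    }

    /* repeat for the subtree */
    {
        int i;
        const int childCount = tr_ptrArraySize( &b->children );
        tr_bandwidth ** children = (tr_bandwidth**) tr_ptrArrayBase( &b->children );
        for( i=0; i<childCount; ++i )
            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
    }
}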