static void
allocateBandwidth (tr_bandwidth * b,
                   tr_priority_t parent_priority,
                   tr_direction dir,
                   unsigned int period_msec,
                   tr_ptrArray * peer_pool)
{
  const tr_priority_t priority = MAX (parent_priority, b->priority);

  assert (tr_isBandwidth (b));
  assert (tr_isDirection (dir));

  /* set the available bandwidth */
  if (b->band[dir].isLimited)
    {
      const uint64_t nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
      /* divide before narrowing so a large cap can't overflow the cast */
      b->band[dir].bytesLeft = (unsigned int)((nextPulseSpeed * period_msec) / 1000u);
    }

  /* add this bandwidth's peer, if any, to the peer pool */
  if (b->peer != NULL)
    {
      b->peer->priority = priority;
      tr_ptrArrayAppend (peer_pool, b->peer);
    }

  /* traverse & repeat for the subtree */
  if (1)
    {
      int i;
      struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase (&b->children);
      const int n = tr_ptrArraySize (&b->children);
      for (i=0; i<n; ++i)
        allocateBandwidth (children[i], priority, dir, period_msec, peer_pool);
    }
}
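/* A standalone check of the bytesLeft computation above. This is a
 * sketch for illustration only: the 100000 B/s cap and the 500 ms
 * period are made-up values, since the real period is supplied by
 * the caller. */
#include <assert.h>
#include <stdint.h>

static unsigned int
bytesForPulse (uint64_t desiredSpeed_Bps, unsigned int period_msec)
{
  /* same arithmetic as allocateBandwidth () above: a bytes-per-second
   * cap prorated over the allocation period */
  return (unsigned int)((desiredSpeed_Bps * period_msec) / 1000u);
}

int
main (void)
{
  /* a 100000 B/s cap over a 500 ms pulse yields 50000 bytes */
  assert (bytesForPulse (100000, 500) == 50000);
  return 0;
}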
void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        tr_peerIoRef( io );

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        switch( io->priority )
        {
            case TR_PRI_HIGH:
                tr_ptrArrayAppend( &high, io );
                /* fall through */
            case TR_PRI_NORMAL:
                tr_ptrArrayAppend( &normal, io );
                /* fall through */
            default:
                tr_ptrArrayAppend( &low, io );
        }
    }

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}
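/* tr_bandwidthAllocate() above calls phaseOne(), which is not shown in
 * this excerpt. The sketch below is a hypothetical reconstruction based
 * on the inline round-robin loop in the older tr_bandwidthAllocate()
 * further down: the 1024-byte increment and the random starting point
 * are carried over from that version as assumptions, not taken from
 * the actual helper. */
static void
phaseOne( tr_ptrArray * peerArray, tr_direction dir )
{
    int n = tr_ptrArraySize( peerArray );
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );
    int i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */

    while( n > 0 )
    {
        /* give each peer a small chunk of bandwidth per pass */
        const int increment = 1024;
        const int byteCount = tr_peerIoFlush( peers[i], dir, increment );

        if( byteCount == increment )
            ++i; /* peer used its whole chunk; it gets another turn */
        else
        {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        if( i == n )
            i = 0; /* wrap around to the front of the remaining peers */
    }
}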
static void
allocateBandwidth( tr_bandwidth  * b,
                   tr_priority_t   parent_priority,
                   tr_direction    dir,
                   unsigned int    period_msec,
                   tr_ptrArray   * peer_pool )
{
    tr_priority_t priority;

    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    /* set the available bandwidth */
    if( b->band[dir].isLimited )
    {
        /* compute in 64 bits so a large cap can't overflow the product */
        const uint64_t nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
        b->band[dir].bytesLeft = (unsigned int)(( nextPulseSpeed * period_msec ) / 1000u);

#ifdef DEBUG_DIRECTION
        if( dir == DEBUG_DIRECTION )
            fprintf( stderr, "bandwidth %p rawSpeed(%5.2f) desiredSpeed(%u), allocating %u\n",
                     b, tr_bandwidthGetRawSpeed( b, dir ),
                     (unsigned int)nextPulseSpeed, b->band[dir].bytesLeft );
#endif
    }

    priority = MAX( parent_priority, b->priority );

    /* add this bandwidth's peer, if any, to the peer pool */
    if( b->peer != NULL )
    {
        b->peer->priority = priority;
        tr_ptrArrayAppend( peer_pool, b->peer );
    }

    /* traverse & repeat for the subtree */
    if( 1 )
    {
        int i;
        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
        const int n = tr_ptrArraySize( &b->children );

#ifdef DEBUG_DIRECTION
        if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
            fprintf( stderr, "bandwidth %p has %d children\n", b, n );
#endif

        for( i=0; i<n; ++i )
            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
    }
}
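/* A tiny standalone check of the MAX() priority rule used above. The
 * numeric values here are assumptions for illustration; the point is
 * only that a child can raise, but never lower, the priority it
 * inherits from its parent. */
#include <assert.h>

enum { PRI_LOW = -1, PRI_NORMAL = 0, PRI_HIGH = 1 }; /* assumed ordering */
#define MAX( a, b ) ( (a) > (b) ? (a) : (b) )

int
main( void )
{
    /* a low-priority child under a high-priority parent runs high... */
    assert( MAX( PRI_HIGH, PRI_LOW ) == PRI_HIGH );
    /* ...and a high-priority child under a low-priority parent does too */
    assert( MAX( PRI_LOW, PRI_HIGH ) == PRI_HIGH );
    return 0;
}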
static void
allocateBandwidth( tr_bandwidth * b,
                   tr_direction   dir,
                   int            period_msec,
                   tr_ptrArray  * peer_pool )
{
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    /* set the available bandwidth */
    if( b->band[dir].isLimited )
    {
        const double desiredSpeed = b->band[dir].desiredSpeed;
        const double nextPulseSpeed = desiredSpeed;
        b->band[dir].bytesLeft = MAX( 0.0, nextPulseSpeed * 1024.0 * period_msec / 1000.0 );

#ifdef DEBUG_DIRECTION
        if( dir == DEBUG_DIRECTION )
            fprintf( stderr, "bandwidth %p rawSpeed(%5.2f) desiredSpeed(%5.2f), allocating %5.2f\n",
                     b, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed,
                     b->band[dir].bytesLeft / 1024.0 );
#endif
    }

    /* add this bandwidth's peers to the peer pool */
    {
        int i;
        const int n = tr_ptrArraySize( b->peers );
        for( i=0; i<n; ++i )
            tr_ptrArrayAppend( peer_pool, tr_ptrArrayNth( b->peers, i ) );

#ifdef DEBUG_DIRECTION
        if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
            fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
#endif
    }

    /* all children should reallocate too */
    if( 1 )
    {
        int i, n = 0;
        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayPeek( b->children, &n );
        for( i=0; i<n; ++i )
            allocateBandwidth( children[i], dir, period_msec, peer_pool );
    }
}
void
tr_bandwidthAllocate( tr_bandwidth * b,
                      tr_direction   dir,
                      int            period_msec )
{
    int i, n, peerCount;
    tr_ptrArray * tmp;
    struct tr_peerIo ** peers;
    const uint64_t now = tr_date( );
    const uint64_t cutoff = now + 100; /* 1/10th of a second */

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    tmp = tr_ptrArrayNew( );
    allocateBandwidth( b, dir, period_msec, tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayPeek( tmp, &peerCount );

    /* Stop all peers from listening for the socket to be ready for IO.
     * See "Second phase of IO" lower in this function for more info. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, FALSE );

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * or peers that can use it */
    n = peerCount;
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
    while( n > 0 && tr_date( ) <= cutoff )
    {
        /* give the last remaining peer a bigger chunk */
        const int increment = n==1 ? 4096 : 1024;
        const int byteCount = tr_peerIoFlush( peers[i], dir, increment );

        if( byteCount == increment )
            ++i;
        else
        {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        assert( i <= n );
        if( i == n )
            i = 0;
    }

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        if( tr_peerIoHasBandwidthLeft( peers[i], dir ) )
            tr_peerIoSetEnabled( peers[i], dir, TRUE );

    /* cleanup */
    tr_ptrArrayFree( tmp, NULL );
}
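/* Both versions of tr_bandwidthAllocate() are meant to be driven
 * periodically, once per direction, on the top-level bandwidth object.
 * Below is a hypothetical driver sketch: the callback name and the
 * 500 ms period are assumptions, since the real timer wiring lives
 * outside this file. */
#define BANDWIDTH_PERIOD_MSEC 500

static void
bandwidthPulse( tr_bandwidth * topLevel )
{
    /* reallocate both directions for the whole bandwidth tree */
    tr_bandwidthAllocate( topLevel, TR_UP, BANDWIDTH_PERIOD_MSEC );
    tr_bandwidthAllocate( topLevel, TR_DOWN, BANDWIDTH_PERIOD_MSEC );
}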