Example #1
static void
phaseOne( tr_ptrArray * peerArray, tr_direction dir )
{
    int i, n;
    int peerCount = tr_ptrArraySize( peerArray );
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    n = peerCount;
    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
    while( n > 1 )
    {
        const size_t increment = 1024;
        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );

        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );

        if( bytesUsed == (int)increment )
            ++i;
        else {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        if( i == n )
            i = 0;
    }
}
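
To make the allocation pattern in Example #1 easier to follow outside of Transmission's types, here is a small self-contained sketch of the same idea: each peer gets a small fixed increment per pass, and a peer that cannot use its whole increment is swapped to the tail of the array so the working set shrinks. The names demo_peer and flush_up_to, and the byte figures, are invented for illustration only and are not part of the Transmission API.

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for a peer's remaining send budget */
typedef struct { int id; int budget; } demo_peer;

/* hypothetical stand-in for tr_peerIoFlush(): writes up to `increment`
 * bytes and returns how many bytes were actually used */
static int flush_up_to( demo_peer * p, int increment )
{
    const int used = p->budget < increment ? p->budget : increment;
    p->budget -= used;
    return used;
}

int main( void )
{
    demo_peer peers[4] = { { 0, 2500 }, { 1, 700 }, { 2, 4096 }, { 3, 0 } };
    int n = 4;
    int i = rand( ) % n;          /* random starting point, as in phaseOne() */
    const int increment = 1024;   /* small per-pass chunk */

    while( n > 1 )
    {
        const int used = flush_up_to( &peers[i], increment );
        printf( "peer #%d used %d bytes in this pass\n", peers[i].id, used );

        if( used == increment )
            ++i; /* peer kept up; move on to the next one */
        else {
            /* peer is done for now; swap it to the tail and shrink the list */
            demo_peer tmp = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = tmp;
            --n;
        }

        if( i == n )
            i = 0; /* wrap around */
    }
    return 0;
}
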
Example #2
static void
phaseOne (tr_ptrArray * peerArray, tr_direction dir)
{
    int n;
    int peerCount = tr_ptrArraySize (peerArray);
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase (peerArray);

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    n = peerCount;
    dbgmsg ("%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download"));
    while (n > 0)
    {
        const int i = tr_cryptoWeakRandInt (n); /* pick a peer at random */

        /* value of 3000 bytes chosen so that when using uTP we'll send a full-size
         * frame right away and leave enough buffered data for the next frame to go
         * out in a timely manner. */
        const size_t increment = 3000;

        const int bytesUsed = tr_peerIoFlush (peers[i], dir, increment);

        dbgmsg ("peer #%d of %d used %d bytes in this pass", i, n, bytesUsed);

        if (bytesUsed != (int)increment) {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }
    }
}
Example #3
int
tr_peerIoFlushOutgoingProtocolMsgs( tr_peerIo * io )
{
    size_t byteCount = 0;
    tr_list * it;

    /* count up how many bytes are used by non-piece-data messages
       at the front of our outbound queue */
    for( it=io->outbuf_datatypes; it!=NULL; it=it->next )
    {
        struct tr_datatype * d = it->data;

        if( d->isPieceData )
            break;

        byteCount += d->length;
    }

    return tr_peerIoFlush( io, TR_UP, byteCount );
}
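
The counting step above, summing the lengths of the non-piece-data entries at the front of the outbound queue, is what limits the tr_peerIoFlush call to protocol messages. Here is a self-contained sketch of that step over a hypothetical linked list; demo_datatype and count_leading_protocol_bytes are invented names, not Transmission API, and the sample sizes are the standard 9-byte HAVE and 17-byte REQUEST wire messages.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* hypothetical stand-in for struct tr_datatype: one queued outbound chunk */
typedef struct demo_datatype {
    bool                    is_piece_data;
    size_t                  length;
    struct demo_datatype  * next;
} demo_datatype;

/* sum the lengths of the non-piece-data entries at the front of the queue,
 * stopping at the first piece-data entry -- the same counting step that
 * tr_peerIoFlushOutgoingProtocolMsgs() does before calling tr_peerIoFlush() */
static size_t count_leading_protocol_bytes( const demo_datatype * head )
{
    size_t byte_count = 0;
    const demo_datatype * it;

    for( it = head; it != NULL; it = it->next )
    {
        if( it->is_piece_data )
            break;
        byte_count += it->length;
    }

    return byte_count;
}

int main( void )
{
    /* queue: HAVE message (9 bytes), REQUEST message (17 bytes), then piece data */
    demo_datatype piece = { true, 16384, NULL };
    demo_datatype req   = { false, 17, &piece };
    demo_datatype have  = { false, 9, &req };

    printf( "%zu protocol bytes to flush\n", count_leading_protocol_bytes( &have ) );
    return 0;
}
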
Example #4
void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      int             period_msec )
{
    int i, n, peerCount;
    tr_ptrArray * tmp;
    struct tr_peerIo ** peers;
    const uint64_t now = tr_date( );
    const uint64_t cutoff = now + 100; /* 1/10th of a second */

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    tmp = tr_ptrArrayNew( );
    allocateBandwidth( b, dir, period_msec, tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayPeek( tmp, &peerCount );

    /* Stop all peers from listening for the socket to be ready for IO.
     * See "Second phase of IO" lower in this function for more info. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, FALSE );

    /* First phase of IO.  Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others.  Loop through the peers, giving each a
     * small chunk of bandwidth.  Keep looping until we run out of bandwidth
     * or peers that can use it */
    n = peerCount;
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
    for( ; n>0 && tr_date()<=cutoff; )
    {
        const int increment = n==1 ? 4096 : 1024;
        const int byteCount = tr_peerIoFlush( peers[i], dir, increment);

        if( byteCount == increment )
            ++i;
        else {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * tmp = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = tmp;
            --n;
        }

        assert( i <= n );
        if( i == n )
            i = 0;
    }

    /* Second phase of IO.  To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        if( tr_peerIoHasBandwidthLeft( peers[i], dir ) )
            tr_peerIoSetEnabled( peers[i], dir, TRUE );

    /* cleanup */
    tr_ptrArrayFree( tmp, NULL );
}