Example #1
0
/* First phase of IO for one priority band: hand out bandwidth to peers
 * round-robin in small chunks so fast peers can't starve slow ones.
 * Loops until no peer in the array can use any more bandwidth. */
static void
phaseOne( tr_ptrArray * peerArray, tr_direction dir )
{
    int i, n;
    int peerCount = tr_ptrArraySize( peerArray );
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    n = peerCount;
    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
    /* FIX: loop while n > 0, not n > 1 -- the old bound skipped the last
     * remaining peer entirely (and did nothing at all when peerCount==1) */
    while( n > 0 )
    {
        const size_t increment = 1024;
        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );

        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );

        if( bytesUsed == (int)increment )
            ++i; /* peer used its full chunk; it may want more next pass */
        else {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        /* wrap around to the front of the still-active portion */
        if( i == n )
            i = 0;
    }
}
Example #2
0
/* return a count of how many contiguous blocks there are starting at this pos */
static int
getBlockRun (const tr_cache * cache, int pos, struct run_info * info)
{
  const int blockCount = tr_ptrArraySize (&cache->blocks);
  const struct cache_block * const * all = (const struct cache_block* const *) tr_ptrArrayBase (&cache->blocks);
  const struct cache_block * first = all[pos];
  tr_block_index_t expected = first->block;
  int end = pos;

  /* walk forward while the entries stay consecutive
   * and belong to the same torrent as the first one */
  while (end < blockCount)
    {
      const struct cache_block * cur = all[end];
      if (cur->block != expected)
        break;
      if (cur->tor != first->tor)
        break;
      ++end;
      ++expected;
    }

  if (info != NULL)
    {
      const struct cache_block * last = all[end-1];
      info->last_block_time = last->time;
      info->is_piece_done = tr_torrentPieceIsComplete (last->tor, last->piece);
      info->is_multi_piece = last->piece != all[pos]->piece;
      info->len = end - pos;
      info->pos = pos;
    }

  return end - pos;
}
Example #3
0
/* First phase of IO: distribute bandwidth fairly so faster peers don't
 * starve the slower ones. Pick peers at random, give each a small chunk
 * of bandwidth, and keep going until nobody can use any more. */
static void
phaseOne (tr_ptrArray * peerArray, tr_direction dir)
{
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase (peerArray);
    int active = tr_ptrArraySize (peerArray);

    dbgmsg ("%d peers to go round-robin for %s", active, (dir==TR_UP?"upload":"download"));

    while (active > 0)
    {
        /* value of 3000 bytes chosen so that when using uTP we'll send a full-size
         * frame right away and leave enough buffered data for the next frame to go
         * out in a timely manner. */
        const size_t increment = 3000;

        const int which = tr_cryptoWeakRandInt (active); /* pick a peer at random */
        const int bytesUsed = tr_peerIoFlush (peers[which], dir, increment);

        dbgmsg ("peer #%d of %d used %d bytes in this pass", which, active, bytesUsed);

        if (bytesUsed != (int)increment)
        {
            /* this peer couldn't use a full chunk, so it's done writing
             * for now; swap it out of the active range of the array */
            tr_peerIo * tmp = peers[which];
            peers[which] = peers[active-1];
            peers[active-1] = tmp;
            --active;
        }
    }
}
Example #4
0
/* Flush `n` contiguous cached blocks starting at `pos` to disk as a single
 * write, removing them from the cache. Returns tr_ioWrite()'s result
 * (0 on success). */
static int
flushContiguous (tr_cache * cache, int pos, int n)
{
  int i;
  int err = 0;
  size_t len;
  uint8_t * buf = tr_new (uint8_t, n * MAX_BLOCK_SIZE);
  uint8_t * walk = buf;
  struct cache_block ** blocks = (struct cache_block**) tr_ptrArrayBase (&cache->blocks);

  /* the run is contiguous, so the write starts at the first
   * block's torrent / piece / offset */
  struct cache_block * b = blocks[pos];
  tr_torrent * tor = b->tor;
  const tr_piece_index_t piece = b->piece;
  const uint32_t offset = b->offset;

  /* concatenate the blocks' payloads into one buffer,
   * freeing each cache entry as we go */
  for (i=pos; i<pos+n; ++i)
    {
      b = blocks[i];
      evbuffer_copyout (b->evbuf, walk, b->length);
      walk += b->length;
      evbuffer_free (b->evbuf);
      tr_free (b);
    }
  tr_ptrArrayErase (&cache->blocks, pos, pos+n);

  /* FIX: compute the byte count before freeing buf -- the old code read
   * `walk-buf` after tr_free(buf), and pointer values are indeterminate
   * after free (undefined behavior per C11 6.2.4) */
  len = (size_t)(walk - buf);

  err = tr_ioWrite (tor, piece, offset, len, buf);
  tr_free (buf);

  ++cache->disk_writes;
  cache->disk_write_bytes += len;
  return err;
}
Example #5
0
/* Recursively walk the bandwidth tree rooted at `b`:
 * 1. refill each limited node's per-pulse byte allowance for `dir`, and
 * 2. collect every attached peerIo (tagged with its effective priority)
 *    into `peer_pool`. */
static void
allocateBandwidth (tr_bandwidth  * b,
                   tr_priority_t   parent_priority,
                   tr_direction    dir,
                   unsigned int    period_msec,
                   tr_ptrArray   * peer_pool)
{
    /* a child can't have a lower effective priority than its parent */
    const tr_priority_t priority = MAX (parent_priority, b->priority);

    assert (tr_isBandwidth (b));
    assert (tr_isDirection (dir));

    /* set the available bandwidth */
    if (b->band[dir].isLimited)
    {
        const uint64_t nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
        /* FIX: divide in 64 bits *before* narrowing. The old code was
         * `(unsigned int)(nextPulseSpeed * period_msec) / 1000u`: the cast
         * binds tighter than `/`, so the 64-bit product was truncated to
         * 32 bits first, overflowing for speeds above ~4 MB/s with a
         * 1000 ms period -- exactly what the uint64_t was meant to avoid */
        b->band[dir].bytesLeft = (unsigned int)(nextPulseSpeed * period_msec / 1000u);
    }

    /* add this bandwidth's peer, if any, to the peer pool */
    if (b->peer != NULL) {
        b->peer->priority = priority;
        tr_ptrArrayAppend (peer_pool, b->peer);
    }

    /* traverse & repeat for the subtree */
    if (1) {
        int i;
        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase (&b->children);
        const int n = tr_ptrArraySize (&b->children);
        for (i=0; i<n; ++i)
            allocateBandwidth (children[i], priority, dir, period_msec, peer_pool);
    }
}
Example #6
0
/* Top-level per-pulse bandwidth allocator for one direction.
 * Refills the allowances of the whole bandwidth tree rooted at `b`,
 * gathers all of its peerIos, hands out bandwidth in priority order,
 * and finally enables on-demand IO for peers with allowance to spare. */
void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    /* Take a reference on every io so it stays alive for the whole pass,
     * then bucket it by priority. NOTE: the fall-throughs below are
     * deliberate -- a HIGH peer is appended to all three arrays, a NORMAL
     * peer to two, so higher-priority peers get a turn in every
     * phaseOne() round below. */
    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        tr_peerIoRef( io );

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        switch( io->priority ) {
        case TR_PRI_HIGH:
            tr_ptrArrayAppend( &high,   io ); /* fall through */
        case TR_PRI_NORMAL:
            tr_ptrArrayAppend( &normal, io ); /* fall through */
        default:
            tr_ptrArrayAppend( &low,    io );
        }
    }

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    /* release the references taken above */
    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup -- the arrays only borrowed the ios, so no free function */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}
Example #7
0
/* Recursively walk the bandwidth tree rooted at `b`: refill each limited
 * node's per-pulse byte allowance for `dir` and collect every attached
 * peerIo (tagged with its effective priority) into `peer_pool`. */
static void
allocateBandwidth( tr_bandwidth  * b,
                   tr_priority_t   parent_priority,
                   tr_direction    dir,
                   unsigned int    period_msec,
                   tr_ptrArray   * peer_pool )
{
    tr_priority_t priority;
    int i;
    int n;
    struct tr_bandwidth ** children;

    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    /* set the available bandwidth */
    if( b->band[dir].isLimited )
    {
        /* FIX: do the math in 64 bits -- speed * period overflows a
         * 32-bit unsigned for rates above ~4 MB/s with a 1000 ms period */
        const uint64_t nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
        b->band[dir].bytesLeft = (unsigned int)( nextPulseSpeed * period_msec / 1000u );

#ifdef DEBUG_DIRECTION
        /* FIX: the old debug print referenced undeclared variables
         * (currentSpeed, desiredSpeed) and wouldn't compile when
         * DEBUG_DIRECTION was defined */
        if( dir == DEBUG_DIRECTION )
                fprintf( stderr, "bandwidth %p desiredSpeed(%5.2f), allocating %u\n",
                         b, (double)nextPulseSpeed,
                         b->band[dir].bytesLeft );
#endif
    }

    /* a child can't have a lower effective priority than its parent */
    priority = MAX( parent_priority, b->priority );

    /* add this bandwidth's peer, if any, to the peer pool */
    if( b->peer != NULL ) {
        b->peer->priority = priority;
        tr_ptrArrayAppend( peer_pool, b->peer );
    }

    /* traverse & repeat for the subtree */
    children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
    n = tr_ptrArraySize( &b->children );

#ifdef DEBUG_DIRECTION
    /* FIX: `n` was out of scope here in the old code */
    if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
        fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
#endif

    for( i=0; i<n; ++i )
        allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
}
Example #8
0
/* Look up the quark for the `len`-byte string `str`: first a binary search
 * of the compile-time key table, then a linear scan of any keys registered
 * at runtime. On a hit, stores the quark in *setme and returns true. */
bool
tr_quark_lookup (const void * str, size_t len, tr_quark * setme)
{
  static const size_t n_static = sizeof(my_static) / sizeof(struct tr_key_struct);
  struct tr_key_struct key;
  struct tr_key_struct * hit;

  assert (n_static == TR_N_KEYS);

  key.str = str;
  key.len = len;

  /* is it in our static array? */
  hit = bsearch (&key, my_static, n_static, sizeof(struct tr_key_struct), compareKeys);
  if (hit != NULL)
    {
      /* static quarks are their index into the static table */
      *setme = hit - my_static;
      return true;
    }

  /* was it added during runtime? */
  if (!tr_ptrArrayEmpty (&my_runtime))
    {
      size_t i;
      struct tr_key_struct ** runtime = (struct tr_key_struct **) tr_ptrArrayBase (&my_runtime);
      const size_t n_runtime = tr_ptrArraySize (&my_runtime);

      for (i=0; i<n_runtime; ++i)
        {
          if (compareKeys (&key, runtime[i]) == 0)
            {
              /* runtime quarks are numbered after the static ones */
              *setme = TR_N_KEYS + i;
              return true;
            }
        }
    }

  return false;
}
Example #9
0
/* return a count of how many contiguous blocks there are starting at this pos */
static int getBlockRun(tr_cache const* cache, int pos, struct run_info* info)
{
    int const n = tr_ptrArraySize(&cache->blocks);
    struct cache_block const* const* blocks = (struct cache_block const* const*)tr_ptrArrayBase(&cache->blocks);
    struct cache_block const* first = blocks[pos];
    tr_block_index_t want = first->block;
    int end = pos;

    /* extend the run while the entries are consecutive and share a torrent */
    while (end < n && blocks[end]->block == want && blocks[end]->tor == first->tor)
    {
        ++end;
        ++want;
    }

    if (info != NULL)
    {
        struct cache_block const* tail = blocks[end - 1];
        info->last_block_time = tail->time;
        info->is_piece_done = tr_torrentPieceIsComplete(tail->tor, tail->piece);
        info->is_multi_piece = tail->piece != first->piece;
        info->len = end - pos;
        info->pos = pos;
    }

    return end - pos;
}
Example #10
0
/* Flush `n` contiguous cached blocks starting at `pos` to disk as a single
 * write, removing them from the cache. Returns tr_ioWrite()'s result
 * (0 on success). */
static int
flushContiguous( tr_cache * cache, int pos, int n )
{
    int i;
    int err = 0;
    size_t len;
    uint8_t * buf = tr_new( uint8_t, n * MAX_BLOCK_SIZE );
    uint8_t * walk = buf;
    struct cache_block ** blocks = (struct cache_block**) tr_ptrArrayBase( &cache->blocks );

    /* the run is contiguous, so the write starts at the first
     * block's torrent / piece / offset */
    struct cache_block * b = blocks[pos];
    tr_torrent * tor             = b->tor;
    const tr_piece_index_t piece = b->piece;
    const uint32_t offset        = b->offset;

    /* concatenate the blocks' payloads into one buffer,
     * freeing each cache entry as we go */
    for( i=pos; i<pos+n; ++i ) {
        b = blocks[i];
        memcpy( walk, b->buf, b->length );
        walk += b->length;
        tr_free( b->buf );
        tr_free( b );
    }
    tr_ptrArrayErase( &cache->blocks, pos, pos+n );

    /* FIX: take the byte count before freeing buf -- the old code read
     * `walk-buf` after tr_free(buf), and pointer values are indeterminate
     * after free (undefined behavior per C11 6.2.4) */
    len = (size_t)( walk - buf );

    err = tr_ioWrite( tor, piece, offset, len, buf );
    tr_free( buf );

    ++cache->disk_writes;
    cache->disk_write_bytes += len;
    return err;
}