/* First phase of IO.  Tries to distribute bandwidth fairly to keep faster
 * peers from starving the others.  Loop through the peers round-robin,
 * giving each a small chunk of bandwidth.  Keep looping until we run out
 * of bandwidth and/or peers that can use it. */
static void
phaseOne( tr_ptrArray * peerArray, tr_direction dir )
{
    int i, n;
    int peerCount = tr_ptrArraySize( peerArray );
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );

    n = peerCount;
    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */

    /* NOTE: the condition must be "n > 0", not "n > 1" -- with "n > 1"
     * the loop exits while one peer still has bandwidth to use, so a
     * lone remaining peer (or a single-peer array) is never flushed. */
    while( n > 0 )
    {
        const size_t increment = 1024;
        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );

        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );

        if( bytesUsed == (int)increment )
            ++i;
        else
        {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        /* wrap around to the front of the still-active peers */
        if( i == n )
            i = 0;
    }
}
/* First phase of IO.  Hand out bandwidth in small chunks, visiting peers
 * in random order, until no peer can use any more this pulse.  This keeps
 * the faster peers from starving the slower ones. */
static void
phaseOne (tr_ptrArray * peerArray, tr_direction dir)
{
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase (peerArray);
    int remaining = tr_ptrArraySize (peerArray);

    dbgmsg ("%d peers to go round-robin for %s", remaining, (dir==TR_UP?"upload":"download"));

    while (remaining > 0)
    {
        /* pick a peer at random */
        const int which = tr_cryptoWeakRandInt (remaining);

        /* value of 3000 bytes chosen so that when using uTP we'll send a full-size
         * frame right away and leave enough buffered data for the next frame to go
         * out in a timely manner. */
        const size_t increment = 3000;

        const int bytesUsed = tr_peerIoFlush (peers[which], dir, increment);

        dbgmsg ("peer #%d of %d used %d bytes in this pass", which, remaining, bytesUsed);

        if (bytesUsed != (int)increment)
        {
            /* peer is done writing for now; swap it out to the end of
             * the list and shrink the active pool */
            tr_peerIo * swap = peers[which];
            peers[which] = peers[remaining-1];
            peers[remaining-1] = swap;
            --remaining;
        }
    }
}
/* return a count of how many contiguous blocks there are starting at this pos */
static int
getBlockRun (const tr_cache * cache, int pos, struct run_info * info)
{
  int i;
  const int n = tr_ptrArraySize (&cache->blocks);
  const struct cache_block * const * blocks = (const struct cache_block* const *) tr_ptrArrayBase (&cache->blocks);
  const struct cache_block * ref = blocks[pos];
  tr_block_index_t block = ref->block;

  /* walk forward while the blocks are numerically consecutive and belong
   * to the same torrent as the first block in the run.  The first
   * iteration compares blocks[pos] against itself and so always matches,
   * which means the run length (i - pos) is always at least 1. */
  for (i=pos; i<n; ++i, ++block)
    {
      const struct cache_block * b = blocks[i];
      if (b->block != block)
        break;
      if (b->tor != ref->tor)
        break;
      //fprintf (stderr, "pos %d tor %d block %zu time %zu\n", i, b->tor->uniqueId, (size_t)b->block, (size_t)b->time);
    }

  //fprintf (stderr, "run is %d long from [%d to %d)\n", (int)(i-pos), (int)pos, i);

  if (info != NULL)
    {
      /* b is the last block in the run */
      const struct cache_block * b = blocks[i-1];
      info->last_block_time = b->time;
      info->is_piece_done = tr_torrentPieceIsComplete (b->tor, b->piece);
      /* true when the run straddles a piece boundary */
      info->is_multi_piece = b->piece != blocks[pos]->piece;
      info->len = i - pos;
      info->pos = pos;
    }

  return i-pos;
}
/* Calculate runs
 *   - Stale runs, runs sitting in cache for a long time or runs not growing, get priority.
 *   Returns number of runs. */
static int
calcRuns (tr_cache * cache, struct run_info * runs)
{
  const int n = tr_ptrArraySize (&cache->blocks);
  int i = 0, pos;
  const time_t now = tr_time ();

  /* partition the cache into contiguous runs.  NOTE: the loop-update
   * expression does double duty -- it advances pos past the run that was
   * just measured AND increments the run counter i. */
  for (pos = 0; pos < n; pos += runs[i++].len)
    {
      int rank = getBlockRun (cache, pos, &runs[i]);

      /* This adds ~2 to the relative length of a run for every minute it has
       * languished in the cache. */
      rank += (now - runs[i].last_block_time) / 32;

      /* Flushing stale blocks should be a top priority as the probability of them
       * growing is very small, for blocks on piece boundaries, and nonexistent for
       * blocks inside pieces. */
      rank |= runs[i].is_piece_done ? DONEFLAG : 0;

      /* Move the multi piece runs higher */
      rank |= runs[i].is_multi_piece ? MULTIFLAG : 0;

      runs[i].rank = rank;

      //fprintf (stderr,"block run at pos %d of length %d and age %ld adjusted +%d\n",runs[i].pos,runs[i].len,now-runs[i].last_block_time,rank-runs[i].len);
    }

  //fprintf (stderr, "%d block runs\n", i);

  /* sort best-to-flush-first per compareRuns */
  qsort (runs, i, sizeof (struct run_info), compareRuns);
  return i;
}
/* Allocate this pulse's bandwidth to `b` and its subtree, and accumulate
 * the subtree's peerIos (tagged with their effective priority) into
 * `peer_pool`.  The effective priority of a node is the strongest of its
 * own priority and its ancestors'. */
static void
allocateBandwidth (tr_bandwidth  * b,
                   tr_priority_t   parent_priority,
                   tr_direction    dir,
                   unsigned int    period_msec,
                   tr_ptrArray   * peer_pool)
{
  const tr_priority_t priority = MAX (parent_priority, b->priority);

  assert (tr_isBandwidth (b));
  assert (tr_isDirection (dir));

  /* set the available bandwidth */
  if (b->band[dir].isLimited)
    {
      const uint64_t nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
      /* do BOTH the multiply and the divide in 64 bits, then truncate.
       * The previous form cast the product to unsigned int *before*
       * dividing, which wrapped for large speed*period products and
       * defeated the point of widening to uint64_t. */
      b->band[dir].bytesLeft = (unsigned int)(nextPulseSpeed * period_msec / 1000u);
    }

  /* add this bandwidth's peer, if any, to the peer pool */
  if (b->peer != NULL)
    {
      b->peer->priority = priority;
      tr_ptrArrayAppend (peer_pool, b->peer);
    }

  /* traverse & repeat for the subtree */
  if (1)
    {
      int i;
      struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase (&b->children);
      const int n = tr_ptrArraySize (&b->children);
      for (i=0; i<n; ++i)
        allocateBandwidth (children[i], priority, dir, period_msec, peer_pool);
    }
}
/* Top-level bandwidth allocator for one direction: allocates this pulse's
 * bandwidth across the whole tree rooted at `b`, then flushes the peers
 * in priority order and enables on-demand IO for any peer that still has
 * bandwidth left. */
void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    /* Bucket the peers by priority.  The fall-throughs below are
     * deliberate: a HIGH-priority peer is appended to all three arrays
     * and a NORMAL one to both `normal` and `low`, so higher-priority
     * peers get extra passes at the bandwidth in phaseOne() below. */
    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        tr_peerIoRef( io );

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        switch( io->priority ) {
            case TR_PRI_HIGH:   tr_ptrArrayAppend( &high,   io ); /* fall through */
            case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */
            default:            tr_ptrArrayAppend( &low,    io );
        }
    }

    /* First phase of IO.  Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others.  Loop through the peers, giving each a
     * small chunk of bandwidth.  Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO.  To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    /* release the refs taken in the bucketing loop above */
    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}
/* Append a new runtime key to the quark table and return its quark.
 * Runtime quarks are numbered starting just past the static keys
 * (TR_N_KEYS), in insertion order.  The key's string is copied. */
static tr_quark
append_new_quark (const void * str, size_t len)
{
  tr_quark quark;
  struct tr_key_struct * key;

  key = tr_new (struct tr_key_struct, 1);
  key->str = tr_strndup (str, len);
  key->len = len;

  /* the new quark's value is its index in my_runtime, offset by TR_N_KEYS */
  quark = TR_N_KEYS + tr_ptrArraySize (&my_runtime);
  tr_ptrArrayAppend (&my_runtime, key);
  return quark;
}
/* If the cache holds more blocks than its limit, flush the highest-ranked
 * runs until roughly a quarter of the cache (cacheCutoff blocks) has been
 * scheduled for writing.  Returns 0 on success, or the first error from
 * flushing. */
static int
cacheTrim (tr_cache * cache)
{
  int err = 0;

  if (tr_ptrArraySize (&cache->blocks) > cache->max_blocks)
    {
      /* Amount of cache that should be removed by the flush. This influences how large
       * runs can grow as well as how often flushes will happen. */
      const int cacheCutoff = 1 + cache->max_blocks / 4;
      struct run_info * runs = tr_new (struct run_info, tr_ptrArraySize (&cache->blocks));
      int i = 0;
      int j = 0;
      const int n_runs = calcRuns (cache, runs);

      /* walk the sorted runs until we've covered cacheCutoff blocks.
       * Bounding by n_runs guards against reading past the end of the
       * runs array if the cutoff ever exceeded the cached block count. */
      while (j < cacheCutoff && i < n_runs)
        j += runs[i++].len;

      err = flushRuns (cache, runs, i);
      tr_free (runs);
    }

  return err;
}
/* Flush `n` contiguous cache blocks starting at array index `pos` to disk
 * as a single write, then remove them from the cache.  The caller
 * guarantees (via getBlockRun) that the blocks all belong to one torrent
 * and are numerically consecutive.  Returns tr_ioWrite()'s result. */
static int
flushContiguous( tr_cache * cache, int pos, int n )
{
    int i;
    int err = 0;
    uint8_t * buf = tr_new( uint8_t, n * MAX_BLOCK_SIZE );
    uint8_t * walk = buf;
    size_t len;
    struct cache_block ** blocks = (struct cache_block**) tr_ptrArrayBase( &cache->blocks );

    /* the run's write target is the first block's (torrent, piece, offset) */
    struct cache_block * b = blocks[pos];
    tr_torrent * tor = b->tor;
    const tr_piece_index_t piece = b->piece;
    const uint32_t offset = b->offset;

    /* coalesce the payloads into one buffer, freeing each block as we go */
    for( i=pos; i<pos+n; ++i )
    {
        b = blocks[i];
        memcpy( walk, b->buf, b->length );
        walk += b->length;
        tr_free( b->buf );
        tr_free( b );
    }
    tr_ptrArrayErase( &cache->blocks, pos, pos+n );

    /* compute the payload length BEFORE freeing buf: evaluating
     * `walk - buf` after tr_free(buf) subtracts indeterminate pointer
     * values, which is undefined behavior */
    len = walk - buf;

    err = tr_ioWrite( tor, piece, offset, len, buf );
    tr_free( buf );

    ++cache->disk_writes;
    cache->disk_write_bytes += len;
    return err;
}
/* Allocate this pulse's bandwidth to `b` and its subtree, and collect the
 * subtree's peers (tagged with their effective priority) into `peer_pool`.
 * The effective priority is the strongest of the node's own priority and
 * its ancestors'. */
static void
allocateBandwidth( tr_bandwidth  * b,
                   tr_priority_t   parent_priority,
                   tr_direction    dir,
                   unsigned int    period_msec,
                   tr_ptrArray   * peer_pool )
{
    tr_priority_t priority;

    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    /* set the available bandwidth */
    if( b->band[dir].isLimited )
    {
        /* widen to 64 bits: desiredSpeed_Bps * period_msec overflows a
         * 32-bit unsigned int for speeds above ~43 MB/s with a 100 msec
         * period, silently wrapping and allocating a bogus byte count */
        const uint64_t nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
        b->band[dir].bytesLeft = (unsigned int)( nextPulseSpeed * period_msec / 1000u );

#ifdef DEBUG_DIRECTION
        if( dir == DEBUG_DIRECTION )
            fprintf( stderr, "bandwidth %p currentPieceSpeed(%5.2f of %5.2f) desiredSpeed(%5.2f), allocating %d\n",
                     b, currentSpeed, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed,
                     b->band[dir].bytesLeft );
#endif
    }

    priority = MAX( parent_priority, b->priority );

    /* add this bandwidth's peer, if any, to the peer pool */
    if( b->peer != NULL )
    {
        b->peer->priority = priority;
        tr_ptrArrayAppend( peer_pool, b->peer );
    }

#ifdef DEBUG_DIRECTION
    if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
        fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
#endif

    /* traverse & repeat for the subtree */
    if( 1 )
    {
        int i;
        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
        const int n = tr_ptrArraySize( &b->children );
        for( i=0; i<n; ++i )
            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
    }
}
/* Allocate this pulse's bandwidth to `b` and every bandwidth object in its
 * subtree, and collect all of their peers into `peer_pool`.
 * NOTE(review): in this revision period_msec is signed and bytesLeft is
 * computed with floating-point math; the *1024.0 factor suggests
 * desiredSpeed is in KiB/s -- confirm against the struct definition. */
static void
allocateBandwidth( tr_bandwidth * b,
                   tr_direction   dir,
                   int            period_msec,
                   tr_ptrArray  * peer_pool )
{
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    /* set the available bandwidth */
    if( b->band[dir].isLimited )
    {
        const double desiredSpeed = b->band[dir].desiredSpeed;
        const double nextPulseSpeed = desiredSpeed;
        /* clamp at zero so a negative product can't leak through */
        b->band[dir].bytesLeft = MAX( 0.0, nextPulseSpeed * 1024.0 * period_msec / 1000.0 );

#ifdef DEBUG_DIRECTION
        if( dir == DEBUG_DIRECTION )
            fprintf( stderr, "bandwidth %p currentPieceSpeed(%5.2f of %5.2f) desiredSpeed(%5.2f), allocating %5.2f\n",
                     b, currentSpeed, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed,
                     b->band[dir].bytesLeft/1024.0 );
#endif
    }

    /* traverse & repeat for the subtree */
    {
        int i;
        const int n = tr_ptrArraySize( b->peers );
        for( i=0; i<n; ++i )
            tr_ptrArrayAppend( peer_pool, tr_ptrArrayNth( b->peers, i ) );
    }

#ifdef DEBUG_DIRECTION
    if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
        fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
#endif

    /* all children should reallocate too */
    if( 1 )
    {
        int i, n=0;
        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayPeek( b->children, &n );
        for( i=0; i<n; ++i )
            allocateBandwidth( children[i], dir, period_msec, peer_pool );
    }
}
/* Look up the quark for the string `str` of length `len`.
 * Checks the compile-time static key table first (binary search),
 * then the runtime-added keys (linear scan).
 * On a hit, stores the quark in *setme and returns true;
 * otherwise returns false and *setme is left untouched. */
bool
tr_quark_lookup (const void * str, size_t len, tr_quark * setme)
{
  struct tr_key_struct needle;
  struct tr_key_struct * hit;
  static const size_t n_static = sizeof(my_static) / sizeof(struct tr_key_struct);

  /* the static table and the TR_KEY_* enum must stay in sync */
  assert (n_static == TR_N_KEYS);

  needle.str = str;
  needle.len = len;

  /* is it in our static array? */
  hit = bsearch (&needle, my_static, n_static, sizeof(struct tr_key_struct), compareKeys);
  if (hit != NULL)
    {
      *setme = hit - my_static;
      return true;
    }

  /* was it added during runtime? */
  if (!tr_ptrArrayEmpty (&my_runtime))
    {
      struct tr_key_struct ** runtime_keys = (struct tr_key_struct **) tr_ptrArrayBase (&my_runtime);
      const size_t n_runtime = tr_ptrArraySize (&my_runtime);
      size_t i;

      for (i = 0; i < n_runtime; ++i)
        {
          if (compareKeys (&needle, runtime_keys[i]) == 0)
            {
              *setme = TR_N_KEYS + i;
              return true;
            }
        }
    }

  return false;
}
/* Return the number of contiguous cache blocks starting at array index
 * `pos`: blocks with consecutive block numbers that all belong to the
 * same torrent as blocks[pos].  The count is always at least 1.
 * If `info` is non-NULL, it is filled with metadata about the run. */
static int getBlockRun(tr_cache const* cache, int pos, struct run_info* info)
{
    int const n = tr_ptrArraySize(&cache->blocks);
    struct cache_block const* const* blocks = (struct cache_block const* const*)tr_ptrArrayBase(&cache->blocks);
    struct cache_block const* ref = blocks[pos];
    tr_block_index_t expected_block = ref->block;
    int run_len = 0;

    while (pos + run_len < n)
    {
        struct cache_block const* b = blocks[pos + run_len];

        /* the run ends at the first gap in block numbers
         * or at a change of torrent */
        if (b->block != expected_block || b->tor != ref->tor)
        {
            break;
        }

        ++run_len;
        ++expected_block;
    }

    if (info != NULL)
    {
        /* `last` is the final block in the run */
        struct cache_block const* last = blocks[pos + run_len - 1];
        info->last_block_time = last->time;
        info->is_piece_done = tr_torrentPieceIsComplete(last->tor, last->piece);
        /* true when the run straddles a piece boundary */
        info->is_multi_piece = last->piece != blocks[pos]->piece;
        info->len = run_len;
        info->pos = pos;
    }

    return run_len;
}
/* HTTP handler for multipart torrent uploads.  Rejects non-POST requests
 * with 405.  For POSTs: parses the multipart body, verifies the
 * anti-CSRF session id (409 on mismatch), then turns each part into a
 * "torrent-add" RPC request -- either by URL/filename or by
 * base64-encoded metainfo -- and always replies with a JSON "success"
 * body afterwards. */
static void
handle_upload (struct evhttp_request * req, struct tr_rpc_server * server)
{
  if (req->type != EVHTTP_REQ_POST)
    {
      send_simple_response (req, 405, NULL);
    }
  else
    {
      int i;
      int n;
      bool hasSessionId = false;
      tr_ptrArray parts = TR_PTR_ARRAY_INIT;

      /* "paused=true" anywhere in the query string pauses the added torrents */
      const char * query = strchr (req->uri, '?');
      const bool paused = query && strstr (query + 1, "paused=true");

      extract_parts_from_multipart (req->input_headers, req->input_buffer, &parts);
      n = tr_ptrArraySize (&parts);

      /* first look for the session id */
      for (i=0; i<n; ++i)
        {
          struct tr_mimepart * p = tr_ptrArrayNth (&parts, i);
          if (tr_memmem (p->headers, p->headers_len, TR_RPC_SESSION_ID_HEADER, strlen (TR_RPC_SESSION_ID_HEADER)))
            break;
        }

      /* compare the found part's body against our current session id;
       * prefix match is enough (the part body may have trailing bytes) */
      if (i<n)
        {
          const struct tr_mimepart * p = tr_ptrArrayNth (&parts, i);
          const char * ours = get_current_session_id (server);
          const size_t ourlen = strlen (ours);
          hasSessionId = ourlen <= p->body_len && memcmp (p->body, ours, ourlen) == 0;
        }

      if (!hasSessionId)
        {
          /* 409 Conflict: missing/stale session id (CSRF protection) */
          int code = 409;
          const char * codetext = tr_webGetResponseStr (code);
          struct evbuffer * body = evbuffer_new ();
          evbuffer_add_printf (body, "%s", "{ \"success\": false, \"msg\": \"Bad Session-Id\" }");;
          evhttp_send_reply (req, code, codetext, body);
          evbuffer_free (body);
        }
      else for (i=0; i<n; ++i)
        {
          struct tr_mimepart * p = tr_ptrArrayNth (&parts, i);
          size_t body_len = p->body_len;
          tr_variant top, *args;
          tr_variant test;
          bool have_source = false;
          char * body = p->body;

          /* trim the multipart trailing CRLF, if present */
          if (body_len >= 2 && memcmp (&body[body_len - 2], "\r\n", 2) == 0)
            body_len -= 2;

          /* build a "torrent-add" RPC request for this part */
          tr_variantInitDict (&top, 2);
          tr_variantDictAddStr (&top, TR_KEY_method, "torrent-add");
          args = tr_variantDictAddDict (&top, TR_KEY_arguments, 2);
          tr_variantDictAddBool (args, TR_KEY_paused, paused);

          if (tr_urlIsValid (body, body_len))
            {
              /* the part is a URL: let the backend fetch it */
              tr_variantDictAddRaw (args, TR_KEY_filename, body, body_len);
              have_source = true;
            }
          else if (!tr_variantFromBenc (&test, body, body_len))
            {
              /* the part parses as bencoded metainfo: pass it base64-encoded.
               * NOTE(review): `test` is populated by the successful parse but
               * never freed here -- looks like a leak; confirm whether
               * tr_variantFromBenc transfers ownership. */
              char * b64 = tr_base64_encode (body, body_len, NULL);
              tr_variantDictAddStr (args, TR_KEY_metainfo, b64);
              tr_free (b64);
              have_source = true;
            }

          if (have_source)
            tr_rpc_request_exec_json (server->session, &top, NULL, NULL);

          tr_variantFree (&top);
        }

      tr_ptrArrayDestruct (&parts, (PtrArrayForeachFunc)tr_mimepart_free);

      /* send "success" response */
      {
        int code = HTTP_OK;
        const char * codetext = tr_webGetResponseStr (code);
        struct evbuffer * body = evbuffer_new ();
        evbuffer_add_printf (body, "%s", "{ \"success\": true, \"msg\": \"Torrent Added\" }");;
        evhttp_send_reply (req, code, codetext, body);
        evbuffer_free (body);
      }
    }
}