void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        tr_peerIoRef( io );

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        switch( io->priority )
        {
            case TR_PRI_HIGH:
                tr_ptrArrayAppend( &high, io );
                /* fall through */
            case TR_PRI_NORMAL:
                tr_ptrArrayAppend( &normal, io );
                /* fall through */
            default:
                tr_ptrArrayAppend( &low, io );
        }
    }

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}
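/* phaseOne() is not shown in this excerpt.  As a rough sketch only -- assuming
 * a helper along the lines of tr_peerIoFlush( io, dir, byteLimit ) that moves
 * up to byteLimit bytes and returns how many were actually used -- the fair
 * round-robin pass described in the comment above could look like this: */
static void
phaseOne_sketch( tr_ptrArray * peerArray, tr_direction dir )
{
    int n = tr_ptrArraySize( peerArray );
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );
    int i = 0;

    /* hand out bandwidth in small fixed-size chunks so that one fast peer
     * can't drain the per-pulse budget before the slower peers get a turn */
    while( n > 0 )
    {
        const size_t increment = 1024;
        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment ); /* assumed helper */

        if( bytesUsed == (int)increment )
            ++i; /* still hungry; it gets another turn next lap */
        else
            peers[i] = peers[--n]; /* done for now; drop it from the rotation */

        if( i >= n )
            i = 0;
    }
}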
static void
allocateBandwidth (tr_bandwidth  * b,
                   tr_priority_t   parent_priority,
                   tr_direction    dir,
                   unsigned int    period_msec,
                   tr_ptrArray   * peer_pool)
{
  const tr_priority_t priority = MAX (parent_priority, b->priority);

  assert (tr_isBandwidth (b));
  assert (tr_isDirection (dir));

  /* set the available bandwidth */
  if (b->band[dir].isLimited)
    {
      const uint64_t nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
      b->band[dir].bytesLeft = (unsigned int)(nextPulseSpeed * period_msec) / 1000u;
    }

  /* add this bandwidth's peer, if any, to the peer pool */
  if (b->peer != NULL)
    {
      b->peer->priority = priority;
      tr_ptrArrayAppend (peer_pool, b->peer);
    }

  /* traverse & repeat for the subtree */
  if (1)
    {
      int i;
      struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase (&b->children);
      const int n = tr_ptrArraySize (&b->children);
      for (i=0; i<n; ++i)
        allocateBandwidth (children[i], priority, dir, period_msec, peer_pool);
    }
}
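/* The allocator above is meant to be driven by a periodic timer: each pulse
 * refills every node's bytesLeft from its configured desiredSpeed_Bps, then
 * tr_bandwidthAllocate() redistributes it over the collected peerIos.  A
 * minimal usage sketch, assuming the caller owns the root tr_bandwidth and a
 * roughly 500 ms pulse (the period and the helper's name are illustrative
 * assumptions, not taken from the code above): */
static void
bandwidthPulse_sketch( tr_bandwidth * root )
{
    const unsigned int period_msec = 500; /* assumed pulse interval */

    /* refill + redistribute both directions from the root of the tree */
    tr_bandwidthAllocate( root, TR_UP,   period_msec );
    tr_bandwidthAllocate( root, TR_DOWN, period_msec );
}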
static tr_quark
append_new_quark (const void * str, size_t len)
{
  tr_quark ret;
  struct tr_key_struct * tmp;

  tmp = tr_new (struct tr_key_struct, 1);
  tmp->str = tr_strndup (str, len);
  tmp->len = len;
  ret = TR_N_KEYS + tr_ptrArraySize (&my_runtime);
  tr_ptrArrayAppend (&my_runtime, tmp);
  return ret;
}
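/* append_new_quark() only covers the miss path: new runtime keys get indices
 * just past the TR_N_KEYS static table, and my_runtime keeps them alive.  A
 * sketch of the caller, assuming a tr_quark_lookup (str, len, &quark) helper
 * that searches the static table and my_runtime first (the helper name and
 * the len == (size_t)-1 "compute it for me" convention are assumptions): */
tr_quark
tr_quark_new_sketch (const void * str, size_t len)
{
  tr_quark ret = 0; /* 0 doubles as the "no key" quark in this excerpt */

  if (str != NULL)
    {
      if (len == (size_t)-1)
        len = strlen (str);

      if (!tr_quark_lookup (str, len, &ret)) /* assumed lookup helper */
        ret = append_new_quark (str, len);   /* miss: intern a new key */
    }

  return ret;
}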
static void
allocateBandwidth( tr_bandwidth  * b,
                   tr_priority_t   parent_priority,
                   tr_direction    dir,
                   unsigned int    period_msec,
                   tr_ptrArray   * peer_pool )
{
    tr_priority_t priority;

    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    /* set the available bandwidth */
    if( b->band[dir].isLimited )
    {
        const unsigned int nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
        b->band[dir].bytesLeft = ( nextPulseSpeed * period_msec ) / 1000u;

#ifdef DEBUG_DIRECTION
        if( dir == DEBUG_DIRECTION )
            fprintf( stderr, "bandwidth %p rawSpeed(%5.2f) desiredSpeed(%u), allocating %u\n",
                     b, tr_bandwidthGetRawSpeed( b, dir ), nextPulseSpeed,
                     b->band[dir].bytesLeft );
#endif
    }

    priority = MAX( parent_priority, b->priority );

    /* add this bandwidth's peer, if any, to the peer pool */
    if( b->peer != NULL )
    {
        b->peer->priority = priority;
        tr_ptrArrayAppend( peer_pool, b->peer );
    }

    /* traverse & repeat for the subtree */
    if( 1 )
    {
        int i;
        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
        const int n = tr_ptrArraySize( &b->children );

#ifdef DEBUG_DIRECTION
        if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
            fprintf( stderr, "bandwidth %p has %d children\n", b, n );
#endif

        for( i=0; i<n; ++i )
            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
    }
}
static void
allocateBandwidth( tr_bandwidth  * b,
                   tr_direction    dir,
                   int             period_msec,
                   tr_ptrArray   * peer_pool )
{
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    /* set the available bandwidth */
    if( b->band[dir].isLimited )
    {
        const double desiredSpeed = b->band[dir].desiredSpeed;
        const double nextPulseSpeed = desiredSpeed;
        b->band[dir].bytesLeft = MAX( 0.0, nextPulseSpeed * 1024.0 * period_msec / 1000.0 );

#ifdef DEBUG_DIRECTION
        if( dir == DEBUG_DIRECTION )
            fprintf( stderr, "bandwidth %p rawSpeed(%5.2f) desiredSpeed(%5.2f), allocating %5.2f\n",
                     b, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed,
                     b->band[dir].bytesLeft / 1024.0 );
#endif
    }

    /* add this bandwidth's peers to the peer pool */
    {
        int i;
        const int n = tr_ptrArraySize( b->peers );
        for( i=0; i<n; ++i )
            tr_ptrArrayAppend( peer_pool, tr_ptrArrayNth( b->peers, i ) );

#ifdef DEBUG_DIRECTION
        if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
            fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
#endif
    }

    /* all children should reallocate too */
    if( 1 )
    {
        int i, n = 0;
        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayPeek( b->children, &n );
        for( i=0; i<n; ++i )
            allocateBandwidth( children[i], dir, period_msec, peer_pool );
    }
}
static void
extract_parts_from_multipart (const struct evkeyvalq * headers,
                              struct evbuffer        * body,
                              tr_ptrArray            * setme_parts)
{
  const char * content_type = evhttp_find_header (headers, "Content-Type");
  const char * in = (const char*) evbuffer_pullup (body, -1);
  size_t inlen = evbuffer_get_length (body);

  const char * boundary_key = "boundary=";
  const char * boundary_key_begin = content_type ? strstr (content_type, boundary_key) : NULL;
  const char * boundary_val = boundary_key_begin ? boundary_key_begin + strlen (boundary_key) : "arglebargle";
  char * boundary = tr_strdup_printf ("--%s", boundary_val);
  const size_t boundary_len = strlen (boundary);

  const char * delim = tr_memmem (in, inlen, boundary, boundary_len);
  while (delim)
    {
      size_t part_len;
      const char * part = delim + boundary_len;

      inlen -= (part - in);
      in = part;

      delim = tr_memmem (in, inlen, boundary, boundary_len);
      part_len = delim ? (size_t)(delim - part) : inlen;

      if (part_len)
        {
          const char * rnrn = tr_memmem (part, part_len, "\r\n\r\n", 4);
          if (rnrn)
            {
              struct tr_mimepart * p = tr_new (struct tr_mimepart, 1);
              p->headers_len = (size_t) (rnrn - part);
              p->headers = tr_strndup (part, p->headers_len);
              p->body_len = (size_t) ((part + part_len) - (rnrn + 4));
              p->body = tr_strndup (rnrn+4, p->body_len);
              tr_ptrArrayAppend (setme_parts, p);
            }
        }
    }

  tr_free (boundary);
}
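/* The tr_mimepart type filled in above isn't declared in this excerpt.
 * Judging from the fields it uses, a matching declaration and cleanup helper
 * would look roughly like this (a sketch, not necessarily the exact code): */
struct tr_mimepart
{
  char * headers;     /* raw header block of one multipart part */
  size_t headers_len;
  char * body;        /* payload that follows the blank "\r\n\r\n" line */
  size_t body_len;
};

static void
tr_mimepart_free (struct tr_mimepart * p)
{
  tr_free (p->body);
  tr_free (p->headers);
  tr_free (p);
}
/* With a helper like this, the array built by extract_parts_from_multipart()
 * can later be released in one call, e.g.
 * tr_ptrArrayDestruct (&parts, (PtrArrayForeachFunc)tr_mimepart_free). */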
/**
 * This function's previous recursive implementation was
 * easier to read, but was vulnerable to a stack-smashing
 * attack via maliciously-crafted bencoded data. (#667)
 */
int
tr_variantParseBenc (const void    * buf_in,
                     const void    * bufend_in,
                     tr_variant    * top,
                     const char   ** setme_end)
{
  int err = 0;
  const uint8_t * buf = buf_in;
  const uint8_t * bufend = bufend_in;
  tr_ptrArray stack = TR_PTR_ARRAY_INIT;
  tr_quark key = 0;

  tr_variantInit (top, 0);

  while (buf != bufend)
    {
      if (buf > bufend) /* no more text to parse... */
        err = EILSEQ;

      if (err)
        break;

      if (*buf == 'i') /* int */
        {
          int64_t val;
          const uint8_t * end;
          tr_variant * v;

          if ((err = tr_bencParseInt (buf, bufend, &end, &val)))
            break;
          buf = end;

          if ((v = get_node (&stack, &key, top, &err)))
            tr_variantInitInt (v, val);
        }
      else if (*buf == 'l') /* list */
        {
          tr_variant * v;

          ++buf;

          if ((v = get_node (&stack, &key, top, &err)))
            {
              tr_variantInitList (v, 0);
              tr_ptrArrayAppend (&stack, v);
            }
        }
      else if (*buf == 'd') /* dict */
        {
          tr_variant * v;

          ++buf;

          if ((v = get_node (&stack, &key, top, &err)))
            {
              tr_variantInitDict (v, 0);
              tr_ptrArrayAppend (&stack, v);
            }
        }
      else if (*buf == 'e') /* end of list or dict */
        {
          ++buf;

          if (tr_ptrArrayEmpty (&stack) || (key != 0))
            {
              err = EILSEQ;
              break;
            }
          else
            {
              tr_ptrArrayPop (&stack);
              if (tr_ptrArrayEmpty (&stack))
                break;
            }
        }
      else if (isdigit (*buf)) /* string? */
        {
          tr_variant * v;
          const uint8_t * end;
          const uint8_t * str;
          size_t str_len;

          if ((err = tr_bencParseStr (buf, bufend, &end, &str, &str_len)))
            break;
          buf = end;

          if (!key && !tr_ptrArrayEmpty (&stack) && tr_variantIsDict (tr_ptrArrayBack (&stack)))
            key = tr_quark_new (str, str_len);
          else if ((v = get_node (&stack, &key, top, &err)))
            tr_variantInitStr (v, str, str_len);
        }
      else /* invalid bencoded text... march past it */
        {
          ++buf;
        }

      if (tr_ptrArrayEmpty (&stack))
        break;
    }

  if (!err && (!top->type || !tr_ptrArrayEmpty (&stack)))
    err = EILSEQ;

  if (!err && setme_end)
    *setme_end = (const char*) buf;

  tr_ptrArrayDestruct (&stack, NULL);
  return err;
}
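/* get_node() is the glue between the explicit stack and the tree being built:
 * it returns the tr_variant slot the next parsed value should land in.  A
 * sketch of what it presumably does -- tr_variantListAdd() and
 * tr_variantDictAdd() are assumed container helpers, not shown above: */
static tr_variant *
get_node_sketch (tr_ptrArray * stack, tr_quark * key, tr_variant * top, int * err)
{
  tr_variant * node = NULL;

  if (tr_ptrArrayEmpty (stack))
    {
      node = top; /* first value parsed becomes the root */
    }
  else
    {
      tr_variant * parent = tr_ptrArrayBack (stack);

      if (tr_variantIsList (parent))
        node = tr_variantListAdd (parent);          /* assumed: append a child slot */
      else if (*key && tr_variantIsDict (parent))
        {
          node = tr_variantDictAdd (parent, *key);  /* assumed: add slot under key */
          *key = 0;                                 /* key is consumed by its value */
        }
      else
        *err = EILSEQ; /* e.g. a dict value with no pending key */
    }

  return node;
}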
static int
callback( void * vdata, int type, const JSON_value * value )
{
    struct json_benc_data * data = vdata;
    tr_benc * node;

    switch( type )
    {
        case JSON_T_ARRAY_BEGIN:
            node = getNode( data );
            tr_bencInitList( node, 0 );
            tr_ptrArrayAppend( data->stack, node );
            break;

        case JSON_T_ARRAY_END:
            tr_ptrArrayPop( data->stack );
            break;

        case JSON_T_OBJECT_BEGIN:
            node = getNode( data );
            tr_bencInitDict( node, 0 );
            tr_ptrArrayAppend( data->stack, node );
            break;

        case JSON_T_OBJECT_END:
            tr_ptrArrayPop( data->stack );
            break;

        case JSON_T_FLOAT:
        {
            char buf[128];
            tr_snprintf( buf, sizeof( buf ), "%f", (double)value->vu.float_value );
            tr_bencInitStr( getNode( data ), buf, -1 );
            break;
        }

        case JSON_T_NULL:
            tr_bencInitStr( getNode( data ), "", 0 );
            break;

        case JSON_T_INTEGER:
            tr_bencInitInt( getNode( data ), value->vu.integer_value );
            break;

        case JSON_T_TRUE:
            tr_bencInitInt( getNode( data ), 1 );
            break;

        case JSON_T_FALSE:
            tr_bencInitInt( getNode( data ), 0 );
            break;

        case JSON_T_STRING:
            tr_bencInitStr( getNode( data ),
                            value->vu.str.value,
                            value->vu.str.length );
            break;

        case JSON_T_KEY:
            assert( !data->key );
            data->key = tr_strdup( value->vu.str.value );
            break;
    }

    return 1;
}
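/* Neither json_benc_data nor getNode() is shown in this excerpt.  From the way
 * the callback uses them, they presumably look something like the sketch below:
 * the stack holds the open containers and getNode() picks the slot the next
 * value belongs in (the top field and the tr_bencListAdd()/tr_bencDictAdd()
 * helpers are assumptions): */
struct json_benc_data_sketch
{
    char        * key;    /* most recent JSON_T_KEY, waiting for its value */
    tr_benc     * top;    /* root node being filled in */
    tr_ptrArray * stack;  /* open containers, innermost last */
};

static tr_benc *
getNode_sketch( struct json_benc_data_sketch * data )
{
    tr_benc * node = NULL;
    tr_benc * parent = tr_ptrArrayEmpty( data->stack ) ? NULL
                                                       : tr_ptrArrayBack( data->stack );

    if( parent == NULL )
        node = data->top;                          /* first value becomes the root */
    else if( tr_bencIsList( parent ) )
        node = tr_bencListAdd( parent );           /* assumed: append a child slot */
    else if( tr_bencIsDict( parent ) && ( data->key != NULL ) )
    {
        node = tr_bencDictAdd( parent, data->key );/* assumed: add slot under key */
        tr_free( data->key );                      /* key is consumed by its value */
        data->key = NULL;
    }

    return node;
}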