/* First phase of IO. Tries to distribute bandwidth fairly to keep faster
 * peers from starving the others. Loop through the peers, giving each a
 * small chunk of bandwidth. Keep looping until we run out of bandwidth
 * and/or peers that can use it. */
static void
phaseOne( tr_ptrArray * peerArray, tr_direction dir )
{
    int i, n;
    int peerCount = tr_ptrArraySize( peerArray );
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );

    n = peerCount;
    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */

    /* FIX: loop while n > 0, not n > 1 -- the old condition exited with one
     * active peer still in the list, so a lone peer was never flushed at all. */
    while( n > 0 )
    {
        const size_t increment = 1024;
        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );

        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );

        if( bytesUsed == (int)increment )
            ++i; /* peer can still write; move on and give others a turn */
        else {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        /* wrap back to the front of the active portion of the list */
        if( i == n )
            i = 0;
    }
}
/* Runs one iteration of the quickfind test: fills buf with n random ints
 * in [0, range), asks tr_quickfindFirstK to move the k smallest into the
 * first k slots, then verifies that partition property holds.
 * Returns 0 on success. */
static int
test_quickFindFirst_Iteration (const size_t k, const size_t n, int * buf, int range)
{
    size_t j;
    int max_of_front;
    int min_of_back;

    /* fill the buffer with random values */
    for (j=0; j<n; ++j)
        buf[j] = tr_cryptoWeakRandInt (range);

    /* partition so the k smallest values land in buf[0..k) */
    tr_quickfindFirstK (buf, n, sizeof(int), compareInts, k);

    /* largest value among the first k slots... */
    max_of_front = INT_MIN;
    for (j=0; j<k; ++j)
        max_of_front = (buf[j] > max_of_front) ? buf[j] : max_of_front;

    /* ...and smallest value among the remaining slots */
    min_of_back = INT_MAX;
    for (j=k; j<n; ++j)
        min_of_back = (buf[j] < min_of_back) ? buf[j] : min_of_back;

    /* everything in the front partition must be <= everything in the back */
    check (max_of_front <= min_of_back);
    return 0;
}
/* Sleeps for approximately `roughly_sec` seconds: the actual wait is a
 * uniformly random duration in [0.5x, 1.5x) of the requested time. */
static void
nap( int roughly_sec )
{
    const int roughly_msec = roughly_sec * 1000;
    tr_wait_msec( roughly_msec / 2 + tr_cryptoWeakRandInt( roughly_msec ) );
}
/* First phase of IO. Tries to distribute bandwidth fairly to keep faster
 * peers from starving the others: repeatedly pick a peer at random and
 * offer it one small chunk of bandwidth; peers that can't use a whole
 * chunk are retired to the end of the list until none remain. */
static void
phaseOne (tr_ptrArray * peerArray, tr_direction dir)
{
    int peerCount = tr_ptrArraySize (peerArray);
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase (peerArray);
    int active = peerCount;

    dbgmsg ("%d peers to go round-robin for %s", active, (dir==TR_UP?"upload":"download"));

    while (active > 0)
    {
        /* pick a peer at random */
        const int i = tr_cryptoWeakRandInt (active);

        /* value of 3000 bytes chosen so that when using uTP we'll send a full-size
         * frame right away and leave enough buffered data for the next frame to go
         * out in a timely manner. */
        const size_t increment = 3000;

        const int bytesUsed = tr_peerIoFlush (peers[i], dir, increment);

        dbgmsg ("peer #%d of %d used %d bytes in this pass", i, active, bytesUsed);

        if (bytesUsed != (int)increment)
        {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[active-1];
            peers[active-1] = pio;
            --active;
        }
    }
}
/* Builds a random bitfield, picks a random half-open range [begin, end),
 * and checks that tr_bitfieldCountRange agrees with a brute-force count
 * of the set bits in that range. Returns 0 on success. */
static int
test_bitfield_count_range (void)
{
    int i;
    int n;
    int begin;
    int end;
    int expected;
    int actual;
    const int bitCount = 100 + tr_cryptoWeakRandInt (1000);
    tr_bitfield bf;

    /* build a bitCount-bit bitfield with a random subset of bits set */
    tr_bitfieldConstruct (&bf, bitCount);
    for (i=0, n=tr_cryptoWeakRandInt (bitCount); i<n; ++i)
        tr_bitfieldAdd (&bf, tr_cryptoWeakRandInt (bitCount));

    /* pick two distinct random endpoints... */
    begin = tr_cryptoWeakRandInt (bitCount);
    do
        end = tr_cryptoWeakRandInt (bitCount);
    while (end == begin);

    /* ...and order them so that begin <= end */
    if (end < begin)
    {
        const int swap = begin;
        begin = end;
        end = swap;
    }

    /* count the set bits in [begin, end) by brute force... */
    expected = 0;
    for (i=begin; i<end; ++i)
        if (tr_bitfieldHas (&bf, i))
            ++expected;

    /* ...and confirm that tr_bitfieldCountRange gets the same answer */
    actual = tr_bitfieldCountRange (&bf, begin, end);
    check (expected == actual);

    /* cleanup */
    tr_bitfieldDestruct (&bf);
    return 0;
}
/* Returns a random int in [0, upperBound).
 * Draws noise from OpenSSL's PRNG, falling back to the weak generator
 * when RAND_pseudo_bytes() reports failure.
 * FIX: reduce the raw noise with an unsigned modulo instead of abs() --
 * abs(INT_MIN) is undefined behavior, and the old code invoked it
 * whenever the PRNG happened to produce that bit pattern. */
int
tr_cryptoRandInt( int upperBound )
{
    unsigned int noise;
    int val;

    assert( upperBound > 0 );

    if( RAND_pseudo_bytes ( (unsigned char *) &noise, sizeof noise ) >= 0 )
    {
        val = (int)( noise % (unsigned int) upperBound );
    }
    else /* fall back to a weaker implementation... */
    {
        val = tr_cryptoWeakRandInt( upperBound );
    }

    return val;
}
/* Initializes the DHT subsystem for session `ss`, opening and binding an
 * IPv4 UDP socket on `tr_addr` at the session's peer port.
 * Returns 1 on success, -1 on failure or if already initialized.
 *
 * Side effects: sets the file-scope globals dht_port, dht_socket, session,
 * dht_event (and dht6_event when an IPv6 socket is available), restores
 * saved state from configDir/dht.dat, and spawns a bootstrap thread. */
int
tr_dhtInit(tr_session *ss, const tr_address * tr_addr)
{
    struct sockaddr_in sin;
    tr_benc benc;
    int rc;
    tr_bool have_id = FALSE;
    char * dat_file;
    uint8_t * nodes = NULL, * nodes6 = NULL;
    const uint8_t * raw;
    size_t len, len6;
    struct bootstrap_closure * cl;

    if( session ) /* already initialized */
        return -1;

    dht_port = tr_sessionGetPeerPort(ss);
    if(dht_port <= 0)
        return -1;

    tr_ndbg( "DHT", "Initializing DHT" );

    /* open and bind the IPv4 UDP socket used for DHT traffic */
    dht_socket = socket(PF_INET, SOCK_DGRAM, 0);
    if(dht_socket < 0)
        goto fail;

    memset(&sin, 0, sizeof(sin));
    sin.sin_family = AF_INET;
    memcpy(&sin.sin_addr, &tr_addr->addr.addr4, sizeof (struct in_addr));
    sin.sin_port = htons(dht_port);
    rc = bind(dht_socket, (struct sockaddr*)&sin, sizeof(sin));
    if(rc < 0)
        goto fail;

    /* presumably opens/binds dht6_socket when IPv6 is usable --
     * rebind_ipv6 is defined elsewhere; TODO confirm */
    if(tr_globalIPv6())
        rebind_ipv6(TRUE);

    if( getenv( "TR_DHT_VERBOSE" ) != NULL )
        dht_debug = stderr;

    /* try to restore the node id and cached peers saved by a prior run */
    dat_file = tr_buildPath( ss->configDir, "dht.dat", NULL );
    rc = tr_bencLoadFile( &benc, TR_FMT_BENC, dat_file );
    tr_free( dat_file );
    if(rc == 0) {
        have_id = tr_bencDictFindRaw(&benc, "id", &raw, &len);
        if( have_id && len==20 )
            memcpy( myid, raw, len );
        /* "nodes" is expected to be a multiple of 6 bytes (compact IPv4 endpoints) */
        if( dht_socket >= 0 &&
            tr_bencDictFindRaw( &benc, "nodes", &raw, &len ) && !(len%6) ) {
            nodes = tr_memdup( raw, len );
        }
        /* "nodes6" is expected to be a multiple of 18 bytes (compact IPv6 endpoints).
         * NOTE(review): this tests dht6_socket > 0 while the rest of the function
         * uses >= 0 -- confirm whether excluding fd 0 here is intentional */
        if( dht6_socket > 0 &&
            tr_bencDictFindRaw( &benc, "nodes6", &raw, &len6 ) && !(len6%18) ) {
            nodes6 = tr_memdup( raw, len6 );
        }
        tr_bencFree( &benc );
    }
    /* keep the lengths consistent with the (possibly absent) node buffers */
    if(nodes == NULL)
        len = 0;
    if(nodes6 == NULL)
        len6 = 0;

    if( have_id )
        tr_ninf( "DHT", "Reusing old id" );
    else {
        /* Note that DHT ids need to be distributed uniformly,
         * so it should be something truly random. */
        tr_ninf( "DHT", "Generating new id" );
        tr_cryptoRandBuf( myid, 20 );
    }

    rc = dht_init( dht_socket, dht6_socket, myid, NULL );
    if( rc < 0 )
        goto fail;

    session = ss;

    /* hand the cached nodes to a background thread for bootstrapping;
     * ownership of cl (and the node buffers) presumably passes to
     * dht_bootstrap -- verify it frees them */
    cl = tr_new( struct bootstrap_closure, 1 );
    cl->session = session;
    cl->nodes = nodes;
    cl->nodes6 = nodes6;
    cl->len = len;
    cl->len6 = len6;
    tr_threadNew( dht_bootstrap, cl );

    /* arm read events with a random initial delay (units presumably
     * microseconds given the 1000000 bound -- TODO confirm tr_timerAdd) */
    dht_event = event_new( session->event_base, dht_socket, EV_READ, event_callback, NULL );
    tr_timerAdd( dht_event, 0, tr_cryptoWeakRandInt( 1000000 ) );
    if( dht6_socket >= 0 ) {
        dht6_event = event_new( session->event_base, dht6_socket, EV_READ, event_callback, NULL );
        tr_timerAdd( dht6_event, 0, tr_cryptoWeakRandInt( 1000000 ) );
    }

    tr_ndbg( "DHT", "DHT initialized" );

    return 1;

 fail:
    {
        /* preserve errno across the close() calls so the caller sees the
         * original error.
         * NOTE(review): close(dht_socket) can be reached with dht_socket == -1
         * when socket() itself failed -- harmless, but worth confirming */
        const int save = errno;
        close(dht_socket);
        if( dht6_socket >= 0 )
            close(dht6_socket);
        dht_socket = dht6_socket = -1;
        session = NULL;
        tr_ndbg( "DHT", "DHT initialization failed (errno = %d)", save );
        errno = save;
    }
    return -1;
}
/* Returns a port chosen uniformly at random from the session's
 * configured [randomPortLow, randomPortHigh] range (inclusive). */
static tr_port
getRandomPort( tr_session * s )
{
    const int span = s->randomPortHigh - s->randomPortLow + 1;
    return s->randomPortLow + tr_cryptoWeakRandInt( span );
}
/* Initializes the DHT subsystem for session `ss`, reusing the session's
 * already-open UDP sockets (ss->udp_socket / ss->udp6_socket).
 * Returns 1 on success, -1 on failure or if already initialized.
 *
 * Side effects: sets the file-scope globals session and dht_timer,
 * restores saved state from configDir/dht.dat, and spawns a bootstrap
 * thread. */
int
tr_dhtInit(tr_session *ss)
{
    tr_benc benc;
    int rc;
    bool have_id = false;
    char * dat_file;
    uint8_t * nodes = NULL, * nodes6 = NULL;
    const uint8_t * raw;
    size_t len, len6;
    struct bootstrap_closure * cl;

    if( session ) /* already initialized */
        return -1;

    tr_ndbg( "DHT", "Initializing DHT" );

    if( getenv( "TR_DHT_VERBOSE" ) != NULL )
        dht_debug = stderr;

    /* try to restore the node id and cached peers saved by a prior run */
    dat_file = tr_buildPath( ss->configDir, "dht.dat", NULL );
    rc = tr_bencLoadFile( &benc, TR_FMT_BENC, dat_file );
    tr_free( dat_file );
    if(rc == 0) {
        have_id = tr_bencDictFindRaw(&benc, "id", &raw, &len);
        if( have_id && len==20 )
            memcpy( myid, raw, len );
        /* "nodes" is expected to be a multiple of 6 bytes (compact IPv4 endpoints) */
        if( ss->udp_socket >= 0 &&
            tr_bencDictFindRaw( &benc, "nodes", &raw, &len ) && !(len%6) ) {
            nodes = tr_memdup( raw, len );
        }
        /* "nodes6" is expected to be a multiple of 18 bytes (compact IPv6 endpoints).
         * NOTE(review): this tests udp6_socket > 0 while udp_socket uses >= 0 --
         * confirm whether excluding fd 0 here is intentional */
        if( ss->udp6_socket > 0 &&
            tr_bencDictFindRaw( &benc, "nodes6", &raw, &len6 ) && !(len6%18) ) {
            nodes6 = tr_memdup( raw, len6 );
        }
        tr_bencFree( &benc );
    }
    /* keep the lengths consistent with the (possibly absent) node buffers */
    if(nodes == NULL)
        len = 0;
    if(nodes6 == NULL)
        len6 = 0;

    if( have_id )
        tr_ninf( "DHT", "Reusing old id" );
    else {
        /* Note that DHT ids need to be distributed uniformly,
         * so it should be something truly random. */
        tr_ninf( "DHT", "Generating new id" );
        tr_cryptoRandBuf( myid, 20 );
    }

    rc = dht_init( ss->udp_socket, ss->udp6_socket, myid, NULL );
    if( rc < 0 )
        goto fail;

    session = ss;

    /* hand the cached nodes to a background thread for bootstrapping;
     * ownership of cl (and the node buffers) presumably passes to
     * dht_bootstrap -- verify it frees them.
     * NOTE(review): nodes/nodes6 are NOT freed on the fail path below,
     * so a dht_init() failure after loading dht.dat leaks them -- confirm */
    cl = tr_new( struct bootstrap_closure, 1 );
    cl->session = session;
    cl->nodes = nodes;
    cl->nodes6 = nodes6;
    cl->len = len;
    cl->len6 = len6;
    tr_threadNew( dht_bootstrap, cl );

    /* arm the periodic DHT timer with a random initial delay (units
     * presumably microseconds given the 1000000 bound -- TODO confirm) */
    dht_timer = evtimer_new( session->event_base, timer_callback, session );
    tr_timerAdd( dht_timer, 0, tr_cryptoWeakRandInt( 1000000 ) );

    tr_ndbg( "DHT", "DHT initialized" );

    return 1;

 fail:
    tr_ndbg( "DHT", "DHT initialization failed (errno = %d)", errno );
    session = NULL;
    return -1;
}
/* Allocates bandwidth for the coming period to `b` and its subtree of
 * peerIos, then flushes pending IO in two phases:
 *  1. a fair round-robin flush, bounded by a ~100ms time budget, and
 *  2. on-demand event-driven IO for any peers with bandwidth to spare. */
void
tr_bandwidthAllocate( tr_bandwidth * b, tr_direction dir, int period_msec )
{
    int i, n, peerCount;
    tr_ptrArray * tmp;
    struct tr_peerIo ** peers;
    const uint64_t now = tr_date( );
    const uint64_t cutoff = now + 100; /* 1/10th of a second */

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    tmp = tr_ptrArrayNew( );
    allocateBandwidth( b, dir, period_msec, tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayPeek( tmp, &peerCount );

    /* Stop all peers from listening for the socket to be ready for IO.
     * See "Second phase of IO" lower in this function for more info. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, FALSE );

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * or peers that can use it */
    n = peerCount;
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
    for( ; n>0 && tr_date()<=cutoff; )
    {
        /* the last remaining peer gets bigger chunks: nobody left to compete with */
        const int increment = n==1 ? 4096 : 1024;
        const int byteCount = tr_peerIoFlush( peers[i], dir, increment);

        if( byteCount == increment )
            ++i; /* peer can still write; give the next peer a turn */
        else {
            /* peer is done writing for now; move it to the end of the list.
             * FIX: renamed the swap variable from `tmp`, which shadowed the
             * tr_ptrArray declared at function scope (-Wshadow hazard). */
            tr_peerIo * swap = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = swap;
            --n;
        }

        assert( i <= n );
        if( i == n )
            i = 0; /* wrap around */
    }

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        if( tr_peerIoHasBandwidthLeft( peers[i], dir ) )
            tr_peerIoSetEnabled( peers[i], dir, TRUE );

    /* cleanup */
    tr_ptrArrayFree( tmp, NULL );
}