/* Clean a single torrent.
 * Returns 1 if the torrent timed out entirely (caller should drop it),
 * 0 if it was cleaned in place and remains valid. */
int clean_single_torrent( ot_torrent *torrent ) {
  ot_peerlist *peer_list    = torrent->peer_list;
  ot_vector   *bucket       = &peer_list->peers;
  time_t       idle_time    = (time_t)( g_now_minutes - peer_list->base );
  int          bucket_count = 1, seeders_removed = 0;

  /* Touched this very minute: nothing can have expired yet */
  if( !idle_time )
    return 0;

  /* Whole torrent has idled out */
  if( idle_time > OT_TORRENT_TIMEOUT )
    return 1;

  /* Past the peer timeout: every peer is stale. Keep the torrent only
     if it has recorded downloads worth remembering. */
  if( idle_time > OT_PEER_TIMEOUT ) {
    if( !peer_list->peer_count )
      return peer_list->down_count ? 0 : 1;
    idle_time = OT_PEER_TIMEOUT;
  }

  /* With buckets, peers.data is itself an array of ot_vector */
  if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
    bucket_count = bucket->size;
    bucket       = (ot_vector *)bucket->data;
  }

  for( ; bucket_count > 0; --bucket_count, ++bucket ) {
    size_t expired = clean_single_bucket( bucket->data, bucket->size, idle_time, &seeders_removed );
    peer_list->peer_count -= expired;
    bucket->size          -= expired;
    /* Shrink the vector's allocation when it emptied out substantially */
    if( bucket->size < expired )
      vector_fixup_peers( bucket );
  }
  peer_list->seed_count -= seeders_removed;

  /* See, if we need to convert a torrent from simple vector to bucket list */
  if( ( peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT ) || OT_PEERLIST_HASBUCKETS(peer_list) )
    vector_redistribute_buckets( peer_list );

  if( peer_list->peer_count )
    peer_list->base = g_now_minutes;
  else {
    /* When we got here, the last time that torrent has been touched
       is OT_PEER_TIMEOUT minutes before */
    peer_list->base = g_now_minutes - OT_PEER_TIMEOUT;
  }

  return 0;
}
/* Serialize all peers of a peer list to fp.
 * On-disk layout: one unsigned int total peer count, followed by that many
 * raw ot_peer records (same byte layout as the in-memory vectors, so the
 * output is identical to a per-peer dump).
 * Returns 0 on success, -1 on write error (logged). */
static int persist_dump_peers(ot_peerlist *peer_list, FILE *fp ) {
  unsigned int bucket, num_buckets = 1;
  ot_vector *bucket_list = &peer_list->peers;
  unsigned int count = 0;

  /* With buckets, peers.data is itself an array of ot_vector */
  if( OT_PEERLIST_HASBUCKETS(peer_list) ) {
    num_buckets = bucket_list->size;
    bucket_list = (ot_vector *)bucket_list->data;
  }

  /* write peers count */
  for (bucket = 0; bucket < num_buckets; ++bucket) {
    count += bucket_list[bucket].size;
  }
  if (fwrite(&count, sizeof(unsigned int), 1, fp) == 0)
    goto werr;

  /* Dump each bucket's peers with a single bulk fwrite instead of one
     libc call per peer; a short write (return != peer_count) is an error.
     Empty buckets are skipped so fwrite's 0-item return can't be
     mistaken for a failure. */
  for (bucket = 0; bucket < num_buckets; ++bucket) {
    ot_peer *peers = (ot_peer*)bucket_list[bucket].data;
    size_t peer_count = bucket_list[bucket].size;
    if (peer_count && fwrite(peers, sizeof(ot_peer), peer_count, fp) != peer_count)
      goto werr;
  }
  return 0;

werr:
  LOG_ERR("%s: persist dump peers failed\n", __FUNCTION__);
  return -1;
}
/* Release a peer list and everything it owns.
 * When the list uses buckets, each bucket's peer array is freed first,
 * then the bucket array itself; otherwise only the flat peer array.
 * Finally the peer list structure is freed. */
void free_peerlist( ot_peerlist *peer_list ) {
  if( peer_list->peers.data ) {
    if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
      ot_vector *buckets = (ot_vector*)(peer_list->peers.data);
      size_t     index;
      for( index = 0; index < peer_list->peers.size; ++index )
        free( buckets[index].data );
    }
    free( peer_list->peers.data );
  }
  free( peer_list );
}
/* Build a text report of the busiest /24 networks into reply.
 * Walks every torrent bucket under its mutex, feeding each peer's address
 * into a temporary network-counter tree, then renders the busiest entries.
 * Returns the number of bytes written to reply (0 on abort/error). */
static size_t stats_slash24s_txt( char *reply, size_t amount ) {
  stats_network_node *slash24s_network_counters_root = NULL;
  char *r=reply;
  int bucket;
  size_t i;

  for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) {
    /* Hold the bucket lock while reading its torrents */
    ot_vector *torrents_list = mutex_bucket_lock( bucket );
    for( i=0; i<torrents_list->size; ++i ) {
      ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[i] ).peer_list;
      ot_vector *bucket_list = &peer_list->peers;
      int num_buckets = 1;
      /* With buckets, peers.data is itself an array of ot_vector */
      if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
        num_buckets = bucket_list->size;
        bucket_list = (ot_vector *)bucket_list->data;
      }
      while( num_buckets-- ) {
        ot_peer *peers = (ot_peer*)bucket_list->data;
        size_t numpeers = bucket_list->size;
        /* Count each peer; a non-zero return signals failure (likely OOM
           in the counter tree) — must unlock before bailing out */
        while( numpeers-- )
          if( stat_increase_network_count( &slash24s_network_counters_root, 0, (uintptr_t)(peers++) ) )
            goto bailout_unlock;
        ++bucket_list;
      }
    }
    mutex_bucket_unlock( bucket, 0 );
    /* Abort the scan if the tracker is shutting down */
    if( !g_opentracker_running )
      goto bailout_error;
  }

  /* The tree is built. Now analyze */
  r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_MAXDEPTH );
  /* NOTE(review): the second call also receives the full `amount` even though
     r has already advanced — looks like it could overrun reply if both
     renders are large; verify stats_return_busy_networks' bound semantics. */
  r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_LIMIT );
  goto success;

bailout_unlock:
  mutex_bucket_unlock( bucket, 0 );
bailout_error:
  r = reply;
success:
  /* Runs on every exit path — presumably tears down/releases the counter
     tree; confirm against stats_shift_down_network_count's contract */
  stats_shift_down_network_count( &slash24s_network_counters_root, 0, sizeof(int)*8-1 );
  return r-reply;
}
/* Re-balance a peer list between its flat-vector and bucketed forms.
 * Chooses a new bucket count from peer_count (with hysteresis bands so the
 * list doesn't flap between layouts on small count changes), rehashes every
 * peer into freshly allocated buckets, sorts each bucket for bsearch, and
 * only then swaps the new storage into peer_list. On any allocation failure
 * the new storage is discarded and the old layout is left untouched. */
void vector_redistribute_buckets( ot_peerlist * peer_list ) {
  int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1;
  ot_vector * bucket_list_new, * bucket_list_old = &peer_list->peers;

  /* With buckets, peers.data is itself an array of ot_vector */
  if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
    num_buckets_old = peer_list->peers.size;
    bucket_list_old = peer_list->peers.data;
  }

  /* Pick the target bucket count. The middle cases keep the current
     count where acceptable, so layouts only change on big swings. */
  if( peer_list->peer_count < 255 )
    num_buckets_new = 1;
  else if( peer_list->peer_count > 8192 )
    num_buckets_new = 64;
  else if( peer_list->peer_count >= 512 && peer_list->peer_count < 4096 )
    num_buckets_new = 16;
  else if( peer_list->peer_count < 512 && num_buckets_old <= 16 )
    num_buckets_new = num_buckets_old;
  else if( peer_list->peer_count < 512 )
    num_buckets_new = 1;
  else if( peer_list->peer_count < 8192 && num_buckets_old > 1 )
    num_buckets_new = num_buckets_old;
  else
    num_buckets_new = 16;

  /* Layout already matches: nothing to do */
  if( num_buckets_new == num_buckets_old )
    return;

  /* Assume near perfect distribution */
  bucket_list_new = malloc( num_buckets_new * sizeof( ot_vector ) );
  if( !bucket_list_new) return;
  bzero( bucket_list_new, num_buckets_new * sizeof( ot_vector ) );

  /* Round the per-bucket capacity up to the vector growth schedule */
  tmp = peer_list->peer_count / num_buckets_new;
  bucket_size_new = OT_VECTOR_MIN_MEMBERS;
  while( bucket_size_new < tmp)
    bucket_size_new *= OT_VECTOR_GROW_RATIO;

  /* preallocate vectors to hold all peers */
  for( bucket=0; bucket<num_buckets_new; ++bucket ) {
    bucket_list_new[bucket].space = bucket_size_new;
    bucket_list_new[bucket].data = malloc( bucket_size_new * sizeof(ot_peer) );
    /* On failure, discard everything allocated so far; old layout stays */
    if( !bucket_list_new[bucket].data )
      return vector_clean_list( bucket_list_new, num_buckets_new );
  }

  /* Now sort them into the correct bucket */
  for( bucket=0; bucket<num_buckets_old; ++bucket ) {
    ot_peer * peers_old = bucket_list_old[bucket].data, * peers_new;
    int peer_count_old = bucket_list_old[bucket].size;
    while( peer_count_old-- ) {
      ot_vector * bucket_dest = bucket_list_new;
      if( num_buckets_new > 1 )
        bucket_dest += vector_hash_peer(peers_old, num_buckets_new);
      /* Grow the destination bucket if the preallocation guess was short */
      if( bucket_dest->size + 1 > bucket_dest->space ) {
        void * tmp = realloc( bucket_dest->data, sizeof(ot_peer) * OT_VECTOR_GROW_RATIO * bucket_dest->space );
        if( !tmp ) return vector_clean_list( bucket_list_new, num_buckets_new );
        bucket_dest->data = tmp;
        bucket_dest->space *= OT_VECTOR_GROW_RATIO;
      }
      peers_new = (ot_peer*)bucket_dest->data;
      memcpy(peers_new + bucket_dest->size++, peers_old++, sizeof(ot_peer));
    }
  }

  /* Now sort each bucket to later allow bsearch */
  for( bucket=0; bucket<num_buckets_new; ++bucket )
    qsort( bucket_list_new[bucket].data, bucket_list_new[bucket].size, sizeof( ot_peer ), vector_compare_peer );

  /* Everything worked fine. Now link new bucket_list to peer_list */
  if( OT_PEERLIST_HASBUCKETS( peer_list) )
    vector_clean_list( (ot_vector*)peer_list->peers.data, peer_list->peers.size );
  else
    free( peer_list->peers.data );

  if( num_buckets_new > 1 ) {
    peer_list->peers.data = bucket_list_new;
    peer_list->peers.size = num_buckets_new;
    peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */
  } else {
    /* Collapse back to a flat vector: steal the single bucket's fields
       and free the (now empty) bucket array wrapper */
    peer_list->peers.data = bucket_list_new->data;
    peer_list->peers.size = bucket_list_new->size;
    peer_list->peers.space = bucket_list_new->space;
    free( bucket_list_new );
  }
}
/* Clean a single torrent
   return 1 if torrent timed out.
   terasaur-modified variant: additionally notifies ts_update_torrent_stats
   when any peers or seeders were expired during the sweep. */
int clean_single_torrent( ot_torrent *torrent ) {
#ifdef _DEBUG
  ts_log_debug("ot_clean::clean_single_torrent: start");
#endif
  ot_peerlist *peer_list = torrent->peer_list;
  ot_vector *bucket_list = &peer_list->peers;
  time_t timedout = (time_t)( g_now_minutes - peer_list->base );
  int num_buckets = 1, removed_seeders = 0;
  /* terasaur -- begin mod */
  int update_stats = 0; /* set when this sweep actually removed something */
  /* terasaur -- end mod */

  /* No need to clean empty torrent */
  if( !timedout )
    return 0;

  /* Torrent has idled out */
  if( timedout > OT_TORRENT_TIMEOUT )
    return 1;

  /* Nothing to be cleaned here? Test if torrent is worth keeping */
  if( timedout > OT_PEER_TIMEOUT ) {
    if( !peer_list->peer_count )
      return peer_list->down_count ? 0 : 1;
    timedout = OT_PEER_TIMEOUT;
  }

  /* With buckets, peers.data is itself an array of ot_vector */
  if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
    num_buckets = bucket_list->size;
    bucket_list = (ot_vector *)bucket_list->data;
  }

  while( num_buckets-- ) {
    size_t removed_peers = clean_single_bucket( bucket_list->data, bucket_list->size, timedout, &removed_seeders );
    peer_list->peer_count -= removed_peers;
    bucket_list->size -= removed_peers;
    /* Shrink the vector's allocation when it emptied out substantially */
    if( bucket_list->size < removed_peers )
      vector_fixup_peers( bucket_list );
    ++bucket_list;
    /* terasaur -- begin mod */
    /* NOTE(review): removed_seeders accumulates across buckets, so once any
       bucket drops a seeder this condition stays true for every later
       bucket. Harmless here (update_stats is a sticky flag), but the
       per-bucket intent should be confirmed. */
    if ((removed_peers) > 0 || (removed_seeders > 0)) {
      update_stats = 1;
    }
    /* terasaur -- end mod */
  }
  peer_list->seed_count -= removed_seeders;

  /* See, if we need to convert a torrent from simple vector to bucket list */
  if( ( peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT ) || OT_PEERLIST_HASBUCKETS(peer_list) )
    vector_redistribute_buckets( peer_list );

  if( peer_list->peer_count )
    peer_list->base = g_now_minutes;
  else {
    /* When we got here, the last time that torrent has been touched
       is OT_PEER_TIMEOUT Minutes before */
    peer_list->base = g_now_minutes - OT_PEER_TIMEOUT;
  }

  /* terasaur -- begin mod */
  if (update_stats == 1) {
#ifdef _DEBUG
    ts_log_debug("ot_clean::clean_single_torrent: calling ts_update_torrent_stats");
#endif
    ts_update_torrent_stats(torrent, 0);
#ifdef _DEBUG
    ts_log_debug("ot_clean::clean_single_torrent: after ts_update_torrent_stats");
#endif
  }
  /* terasaur -- end mod */
#ifdef _DEBUG
  ts_log_debug("ot_clean::clean_single_torrent: returning");
#endif
  return 0;
}