/* Rebuild the session's hash-string -> .torrent-file lookup table by
 * scanning every non-dotfile in the session's torrent directory. */
static void
metainfoLookupRescan( tr_session * session )
{
    struct stat     sb;
    const char    * dirname = tr_getTorrentDir( session );
    DIR           * dirhandle = NULL;
    tr_ctor       * ctor;
    tr_list       * pairs = NULL; /* alternating hashString / filename entries */
    int             count;
    int             i;

    assert( tr_isSession( session ) );

    /* walk through the directory and collect (hash, path) pairs */
    ctor = tr_ctorNew( session );
    tr_ctorSetSave( ctor, FALSE ); /* since we already have them */

    if( !stat( dirname, &sb )
        && S_ISDIR( sb.st_mode )
        && (( dirhandle = opendir( dirname ))) )
    {
        struct dirent * entry;

        while(( entry = readdir( dirhandle )))
        {
            /* skip dotfiles, ".", and ".." */
            if( !entry->d_name || entry->d_name[0] == '.' )
                continue;

            {
                tr_info  inf;
                char   * path = tr_buildPath( dirname, entry->d_name, NULL );

                tr_ctorSetMetainfoFromFile( ctor, path );
                if( !tr_torrentParse( session, ctor, &inf ) )
                {
                    tr_list_append( &pairs, tr_strdup( inf.hashString ) );
                    tr_list_append( &pairs, tr_strdup( path ) );
                    tr_metainfoFree( &inf );
                }
                tr_free( path );
            }
        }

        closedir( dirhandle );
    }

    tr_ctorFree( ctor );

    /* move the collected pairs into the session's lookup array */
    count = tr_list_size( pairs ) / 2;
    session->metainfoLookup = tr_new0( struct tr_metainfo_lookup, count );
    session->metainfoLookupCount = count;
    for( i = 0; i < count; ++i )
    {
        char * hashString = tr_list_pop_front( &pairs );
        char * filename = tr_list_pop_front( &pairs );

        memcpy( session->metainfoLookup[i].hashString, hashString,
                2 * SHA_DIGEST_LENGTH + 1 );
        tr_free( hashString );
        session->metainfoLookup[i].filename = filename; /* ownership moves to the table */
    }

    metainfoLookupResort( session );
    tr_dbg( "Found %d torrents in \"%s\"", count, dirname );
}
/* Move queued easy handles into the curl multi handle until either the
 * queue is empty or MAX_CONCURRENT_TASKS handles are running. */
static void
add_tasks_from_queue( tr_web * g )
{
    for( ;; )
    {
        CURL * easy;
        CURLMcode rc;

        if( g->still_running >= MAX_CONCURRENT_TASKS )
            break;
        if( tr_list_size( g->easy_queue ) <= 0 )
            break;

        easy = tr_list_pop_front( &g->easy_queue );
        if( !easy )
            continue;

        rc = curl_multi_add_handle( g->multi, easy );
        if( rc == CURLM_OK )
        {
            dbgmsg( "pumped the task queue, %d remain",
                    tr_list_size( g->easy_queue ) );
            ++g->still_running;
        }
        else
            tr_err( "%s", curl_multi_strerror( rc ) );
    }
}
static void addTask( void * vtask ) { struct tr_web_task * task = vtask; const tr_handle * session = task->session; if( session && session->web ) { struct tr_web * web = session->web; CURL * easy; dbgmsg( "adding task #%lu [%s]", task->tag, task->url ); easy = curl_easy_init( ); if( !task->range && session->isProxyEnabled ) { curl_easy_setopt( easy, CURLOPT_PROXY, session->proxy ); curl_easy_setopt( easy, CURLOPT_PROXYAUTH, CURLAUTH_ANY ); curl_easy_setopt( easy, CURLOPT_PROXYPORT, session->proxyPort ); curl_easy_setopt( easy, CURLOPT_PROXYTYPE, getCurlProxyType( session->proxyType ) ); } if( !task->range && session->isProxyAuthEnabled ) { char * str = tr_strdup_printf( "%s:%s", session->proxyUsername, session->proxyPassword ); curl_easy_setopt( easy, CURLOPT_PROXYUSERPWD, str ); tr_free( str ); } curl_easy_setopt( easy, CURLOPT_DNS_CACHE_TIMEOUT, 360L ); curl_easy_setopt( easy, CURLOPT_CONNECTTIMEOUT, 60L ); curl_easy_setopt( easy, CURLOPT_FOLLOWLOCATION, 1L ); curl_easy_setopt( easy, CURLOPT_MAXREDIRS, 16L ); curl_easy_setopt( easy, CURLOPT_NOSIGNAL, 1L ); curl_easy_setopt( easy, CURLOPT_PRIVATE, task ); curl_easy_setopt( easy, CURLOPT_SSL_VERIFYHOST, 0L ); curl_easy_setopt( easy, CURLOPT_SSL_VERIFYPEER, 0L ); curl_easy_setopt( easy, CURLOPT_URL, task->url ); curl_easy_setopt( easy, CURLOPT_USERAGENT, TR_NAME "/" LONG_VERSION_STRING ); curl_easy_setopt( easy, CURLOPT_VERBOSE, getenv( "TR_CURL_VERBOSE" ) != NULL ); curl_easy_setopt( easy, CURLOPT_WRITEDATA, task ); curl_easy_setopt( easy, CURLOPT_WRITEFUNCTION, writeFunc ); if( task->range ) curl_easy_setopt( easy, CURLOPT_RANGE, task->range ); else /* don't set encoding on webseeds; it messes up binary data */ curl_easy_setopt( easy, CURLOPT_ENCODING, "" ); if( web->still_running >= MAX_CONCURRENT_TASKS ) { tr_list_append( &web->easy_queue, easy ); dbgmsg( " >> enqueueing a task ... 
size is now %d", tr_list_size( web->easy_queue ) ); } else { const CURLMcode rc = curl_multi_add_handle( web->multi, easy ); if( rc == CURLM_OK ) ++web->still_running; else tr_err( "%s", curl_multi_strerror( rc ) ); } } }
/* Main loop of the dedicated web-I/O thread.
 *
 * Owns the curl multi handle and the tr_web state for `session`:
 * repeatedly (1) moves tasks from tasks_undone into the multi handle,
 * (2) waits via select() for curl's fds or a timeout, (3) pumps
 * curl_multi_perform(), and (4) dispatches completed transfers back to
 * the event thread via task_finish_func. Runs until close_mode asks it
 * to stop, then drains any leftover tasks and frees the tr_web state.
 *
 * vsession: the tr_session* this thread serves (ownership stays with caller;
 *           this function does set session->web on entry and clears it on exit). */
static void
tr_webThreadFunc( void * vsession )
{
    CURLM * multi;
    struct tr_web * web;
    int taskCount = 0;
    struct tr_web_task * task;
    tr_session * session = vsession;

    /* try to enable ssl for https support; but if that fails,
     * try a plain vanilla init */
    if( curl_global_init( CURL_GLOBAL_SSL ) )
        curl_global_init( 0 );

    web = tr_new0( struct tr_web, 1 );
    web->close_mode = ~0; /* neither CLOSE_NOW nor CLOSE_WHEN_IDLE yet */
    web->taskLock = tr_lockNew( );
    web->curl_verbose = getenv( "TR_CURL_VERBOSE" ) != NULL;
    web->cookie_filename = tr_buildPath( session->configDir, "cookies.txt", NULL );

    multi = curl_multi_init( );
    session->web = web; /* publish; other threads enqueue via session->web */

    for( ;; )
    {
        long msec;
        int unused;
        CURLMsg * msg;
        CURLMcode mcode;

        if( web->close_mode == TR_WEB_CLOSE_NOW )
            break;
        /* CLOSE_WHEN_IDLE: keep looping until the undone queue drains */
        if( ( web->close_mode == TR_WEB_CLOSE_WHEN_IDLE )
            && ( tr_list_size( web->tasks_undone ) == 0 ) )
            break;

        /* add tasks from the queue; taskLock guards the shared task lists */
        tr_lockLock( web->taskLock );
        while( tr_list_size( web->tasks_undone ) > 0 )
        {
            /* pop the task */
            task = tr_list_pop_front( &web->tasks_undone);
            tr_list_append(&web->tasks_doing,task);
            dbgmsg( "adding task to curl: [%s]", task->url );
            curl_multi_add_handle( multi, createEasy( session, web, task ));
            /*fprintf( stderr, "adding a task.. taskCount is now %d\n", taskCount );*/
            ++taskCount;
        }
        tr_lockUnlock( web->taskLock );

        /* maybe wait a little while before calling curl_multi_perform().
         * curl_multi_timeout() returns -1 for "no suggestion", so fall
         * back to our own cap */
        msec = 0;
        curl_multi_timeout( multi, &msec );
        if( msec < 0 )
            msec = THREADFUNC_MAX_SLEEP_MSEC;
        if( session->isClosed )
            msec = 100; /* on shutdown, call perform() more frequently */
        if( msec > 0 )
        {
            int usec;
            int max_fd;
            struct timeval t;
            fd_set r_fd_set, w_fd_set, c_fd_set;

            max_fd = 0;
            FD_ZERO( &r_fd_set );
            FD_ZERO( &w_fd_set );
            FD_ZERO( &c_fd_set );
            curl_multi_fdset( multi, &r_fd_set, &w_fd_set, &c_fd_set, &max_fd );

            if( msec > THREADFUNC_MAX_SLEEP_MSEC )
                msec = THREADFUNC_MAX_SLEEP_MSEC;

            usec = msec * 1000;
            t.tv_sec = usec / 1000000;
            t.tv_usec = usec % 1000000;
            /* sleep until curl's fds are ready or the timeout expires */
            tr_select( max_fd+1, &r_fd_set, &w_fd_set, &c_fd_set, &t );
        }

        /* call curl_multi_perform() until it has no more immediate work */
        do {
            mcode = curl_multi_perform( multi, &unused );
        } while( mcode == CURLM_CALL_MULTI_PERFORM );

        /* pump completed tasks from the multi */
        while(( msg = curl_multi_info_read( multi, &unused )))
        {
            if(( msg->msg == CURLMSG_DONE ) && ( msg->easy_handle != NULL ))
            {
                double total_time;
                struct tr_web_task * task; /* intentionally shadows the outer `task` */
                long req_bytes_sent;
                CURL * e = msg->easy_handle;
                /* recover the task pointer stashed via CURLOPT_PRIVATE */
                curl_easy_getinfo( e, CURLINFO_PRIVATE, (void*)&task );
                curl_easy_getinfo( e, CURLINFO_RESPONSE_CODE, &task->code );
                curl_easy_getinfo( e, CURLINFO_REQUEST_SIZE, &req_bytes_sent );
                curl_easy_getinfo( e, CURLINFO_TOTAL_TIME, &total_time );
                /* a response code or any bytes sent means the connect succeeded */
                task->did_connect = task->code>0 || req_bytes_sent>0;
                task->did_timeout = !task->code && ( total_time >= task->timeout_secs );
                curl_multi_remove_handle( multi, e );
                curl_easy_cleanup( e );
                dbgmsg("removing a completed task.. taskCount is now %d (response code: %d, response len: %d)\n", taskCount, (int)task->code, (int)evbuffer_get_length(task->response) );
                /* hand the finished task back to the event thread;
                 * task ownership transfers to task_finish_func */
                tr_runInEventThread( task->session, task_finish_func, task );
                tr_list_remove_data(&web->tasks_doing,task);
                --taskCount;
            }
        }
    }

    /* cleanup */
    curl_multi_cleanup( multi );
    /* Discard any remaining tasks.
     * This is rare, but can happen on shutdown with unresponsive trackers. */
    while( tr_list_size( web->tasks_undone ) > 0)
    {
        task = tr_list_pop_front( &web->tasks_undone);
        task->bSync = 1; /* finish synchronously; the event loop may be gone */
        dbgmsg( "Discarding task \"%s\"", task->url );
        task_finish_func(task);
    }
    while( tr_list_size( web->tasks_doing ) > 0)
    {
        task = tr_list_pop_front( &web->tasks_doing);
        task->bSync = 1;
        dbgmsg( "Discarding task \"%s\"", task->url );
        task_finish_func(task);
    }
    tr_lockFree( web->taskLock );
    tr_free( web->cookie_filename );
    tr_free( web );
    session->web = NULL;
}