/* Performs a synchronous HTTP "scrape" request against the torrent's
 * tracker to learn the current seeder/leecher counts.
 *
 * tor      torrent to scrape; tor->scrape holds the scrape URL path
 *          ("" means the tracker does not support scraping)
 * seeders  out: value of the "complete" key for this torrent
 * leechers out: value of the "incomplete" key for this torrent
 *
 * Returns 0 on success, 1 on any failure.  Each network phase
 * (resolve, send, receive) is bounded by a 10 second timeout. */
int tr_trackerScrape( tr_torrent_t * tor, int * seeders, int * leechers )
{
    tr_info_t * inf = &tor->info;
    int s, i, ret;
    uint8_t buf[1024];
    benc_val_t scrape, * val1, * val2;
    struct in_addr addr;
    uint64_t date;
    int pos, len;
    tr_resolve_t * resolve;

    if( !tor->scrape[0] )
    {
        /* scrape not supported */
        return 1;
    }

    /* resolve the tracker hostname (10 s timeout) */
    resolve = tr_netResolveInit( inf->trackerAddress );
    for( date = tr_date();; )
    {
        ret = tr_netResolvePulse( resolve, &addr );
        if( ret == TR_RESOLVE_OK )
        {
            tr_netResolveClose( resolve );
            break;
        }
        if( ret == TR_RESOLVE_ERROR ||
            ( ret == TR_RESOLVE_WAIT && tr_date() > date + 10000 ) )
        {
            fprintf( stderr, "Could not resolve %s\n", inf->trackerAddress );
            tr_netResolveClose( resolve );
            return 1;
        }
        tr_wait( 10 );
    }

    s = tr_netOpen( addr, htons( inf->trackerPort ) );
    if( s < 0 )
    {
        return 1;
    }

    len = snprintf( (char *) buf, sizeof( buf ),
                    "GET %s?info_hash=%s HTTP/1.1\r\n"
                    "Host: %s\r\n"
                    "Connection: close\r\n\r\n",
                    tor->scrape, tor->hashString, inf->trackerAddress );

    /* send the request (10 s timeout) */
    for( date = tr_date();; )
    {
        ret = tr_netSend( s, buf, len );
        if( ret & TR_NET_CLOSE )
        {
            fprintf( stderr, "Could not connect to tracker\n" );
            tr_netClose( s );
            return 1;
        }
        else if( ret & TR_NET_BLOCK )
        {
            if( tr_date() > date + 10000 )
            {
                fprintf( stderr, "Could not connect to tracker\n" );
                tr_netClose( s );
                return 1;
            }
        }
        else
        {
            break;
        }
        tr_wait( 10 );
    }

    /* read the reply until the tracker closes the connection
       (10 s timeout between reads) */
    pos = 0;
    for( date = tr_date();; )
    {
        ret = tr_netRecv( s, &buf[pos], sizeof( buf ) - pos );
        if( ret & TR_NET_CLOSE )
        {
            break;
        }
        else if( ret & TR_NET_BLOCK )
        {
            if( tr_date() > date + 10000 )
            {
                fprintf( stderr, "Could not read from tracker\n" );
                tr_netClose( s );
                return 1;
            }
        }
        else
        {
            pos += ret;
        }
        tr_wait( 10 );
    }

    /* FIX: the socket was previously leaked on the success path and on
       every parse-failure path below; close it as soon as we are done
       with the network */
    tr_netClose( s );

    if( pos < 1 )
    {
        fprintf( stderr, "Could not read from tracker\n" );
        return 1;
    }

    /* skip the HTTP headers: find the start of the bencoded body */
    for( i = 0; i < pos - 8; i++ )
    {
        if( !memcmp( &buf[i], "d5:files", 8 ) )
        {
            break;
        }
    }
    if( i >= pos - 8 )
    {
        return 1;
    }
    if( tr_bencLoad( &buf[i], pos - i, &scrape, NULL ) )
    {
        return 1;
    }

    /* files -> { <info hash>: { complete, incomplete, ... } } */
    val1 = tr_bencDictFind( &scrape, "files" );
    if( !val1 || val1->val.l.count < 2 )
    {
        /* FIX: the old code checked '!val1' AFTER taking the address of
           vals[1] (never NULL) and leaked 'scrape' on every failure path;
           guard the list bounds and free the dictionary instead */
        tr_bencFree( &scrape );
        return 1;
    }
    /* a benc dict is stored as a flat key/value list: entry 1 is the
       value for the first (our info-hash) key */
    val1 = &val1->val.l.vals[1];

    val2 = tr_bencDictFind( val1, "complete" );
    if( !val2 )
    {
        tr_bencFree( &scrape );
        return 1;
    }
    *seeders = val2->val.i;

    val2 = tr_bencDictFind( val1, "incomplete" );
    if( !val2 )
    {
        tr_bencFree( &scrape );
        return 1;
    }
    *leechers = val2->val.i;

    tr_bencFree( &scrape );
    return 0;
}
/* Restores per-piece "time checked" stamps and piece/block completion
 * state from the resume file's "progress" dictionary.
 *
 * Returns TR_FR_PROGRESS when a progress dict was present (even if its
 * contents were invalid and the torrent was flagged for re-verification),
 * 0 when no progress dict was found. */
static uint64_t loadProgress( tr_benc * dict, tr_torrent * tor )
{
    size_t i, n;
    uint64_t ret = 0;
    tr_benc * prog;
    const tr_info * inf = tr_torrentInfo( tor );

    /* start from a clean slate: mark every piece unchecked */
    for( i=0, n=inf->pieceCount; i<n; ++i )
        inf->pieces[i].timeChecked = 0;

    if( tr_bencDictFindDict( dict, KEY_PROGRESS, &prog ) )
    {
        const char * err;
        const char * str;
        const uint8_t * raw;
        size_t rawlen;
        tr_benc * l;
        tr_benc * b;
        struct tr_bitset bitset = TR_BITSET_INIT;

        if( tr_bencDictFindList( prog, KEY_PROGRESS_CHECKTIME, &l ) )
        {
            /* per-piece timestamps were added in 2.20.

               If some of a file's pieces have been checked more recently
               than the file's mtime, and some less recently, then that
               file will have a list containing timestamps for each piece.

               However, the most common use case is that the file doesn't
               change after it's downloaded.  To reduce overhead in the
               .resume file, only a single timestamp is saved for the file
               if *all* or *none* of the pieces were tested more recently
               than the file's mtime. */
            tr_file_index_t fi;
            for( fi=0; fi<inf->fileCount; ++fi )
            {
                /* renamed from 'b' to avoid shadowing the outer 'b' used
                   for the blocks lookup below */
                tr_benc * child = tr_bencListChild( l, fi );
                const tr_file * f = &inf->files[fi];
                tr_piece * p = &inf->pieces[f->firstPiece];
                const tr_piece * pend = &inf->pieces[f->lastPiece]+1;

                if( tr_bencIsInt( child ) )
                {
                    /* a single stamp covers all of the file's pieces */
                    int64_t t;
                    tr_bencGetInt( child, &t );
                    for( ; p!=pend; ++p )
                        p->timeChecked = (time_t)t;
                }
                else if( tr_bencIsList( child ) )
                {
                    int j;
                    int64_t offset = 0;
                    const int pieces = f->lastPiece + 1 - f->firstPiece;

                    /* entry 0 is a base offset; entries 1..pieces are the
                       per-piece stamps relative to it (0 = never checked) */
                    tr_bencGetInt( tr_bencListChild( child, 0 ), &offset );

                    for( j=0; j<pieces; ++j )
                    {
                        int64_t t = 0;
                        tr_bencGetInt( tr_bencListChild( child, j+1 ), &t );
                        inf->pieces[f->firstPiece+j].timeChecked =
                            (time_t)(t ? t + offset : 0);
                    }
                }
            }
        }
        else if( tr_bencDictFindList( prog, KEY_PROGRESS_MTIMES, &l ) )
        {
            tr_file_index_t fi;

            /* Before 2.20, we stored the files' mtimes in the .resume
               file.  When loading the .resume file, a torrent's file
               would be flagged as untested if its stored mtime didn't
               match its real mtime. */
            for( fi=0; fi<inf->fileCount; ++fi )
            {
                int64_t t;

                if( tr_bencGetInt( tr_bencListChild( l, fi ), &t ) )
                {
                    const tr_file * f = &inf->files[fi];
                    tr_piece * p = &inf->pieces[f->firstPiece];
                    /* FIX: pend previously stopped AT lastPiece
                       (exclusive), skipping the file's final piece;
                       use the same inclusive range as the CHECKTIME
                       branch above */
                    const tr_piece * pend = &inf->pieces[f->lastPiece]+1;
                    const time_t mtime = tr_torrentGetFileMTime( tor, fi );
                    const time_t timeChecked = mtime==t ? mtime : 0;

                    for( ; p!=pend; ++p )
                        p->timeChecked = timeChecked;
                }
            }
        }

        err = NULL;
        if(( b = tr_bencDictFind( prog, KEY_PROGRESS_BLOCKS )))
        {
            if( !tr_bitsetFromBenc( &bitset, b ) )
                err = "Invalid value for PIECES";
        }
        else if( tr_bencDictFindStr( prog, KEY_PROGRESS_HAVE, &str ) )
        {
            if( !strcmp( str, "all" ) )
                tr_bitsetSetHaveAll( &bitset );
            else
                err = "Invalid value for HAVE";
        }
        else if( tr_bencDictFindRaw( prog, KEY_PROGRESS_BITFIELD, &raw, &rawlen ) )
        {
            /* NOTE(review): the bitfield borrows the benc dict's memory
               here; presumably tr_cpBlockBitsetInit copies it -- confirm
               before freeing 'dict' any earlier */
            bitset.bitfield.bits = (void*) raw;
            bitset.bitfield.byteCount = rawlen;
            bitset.bitfield.bitCount = rawlen * 8;
        }
        else err = "Couldn't find 'pieces' or 'have' or 'bitfield'";

        if( !err && !tr_cpBlockBitsetInit( &tor->completion, &bitset ) )
            err = "Error loading bitfield";

        if( err != NULL )
            tr_tordbg( tor, "Torrent needs to be verified - %s", err );

        ret = TR_FR_PROGRESS;
    }

    return ret;
}
/* Consumes incoming data on the tracker socket and, once the tracker has
 * closed the connection, parses the HTTP response and the bencoded
 * announce answer: failure reason, interval, seeder/leecher counts and
 * the peer list (original list form or "compact" string form). */
static void recvAnswer( tr_tracker_t * tc )
{
    tr_torrent_t * tor = tc->tor;
    int ret;
    int i;
    benc_val_t beAll;
    benc_val_t * bePeers, * beFoo;
    uint8_t * body;
    int bodylen;
    int gotDict = 0;    /* set once tr_bencLoad has filled beAll */

    if( tc->pos == tc->size )
    {
        /* FIX: grow through a temporary so the buffer is not leaked (and
           tc->buf not nulled) if realloc fails, and keep tc->size in sync
           with the actual allocation */
        uint8_t * newBuf = realloc( tc->buf, tc->size * 2 );
        if( NULL == newBuf )
        {
            return;     /* out of memory; retry on the next pulse */
        }
        tc->buf   = newBuf;
        tc->size *= 2;
    }

    ret = tr_netRecv( tc->socket, &tc->buf[tc->pos], tc->size - tc->pos );
    if( ret & TR_NET_BLOCK )
    {
        return;
    }
    if( !( ret & TR_NET_CLOSE ) )
    {
        // printf( "got %d bytes\n", ret );
        tc->pos += ret;
        return;
    }

    /* the tracker closed the connection: the answer is complete */
    tr_netClose( tc->socket );
    tr_fdSocketClosed( tor->fdlimit, 1 );
    // printf( "connection closed, got total %d bytes\n", tc->pos );

    tc->status  = TC_STATUS_IDLE;
    tc->dateTry = tr_date();

    if( tc->pos < 12 || ( 0 != memcmp( tc->buf, "HTTP/1.0 ", 9 ) &&
                          0 != memcmp( tc->buf, "HTTP/1.1 ", 9 ) ) )
    {
        /* We don't have a complete HTTP status line */
        tr_inf( "Tracker: incomplete HTTP status line" );
        tc->lastAttempt = TC_ATTEMPT_NOREACH;
        return;
    }

    if( '2' != tc->buf[9] )
    {
        /* we didn't get a 2xx status code */
        tr_err( "Tracker: invalid HTTP status code: %c%c%c",
                tc->buf[9], tc->buf[10], tc->buf[11] );
        tc->lastAttempt = TC_ATTEMPT_ERROR;
        return;
    }

    /* find the end of the http headers */
    body = tr_memmem( tc->buf, tc->pos, "\015\012\015\012", 4 );
    if( NULL != body )
    {
        body += 4;
    }
    /* hooray for trackers that violate the HTTP spec */
    else if( NULL != ( body = tr_memmem( tc->buf, tc->pos, "\015\015", 2 ) ) ||
             NULL != ( body = tr_memmem( tc->buf, tc->pos, "\012\012", 2 ) ) )
    {
        body += 2;
    }
    else
    {
        tr_err( "Tracker: could not find end of HTTP headers" );
        tc->lastAttempt = TC_ATTEMPT_NOREACH;
        return;
    }
    bodylen = tc->pos - (body - tc->buf);

    /* Find and load the dictionary */
    for( i = 0; i < bodylen; i++ )
    {
        if( !tr_bencLoad( &body[i], bodylen - i, &beAll, NULL ) )
        {
            gotDict = 1;
            break;
        }
    }
    if( !gotDict )
    {
        if( tc->stopped || 0 < tc->newPort )
        {
            tc->lastAttempt = TC_ATTEMPT_OK;
            /* FIX: this jump used to fall through 'cleanup' and call
               tr_bencFree on the UNINITIALIZED beAll; the gotDict flag
               now guards the free */
            goto nodict;
        }
        tr_err( "Tracker: no valid dictionary found in answer" );
        tc->lastAttempt = TC_ATTEMPT_ERROR;
        return;
    }

    // tr_bencPrint( &beAll );

    if( ( bePeers = tr_bencDictFind( &beAll, "failure reason" ) ) )
    {
        tr_err( "Tracker: %s", bePeers->val.s.s );
        tor->error |= TR_ETRACKER;
        snprintf( tor->trackerError, sizeof( tor->trackerError ),
                  "%s", bePeers->val.s.s );
        tc->lastAttempt = TC_ATTEMPT_ERROR;
        goto cleanup;
    }
    tor->error &= ~TR_ETRACKER;
    tc->lastAttempt = TC_ATTEMPT_OK;

    if( !tc->interval )
    {
        /* Get the tracker interval, ignore it if it is not between
           10 sec and 5 mins */
        if( !( beFoo = tr_bencDictFind( &beAll, "interval" ) ) ||
            !( beFoo->type & TYPE_INT ) )
        {
            tr_err( "Tracker: no 'interval' field" );
            goto cleanup;
        }
        tc->interval = beFoo->val.i;
        tc->interval = MIN( tc->interval, 300 );
        tc->interval = MAX( 10, tc->interval );
        tr_inf( "Tracker: interval = %d seconds", tc->interval );
    }

    if( ( beFoo = tr_bencDictFind( &beAll, "complete" ) ) &&
        ( beFoo->type & TYPE_INT ) )
    {
        tc->seeders = beFoo->val.i;
    }
    if( ( beFoo = tr_bencDictFind( &beAll, "incomplete" ) ) &&
        ( beFoo->type & TYPE_INT ) )
    {
        tc->leechers = beFoo->val.i;
    }
    if( tc->seeders + tc->leechers >= 50 )
    {
        tc->hasManyPeers = 1;
    }

    if( !( bePeers = tr_bencDictFind( &beAll, "peers" ) ) )
    {
        if( tc->stopped || 0 < tc->newPort )
        {
            goto nodict;
        }
        tr_err( "Tracker: no \"peers\" field" );
        goto cleanup;
    }

    if( bePeers->type & TYPE_LIST )
    {
        char * ip;
        int port;

        /* Original protocol: list of {"ip", "port"} dicts */
        tr_inf( "Tracker: got %d peers", bePeers->val.l.count );
        for( i = 0; i < bePeers->val.l.count; i++ )
        {
            beFoo = tr_bencDictFind( &bePeers->val.l.vals[i], "ip" );
            if( !beFoo )
                continue;
            ip = beFoo->val.s.s;
            beFoo = tr_bencDictFind( &bePeers->val.l.vals[i], "port" );
            if( !beFoo )
                continue;
            port = beFoo->val.i;
            tr_peerAddOld( tor, ip, port );
        }
        if( bePeers->val.l.count >= 50 )
        {
            tc->hasManyPeers = 1;
        }
    }
    else if( bePeers->type & TYPE_STR )
    {
        struct in_addr addr;
        in_port_t port;

        /* "Compact" extension: 6 bytes per peer (4 addr + 2 port) */
        if( bePeers->val.s.i % 6 )
        {
            tr_err( "Tracker: \"peers\" of size %d", bePeers->val.s.i );
            /* FIX: a stray tr_lockUnlock( &tor->lock ) lived on this one
               error path although no other path in this function touches
               the lock; removed for consistency */
            goto cleanup;
        }
        tr_inf( "Tracker: got %d peers", bePeers->val.s.i / 6 );
        for( i = 0; i < bePeers->val.s.i / 6; i++ )
        {
            memcpy( &addr, &bePeers->val.s.s[6*i], 4 );
            memcpy( &port, &bePeers->val.s.s[6*i+4], 2 );
            tr_peerAddCompact( tor, addr, port );
        }
        if( bePeers->val.s.i / 6 >= 50 )
        {
            tc->hasManyPeers = 1;
        }
    }

nodict:
    /* Success */
    tc->started   = 0;
    tc->completed = 0;
    tc->dateOk    = tr_date();

    if( tc->stopped )
    {
        tor->status = TR_STATUS_STOPPED;
        tc->stopped = 0;
    }
    else if( 0 < tc->newPort )
    {
        tc->started  = 1;
        tc->download = tor->downloaded;
        tc->upload   = tor->uploaded;
    }

cleanup:
    if( gotDict )
    {
        tr_bencFree( &beAll );
    }
}