/* Exercises tr_bitfieldAdd / tr_bitfieldAddRange / tr_bitfieldRemRange,
   including ranges that start, end, or fall entirely inside a word. */
static int
test_bitfields (void)
{
  unsigned int bit;
  const unsigned int nbits = 500;
  tr_bitfield field;

  tr_bitfieldConstruct (&field, nbits);

  /* test tr_bitfieldAdd: set every 7th bit */
  for (bit = 0; bit < nbits; ++bit)
    if (bit % 7 == 0)
      tr_bitfieldAdd (&field, bit);
  for (bit = 0; bit < nbits; ++bit)
    check (tr_bitfieldHas (&field, bit) == (bit % 7 == 0));

  /* test tr_bitfieldAddRange over the whole field */
  tr_bitfieldAddRange (&field, 0, nbits);
  for (bit = 0; bit < nbits; ++bit)
    check (tr_bitfieldHas (&field, bit));

  /* test tr_bitfieldRemRange in the middle of a boundary */
  tr_bitfieldRemRange (&field, 4, 21);
  for (bit = 0; bit < 64; ++bit)
    check (tr_bitfieldHas (&field, bit) == (bit < 4 || bit >= 21));

  /* test tr_bitfieldRemRange on the boundaries */
  tr_bitfieldAddRange (&field, 0, 64);
  tr_bitfieldRemRange (&field, 8, 24);
  for (bit = 0; bit < 64; ++bit)
    check (tr_bitfieldHas (&field, bit) == (bit < 8 || bit >= 24));

  /* test tr_bitfieldRemRange when begin & end are in the same word */
  tr_bitfieldAddRange (&field, 0, 64);
  tr_bitfieldRemRange (&field, 4, 5);
  for (bit = 0; bit < 64; ++bit)
    check (tr_bitfieldHas (&field, bit) == (bit < 4 || bit >= 5));

  /* test tr_bitfieldAddRange in the middle of a boundary */
  tr_bitfieldRemRange (&field, 0, 64);
  tr_bitfieldAddRange (&field, 4, 21);
  for (bit = 0; bit < 64; ++bit)
    check (tr_bitfieldHas (&field, bit) == (bit >= 4 && bit < 21));

  /* test tr_bitfieldAddRange on the boundaries */
  tr_bitfieldRemRange (&field, 0, 64);
  tr_bitfieldAddRange (&field, 8, 24);
  for (bit = 0; bit < 64; ++bit)
    check (tr_bitfieldHas (&field, bit) == (bit >= 8 && bit < 24));

  /* test tr_bitfieldAddRange when begin & end are in the same word */
  tr_bitfieldRemRange (&field, 0, 64);
  tr_bitfieldAddRange (&field, 4, 5);
  for (bit = 0; bit < 64; ++bit)
    check (tr_bitfieldHas (&field, bit) == (bit >= 4 && bit < 5));

  tr_bitfieldDestruct (&field);
  return 0;
}
/**
 * Returns how many bytes of the given piece have not yet been downloaded.
 */
size_t
tr_cpMissingBytesInPiece (const tr_completion * cp, tr_piece_index_t piece)
{
  size_t pieceByteSize;
  size_t bytesWeHave = 0;
  tr_block_index_t first, last;

  if (tr_cpHasAll (cp))
    return 0;

  pieceByteSize = tr_torPieceCountBytes (cp->tor, piece);
  tr_torGetPieceBlockRange (cp->tor, piece, &first, &last);

  /* count the blocks in [first..last) at full block size.
     nb: we deliberately don't pass the usual last+1 to
     tr_bitfieldCountRange(); the final block is handled separately
     below because its byte size must be checked separately. */
  if (first != last)
    {
      bytesWeHave = tr_bitfieldCountRange (&cp->blockBitfield, first, last);
      bytesWeHave *= cp->tor->blockSize;
    }

  /* now add the final block at its true size */
  if (tr_bitfieldHas (&cp->blockBitfield, last))
    bytesWeHave += tr_torBlockCountBytes (cp->tor, last);

  assert (bytesWeHave <= pieceByteSize);
  return pieceByteSize - bytesWeHave;
}
/**
 * Returns the fraction (0.0 .. 1.0) of the piece's blocks
 * that are present in cp's block bitfield.
 */
float
tr_cpPercentBlocksInPiece( tr_completion_t * cp, int piece )
{
    /* fix: removed unused local `tor` (dead store, -Wunused-variable) */
    int i;
    int blockCount, startBlock, endBlock;
    int complete;
    uint8_t * bitfield;

    blockCount = tr_pieceCountBlocks( piece );
    startBlock = tr_pieceStartBlock( piece );
    endBlock   = startBlock + blockCount;

    complete = 0;
    bitfield = cp->blockBitfield;
    for( i = startBlock; i < endBlock; i++ )
    {
        if( tr_bitfieldHas( bitfield, i ) )
        {
            complete++;
        }
    }

    return (float)complete / (float)blockCount;
}
/**
 * Replays an external block bitfield into cp: every set block is added
 * via tr_cpBlockAdd(), and any piece whose blocks are all set is marked
 * complete via tr_cpPieceAdd().
 */
void
tr_cpBlockBitfieldSet( tr_completion_t * cp, uint8_t * bitfield )
{
    /* fix: removed unused local `tor` (dead store, -Wunused-variable) */
    int i, j;
    int startBlock, endBlock;
    int pieceComplete;

    for( i = 0; i < cp->tor->info.pieceCount; i++ )
    {
        startBlock    = tr_pieceStartBlock( i );
        endBlock      = startBlock + tr_pieceCountBlocks( i );
        pieceComplete = 1;

        for( j = startBlock; j < endBlock; j++ )
        {
            if( tr_bitfieldHas( bitfield, j ) )
            {
                tr_cpBlockAdd( cp, j );
            }
            else
            {
                pieceComplete = 0;
            }
        }

        if( pieceComplete )
        {
            tr_cpPieceAdd( cp, i );
        }
    }
}
uint64_t tr_cpSizeWhenDone( const tr_completion * ccp ) { if( ccp->sizeWhenDoneIsDirty ) { uint64_t size = 0; const tr_torrent * tor = ccp->tor; const tr_info * inf = tr_torrentInfo( tor ); tr_completion * cp = (tr_completion *) ccp; /* mutable */ if( tr_cpHasAll( ccp ) ) { size = inf->totalSize; } else { tr_piece_index_t p; for( p=0; p<inf->pieceCount; ++p ) { uint64_t n = 0; const uint64_t pieceSize = tr_torPieceCountBytes( tor, p ); if( !inf->pieces[p].dnd ) { n = pieceSize; } else { uint64_t o = 0; tr_block_index_t b, f, l; tr_torGetPieceBlockRange( cp->tor, p, &f, &l ); for( b=f; b<=l; ++b ) if( tr_cpBlockIsComplete( cp, b ) ) n += tr_torBlockCountBytes( tor, b ); o = tr_bitfieldCountRange( &cp->blockBitfield, f, l+1 ); o *= cp->tor->blockSize; if( l == ( cp->tor->blockCount - 1 ) && tr_bitfieldHas( &cp->blockBitfield, l ) ) o -= ( cp->tor->blockSize - cp->tor->lastBlockSize ); assert( n == o ); } assert( n <= tr_torPieceCountBytes( tor, p ) ); size += n; } } assert( size <= inf->totalSize ); assert( size >= cp->sizeNow ); cp->sizeWhenDoneLazy = size; cp->sizeWhenDoneIsDirty = false; } return ccp->sizeWhenDoneLazy; }
/**
 * Returns the number of bytes the torrent will occupy when download
 * completes. The value is cached; it's only recomputed when dirty.
 */
uint64_t tr_cpSizeWhenDone(tr_completion const* ccp)
{
    if (ccp->sizeWhenDoneIsDirty)
    {
        tr_torrent const* tor = ccp->tor;
        tr_info const* inf = tr_torrentInfo(tor);
        tr_completion* mcp = (tr_completion*)ccp; /* cast away const to refresh the cache */
        uint64_t total = 0;

        if (tr_cpHasAll(ccp))
        {
            total = inf->totalSize;
        }
        else
        {
            for (tr_piece_index_t piece = 0; piece < inf->pieceCount; ++piece)
            {
                uint64_t pieceBytes = 0;

                if (!inf->pieces[piece].dnd)
                {
                    /* wanted piece: all of it counts */
                    pieceBytes = tr_torPieceCountBytes(tor, piece);
                }
                else
                {
                    /* unwanted piece: only the blocks we already have count */
                    tr_block_index_t first;
                    tr_block_index_t last;
                    tr_torGetPieceBlockRange(mcp->tor, piece, &first, &last);
                    pieceBytes = tr_bitfieldCountRange(&mcp->blockBitfield, first, last + 1);
                    pieceBytes *= mcp->tor->blockSize;

                    /* the torrent's final block may be shorter than blockSize */
                    if (last == mcp->tor->blockCount - 1 && tr_bitfieldHas(&mcp->blockBitfield, last))
                    {
                        pieceBytes -= mcp->tor->blockSize - mcp->tor->lastBlockSize;
                    }
                }

                TR_ASSERT(pieceBytes <= tr_torPieceCountBytes(tor, piece));
                total += pieceBytes;
            }
        }

        TR_ASSERT(total <= inf->totalSize);
        TR_ASSERT(total >= mcp->sizeNow);

        mcp->sizeWhenDoneLazy = total;
        mcp->sizeWhenDoneIsDirty = false;
    }

    return ccp->sizeWhenDoneLazy;
}
/**
 * Returns the number of bytes left to download.
 */
uint64_t
tr_cpLeftBytes( tr_completion_t * cp )
{
    tr_torrent_t * tor = cp->tor;
    const uint64_t missingBlocks = (uint64_t) ( cp->tor->blockCount - cp->blockCount );
    const uint64_t trailingBytes = tor->info.totalSize % tor->blockSize;
    uint64_t left = missingBlocks * (uint64_t) tor->blockSize;

    /* if the torrent's final block is shorter than blockSize and we
       don't have it, the tally above overstated it by a full block;
       swap in its true size */
    if( trailingBytes
        && !tr_bitfieldHas( cp->blockBitfield, cp->tor->blockCount - 1 ) )
    {
        left -= tor->blockSize;
        left += trailingBytes;
    }

    return left;
}
/**
 * Resets cp, copies `b` in as its block bitfield, and recomputes
 * cp->sizeNow (bytes downloaded) from the copied bitfield.
 */
void
tr_cpBlockInit( tr_completion * cp, const tr_bitfield * b )
{
    tr_cpReset( cp );

    /* set blockBitfield */
    tr_bitfieldSetFromBitfield( &cp->blockBitfield, b );

    /* set sizeNow: count the blocks we have at full block size... */
    cp->sizeNow = tr_bitfieldCountTrueBits( &cp->blockBitfield );
    assert( cp->sizeNow <= cp->tor->blockCount );
    cp->sizeNow *= cp->tor->blockSize;

    /* ...then, if we have the torrent's final block, correct for it
       possibly being shorter than blockSize */
    if( tr_bitfieldHas( b, cp->tor->blockCount-1 ) )
        cp->sizeNow -= ( cp->tor->blockSize - cp->tor->lastBlockSize );

    assert( cp->sizeNow <= cp->tor->info.totalSize );
}
/* Verifies tr_bitfieldCountRange() against a brute-force per-bit count
   over a randomly-populated bitfield and a random [begin, end) range. */
static int
test_bitfield_count_range (void)
{
  int i;
  int n;
  int begin;
  int end;
  int count1;
  int count2;
  const int bitCount = 100 + tr_cryptoWeakRandInt (1000);
  tr_bitfield bf;

  /* generate a random bitfield */
  tr_bitfieldConstruct (&bf, bitCount);
  for (i=0, n=tr_cryptoWeakRandInt (bitCount); i<n; ++i)
    tr_bitfieldAdd (&bf, tr_cryptoWeakRandInt (bitCount));

  /* pick two distinct endpoints */
  begin = tr_cryptoWeakRandInt (bitCount);
  do
    end = tr_cryptoWeakRandInt (bitCount);
  while (end == begin);

  /* ensure begin <= end (nb: the old comment here said "end <= begin",
     which is the opposite of what the swap below does) */
  if (end < begin)
    {
      const int tmp = begin;
      begin = end;
      end = tmp;
    }

  /* test the bitfield */
  count1 = 0;
  for (i=begin; i<end; ++i)
    if (tr_bitfieldHas (&bf, i))
      ++count1;
  count2 = tr_bitfieldCountRange (&bf, begin, end);
  check (count1 == count2);

  /* cleanup */
  tr_bitfieldDestruct (&bf);
  return 0;
}
/* Returns nonzero if `block` is set in cp's block bitfield,
   i.e. the block has already been downloaded. */
int
tr_cpBlockIsComplete( const tr_completion * cp, tr_block_index_t block )
{
    return tr_bitfieldHas( cp->blockBitfield, block );
}
/* Pieces */

/* Returns nonzero if `piece` is set in cp's piece bitfield. */
int
tr_cpPieceIsComplete( tr_completion_t * cp, int piece )
{
    return tr_bitfieldHas( cp->pieceBitfield, piece );
}
/* Returns nonzero if `block` is set in cp's block bitfield. */
int
tr_cpBlockIsComplete( tr_completion_t * cp, int block )
{
    return tr_bitfieldHas( cp->blockBitfield, block );
}