void
tr_cpBlockAdd( tr_completion * cp, tr_block_index_t block )
{
    const tr_torrent * tor = cp->tor;

    if( !tr_cpBlockIsComplete( cp, block ) )
    {
        const tr_piece_index_t piece = tr_torBlockPiece( tor, block );
        const int blockSize = tr_torBlockCountBytes( tor, block );

        ++cp->completeBlocks[piece];

        if( tr_cpPieceIsComplete( cp, piece ) )
            tr_bitfieldAdd( cp->pieceBitfield, piece );

        tr_bitfieldAdd( cp->blockBitfield, block );

        cp->sizeNow += blockSize;

        cp->haveValidIsDirty = 1;
        cp->sizeWhenDoneIsDirty = 1;
    }
}
size_t
tr_cpMissingBytesInPiece (const tr_completion * cp, tr_piece_index_t piece)
{
  if (tr_cpHasAll (cp))
    {
      return 0;
    }
  else
    {
      size_t haveBytes = 0;
      tr_block_index_t f, l;
      const size_t pieceByteSize = tr_torPieceCountBytes (cp->tor, piece);

      tr_torGetPieceBlockRange (cp->tor, piece, &f, &l);

      if (f != l)
        {
          /* nb: we don't pass the usual l+1 here to tr_bitfieldCountRange ().
             It's faster to handle the last block separately because its size
             needs to be checked separately. */
          haveBytes = tr_bitfieldCountRange (&cp->blockBitfield, f, l);
          haveBytes *= cp->tor->blockSize;
        }

      if (tr_bitfieldHas (&cp->blockBitfield, l)) /* handle the last block */
        haveBytes += tr_torBlockCountBytes (cp->tor, l);

      assert (haveBytes <= pieceByteSize);
      return pieceByteSize - haveBytes;
    }
}
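/* Hedged usage sketch (not part of the library): deriving a 0..1 completion
   ratio for one piece from tr_cpMissingBytesInPiece() and tr_torPieceCountBytes(),
   both of which appear above. The helper name is hypothetical. */
static double
sketch_pieceProgress (const tr_completion * cp, tr_piece_index_t piece)
{
  const size_t total = tr_torPieceCountBytes (cp->tor, piece);
  const size_t missing = tr_cpMissingBytesInPiece (cp, piece);

  /* missing <= total is asserted by tr_cpMissingBytesInPiece() above */
  return total != 0 ? (total - missing) / (double) total : 1.0;
}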
void
tr_cpPieceRem( tr_completion * cp, tr_piece_index_t piece )
{
    const tr_torrent *     tor = cp->tor;
    const tr_block_index_t start = tr_torPieceFirstBlock( tor, piece );
    const tr_block_index_t end = start + tr_torPieceCountBlocks( tor, piece );
    tr_block_index_t       block;

    assert( cp );
    assert( piece < tor->info.pieceCount );
    assert( start < tor->blockCount );
    assert( start <= end );
    assert( end <= tor->blockCount );

    for( block = start; block < end; ++block )
        if( tr_cpBlockIsComplete( cp, block ) )
            cp->sizeNow -= tr_torBlockCountBytes( tor, block );

    cp->sizeWhenDoneIsDirty = 1;
    cp->haveValidIsDirty = 1;
    cp->completeBlocks[piece] = 0;
    tr_bitfieldRemRange( cp->blockBitfield, start, end );
    tr_bitfieldRem( cp->pieceBitfield, piece );
}
uint64_t
tr_cpSizeWhenDone( const tr_completion * ccp )
{
    if( ccp->sizeWhenDoneIsDirty )
    {
        uint64_t size = 0;
        const tr_torrent * tor = ccp->tor;
        const tr_info * inf = tr_torrentInfo( tor );
        tr_completion * cp = (tr_completion *) ccp; /* mutable */

        if( tr_cpHasAll( ccp ) )
        {
            size = inf->totalSize;
        }
        else
        {
            tr_piece_index_t p;

            for( p=0; p<inf->pieceCount; ++p )
            {
                uint64_t n = 0;
                const uint64_t pieceSize = tr_torPieceCountBytes( tor, p );

                if( !inf->pieces[p].dnd )
                {
                    n = pieceSize;
                }
                else
                {
                    uint64_t o = 0;
                    tr_block_index_t b, f, l;

                    tr_torGetPieceBlockRange( cp->tor, p, &f, &l );
                    for( b=f; b<=l; ++b )
                        if( tr_cpBlockIsComplete( cp, b ) )
                            n += tr_torBlockCountBytes( tor, b );

                    o = tr_bitfieldCountRange( &cp->blockBitfield, f, l+1 );
                    o *= cp->tor->blockSize;
                    if( l == ( cp->tor->blockCount - 1 ) && tr_bitfieldHas( &cp->blockBitfield, l ) )
                        o -= ( cp->tor->blockSize - cp->tor->lastBlockSize );
                    assert( n == o );
                }

                assert( n <= tr_torPieceCountBytes( tor, p ) );
                size += n;
            }
        }

        assert( size <= inf->totalSize );
        assert( size >= cp->sizeNow );

        cp->sizeWhenDoneLazy = size;
        cp->sizeWhenDoneIsDirty = false;
    }

    return ccp->sizeWhenDoneLazy;
}
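/* Minimal sketch of the block-size invariant the cross-check above relies on:
   every block is tor->blockSize bytes except the torrent's final block, which
   is tor->lastBlockSize bytes. Field and function names are assumptions taken
   from the usage above; this is not the library's implementation. */
static uint32_t
sketch_blockCountBytes( const tr_torrent * tor, tr_block_index_t block )
{
    return block + 1 == tor->blockCount ? tor->lastBlockSize : tor->blockSize;
}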
void
tr_cpBlockAdd( tr_completion * cp, tr_block_index_t block )
{
    const tr_torrent * tor = cp->tor;

    if( !tr_cpBlockIsComplete( cp, block ) )
    {
        tr_bitfieldAdd( &cp->blockBitfield, block );
        cp->sizeNow += tr_torBlockCountBytes( tor, block );

        cp->haveValidIsDirty = true;
        cp->sizeWhenDoneIsDirty = true;
    }
}
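/* Hedged sketch, assuming tr_cpBlockIsComplete() reduces to a lookup in
   cp->blockBitfield (consistent with how the bitfield is added to and removed
   from above); the real implementation may differ. */
static bool
sketch_blockIsComplete( const tr_completion * cp, tr_block_index_t block )
{
    return tr_bitfieldHas( &cp->blockBitfield, block );
}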
static void
fire_client_got_blocks (tr_torrent * tor, tr_webseed * w,
                        tr_block_index_t block, tr_block_index_t count)
{
  tr_block_index_t i;
  tr_peer_event e = TR_PEER_EVENT_INIT;

  e.eventType = TR_PEER_CLIENT_GOT_BLOCK;
  tr_torrentGetBlockLocation (tor, block, &e.pieceIndex, &e.offset, &e.length);

  /* publish one event per block in the run [block, block + count);
     every block has the same length except possibly the torrent's last,
     so the final iteration re-measures its length before publishing */
  for (i = 1; i <= count; i++)
    {
      if (i == count)
        e.length = tr_torBlockCountBytes (tor, block + count - 1);

      publish (w, &e);
      e.offset += e.length;
    }
}
void tr_cpBlockAdd(tr_completion* cp, tr_block_index_t block)
{
    tr_torrent const* tor = cp->tor;

    if (!tr_cpBlockIsComplete(cp, block))
    {
        tr_piece_index_t const piece = tr_torBlockPiece(cp->tor, block);

        tr_bitfieldAdd(&cp->blockBitfield, block);
        cp->sizeNow += tr_torBlockCountBytes(tor, block);

        cp->haveValidIsDirty = true;

        /* sizeWhenDone counts every wanted piece in full plus whatever we
           already have of unwanted (dnd) pieces, so completing a block only
           changes it when that block belongs to a dnd piece */
        cp->sizeWhenDoneIsDirty |= tor->info.pieces[piece].dnd;
    }
}
void
tr_cpPieceRem( tr_completion * cp, tr_piece_index_t piece )
{
    tr_block_index_t i, f, l;
    const tr_torrent * tor = cp->tor;

    tr_torGetPieceBlockRange( cp->tor, piece, &f, &l );

    for( i=f; i<=l; ++i )
        if( tr_cpBlockIsComplete( cp, i ) )
            cp->sizeNow -= tr_torBlockCountBytes( tor, i );

    cp->haveValidIsDirty = true;
    cp->sizeWhenDoneIsDirty = true;
    tr_bitfieldRemRange( &cp->blockBitfield, f, l+1 );
}
uint64_t
tr_cpSizeWhenDone( const tr_completion * ccp )
{
    if( ccp->sizeWhenDoneIsDirty )
    {
        tr_completion * cp = (tr_completion *) ccp; /* mutable */
        const tr_torrent * tor = cp->tor;
        const tr_info * info = &tor->info;
        tr_piece_index_t i;
        uint64_t size = 0;

        for( i = 0; i < info->pieceCount; ++i )
        {
            if( !info->pieces[i].dnd )
            {
                /* we want the piece... */
                size += tr_torPieceCountBytes( tor, i );
            }
            else if( tr_cpPieceIsComplete( cp, i ) )
            {
                /* we have the piece... */
                size += tr_torPieceCountBytes( tor, i );
            }
            else if( cp->completeBlocks[i] )
            {
                /* we have part of the piece... */
                const tr_block_index_t b = tr_torPieceFirstBlock( tor, i );
                const tr_block_index_t e = b + tr_torPieceCountBlocks( tor, i );
                tr_block_index_t j;

                for( j = b; j < e; ++j )
                    if( tr_cpBlockIsComplete( cp, j ) )
                        size += tr_torBlockCountBytes( tor, j );
            }
        }

        cp->sizeWhenDoneLazy = size;
        cp->sizeWhenDoneIsDirty = 0;
    }

    assert( ccp->sizeWhenDoneLazy <= ccp->tor->info.totalSize );
    assert( ccp->sizeWhenDoneLazy >= ccp->sizeNow );
    return ccp->sizeWhenDoneLazy;
}
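/* Hedged sketch of the piece-completeness test used above, assuming it can be
   answered from the per-piece counter (cp->completeBlocks) that tr_cpBlockAdd()
   and tr_cpPieceRem() maintain; not necessarily how the library implements it. */
static tr_bool
sketch_pieceIsComplete( const tr_completion * cp, tr_piece_index_t piece )
{
    return cp->completeBlocks[piece] == tr_torPieceCountBlocks( cp->tor, piece );
}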
/* Initialize a completion object from a bitfield
 * indicating which blocks we have */
tr_bool
tr_cpBlockBitfieldSet( tr_completion * cp, tr_bitfield * blockBitfield )
{
    tr_bool success = FALSE;

    assert( cp );
    assert( blockBitfield );

    /* The bitfield of block flags is typically loaded from a resume file.
       Test the bitfield's length in case the resume file somehow got corrupted */
    if(( success = blockBitfield->byteCount == cp->blockBitfield.byteCount ))
    {
        tr_block_index_t b = 0;
        tr_piece_index_t p = 0;
        uint32_t pieceBlock = 0;
        uint16_t completeBlocksInPiece = 0;
        tr_block_index_t completeBlocksInTorrent = 0;
        uint32_t blocksInCurrentPiece = tr_torPieceCountBlocks( cp->tor, p );

        /* start cp with a state where it thinks we have nothing */
        tr_cpReset( cp );

        /* init our block bitfield from the one passed in */
        memcpy( cp->blockBitfield.bits, blockBitfield->bits, blockBitfield->byteCount );

        /* invalidate the fields that are lazy-evaluated */
        cp->sizeWhenDoneIsDirty = TRUE;
        cp->haveValidIsDirty = TRUE;

        /* to set the remaining fields, we walk through every block... */
        while( b < cp->tor->blockCount )
        {
            if( tr_bitfieldHasFast( blockBitfield, b ) )
                ++completeBlocksInPiece;

            ++b;
            ++pieceBlock;

            /* by the time we reach the end of a piece, we have enough info
               to update that piece's slot in cp.completeBlocks and cp.pieceBitfield */
            if( pieceBlock == blocksInCurrentPiece )
            {
                cp->completeBlocks[p] = completeBlocksInPiece;
                completeBlocksInTorrent += completeBlocksInPiece;

                if( completeBlocksInPiece == blocksInCurrentPiece )
                    tr_bitfieldAdd( &cp->pieceBitfield, p );

                /* reset the per-piece counters because we're starting on a new piece now */
                ++p;
                completeBlocksInPiece = 0;
                pieceBlock = 0;
                blocksInCurrentPiece = tr_torPieceCountBlocks( cp->tor, p );
            }
        }

        /* update sizeNow */
        cp->sizeNow = completeBlocksInTorrent;
        cp->sizeNow *= tr_torBlockCountBytes( cp->tor, 0 );
        if( tr_bitfieldHasFast( &cp->blockBitfield, cp->tor->blockCount-1 ) )
        {
            /* the last block is usually smaller than the other blocks,
               so handle that special case or cp->sizeNow might be too large */
            cp->sizeNow -= tr_torBlockCountBytes( cp->tor, 0 );
            cp->sizeNow += tr_torBlockCountBytes( cp->tor, cp->tor->blockCount-1 );
        }
    }

    return success;
}
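/* Hedged usage sketch: restoring completion state from a resume file's saved
   block bitfield. Only tr_cpBlockBitfieldSet() and tr_cpReset() appear in the
   code above; the helper name and the fall-back policy are assumptions. */
static void
sketch_restoreFromResume( tr_completion * cp, tr_bitfield * savedBlocks )
{
    if( !tr_cpBlockBitfieldSet( cp, savedBlocks ) )
        tr_cpReset( cp ); /* length mismatch: treat the resume data as corrupt
                             and fall back to "we have nothing" */
}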