void * tr_cpCreatePieceBitfield (const tr_completion * cp, size_t * byte_count) { void * ret; tr_piece_index_t n; tr_bitfield pieces; assert (tr_torrentHasMetadata (cp->tor)); n = cp->tor->info.pieceCount; tr_bitfieldConstruct (&pieces, n); if (tr_cpHasAll (cp)) { tr_bitfieldSetHasAll (&pieces); } else if (!tr_cpHasNone (cp)) { tr_piece_index_t i; bool * flags = tr_new (bool, n); for (i=0; i<n; ++i) flags[i] = tr_cpPieceIsComplete (cp, i); tr_bitfieldSetFromFlags (&pieces, flags, n); tr_free (flags); }
/* Returns how many bytes of the given piece are still missing,
 * or 0 if the torrent already has everything. */
size_t
tr_cpMissingBytesInPiece (const tr_completion * cp, tr_piece_index_t piece)
{
    size_t haveBytes;
    size_t pieceByteSize;
    tr_block_index_t first, last;

    if (tr_cpHasAll (cp))
        return 0;

    pieceByteSize = tr_torPieceCountBytes (cp->tor, piece);
    tr_torGetPieceBlockRange (cp->tor, piece, &first, &last);

    /* Count the complete blocks in [first, last).  The final block is
     * deliberately excluded from the range count and handled on its
     * own below, because its byte size may differ from blockSize. */
    haveBytes = 0;
    if (first != last)
    {
        haveBytes = tr_bitfieldCountRange (&cp->blockBitfield, first, last);
        haveBytes *= cp->tor->blockSize;
    }

    if (tr_bitfieldHas (&cp->blockBitfield, last))
        haveBytes += tr_torBlockCountBytes (cp->tor, last);

    assert (haveBytes <= pieceByteSize);
    return pieceByteSize - haveBytes;
}
/* Classifies the torrent's completeness: TR_SEED when every piece is
 * present, TR_PARTIAL_SEED when all *wanted* data is present, and
 * TR_LEECH otherwise (including when metadata is still unknown). */
tr_completeness
tr_cpGetStatus( const tr_completion * cp )
{
    if( tr_cpHasAll( cp ) )
        return TR_SEED;

    /* Without metadata we can't know the wanted size, so treat as a
     * leech; short-circuit keeps tr_cpSizeWhenDone() from running in
     * that case, matching the original evaluation order. */
    if( tr_torrentHasMetadata( cp->tor )
        && ( cp->sizeNow == tr_cpSizeWhenDone( cp ) ) )
        return TR_PARTIAL_SEED;

    return TR_LEECH;
}
uint64_t tr_cpSizeWhenDone( const tr_completion * ccp ) { if( ccp->sizeWhenDoneIsDirty ) { uint64_t size = 0; const tr_torrent * tor = ccp->tor; const tr_info * inf = tr_torrentInfo( tor ); tr_completion * cp = (tr_completion *) ccp; /* mutable */ if( tr_cpHasAll( ccp ) ) { size = inf->totalSize; } else { tr_piece_index_t p; for( p=0; p<inf->pieceCount; ++p ) { uint64_t n = 0; const uint64_t pieceSize = tr_torPieceCountBytes( tor, p ); if( !inf->pieces[p].dnd ) { n = pieceSize; } else { uint64_t o = 0; tr_block_index_t b, f, l; tr_torGetPieceBlockRange( cp->tor, p, &f, &l ); for( b=f; b<=l; ++b ) if( tr_cpBlockIsComplete( cp, b ) ) n += tr_torBlockCountBytes( tor, b ); o = tr_bitfieldCountRange( &cp->blockBitfield, f, l+1 ); o *= cp->tor->blockSize; if( l == ( cp->tor->blockCount - 1 ) && tr_bitfieldHas( &cp->blockBitfield, l ) ) o -= ( cp->tor->blockSize - cp->tor->lastBlockSize ); assert( n == o ); } assert( n <= tr_torPieceCountBytes( tor, p ) ); size += n; } } assert( size <= inf->totalSize ); assert( size >= cp->sizeNow ); cp->sizeWhenDoneLazy = size; cp->sizeWhenDoneIsDirty = false; } return ccp->sizeWhenDoneLazy; }
/* Returns how many blocks of the given piece are still missing,
 * or 0 if the torrent already has everything. */
size_t
tr_cpMissingBlocksInPiece( const tr_completion * cp, tr_piece_index_t piece )
{
    tr_block_index_t first, last;
    size_t blockCount;
    size_t haveCount;

    if( tr_cpHasAll( cp ) )
        return 0;

    tr_torGetPieceBlockRange( cp->tor, piece, &first, &last );
    blockCount = last + 1 - first;
    haveCount = tr_bitfieldCountRange( &cp->blockBitfield, first, last + 1 );
    return blockCount - haveCount;
}
/**
 * Returns the total number of bytes the torrent will contain once all
 * wanted pieces have been downloaded.  The result is recomputed lazily
 * and cached in sizeWhenDoneLazy.
 */
uint64_t tr_cpSizeWhenDone(tr_completion const* ccp)
{
    /* fast path: cached value is still valid */
    if (!ccp->sizeWhenDoneIsDirty)
    {
        return ccp->sizeWhenDoneLazy;
    }

    tr_torrent const* tor = ccp->tor;
    tr_info const* inf = tr_torrentInfo(tor);
    tr_completion* cp = (tr_completion*)ccp; /* mutable */
    uint64_t size = 0;

    if (tr_cpHasAll(ccp))
    {
        size = inf->totalSize;
    }
    else
    {
        for (tr_piece_index_t p = 0; p < inf->pieceCount; ++p)
        {
            uint64_t contribution = 0;
            uint64_t const pieceSize = tr_torPieceCountBytes(tor, p);

            if (!inf->pieces[p].dnd)
            {
                /* wanted piece: counts in full */
                contribution = pieceSize;
            }
            else
            {
                /* unwanted piece: only blocks we already have count */
                tr_block_index_t f;
                tr_block_index_t l;
                tr_torGetPieceBlockRange(cp->tor, p, &f, &l);

                /* assign before multiplying so the product is done in
                 * 64-bit arithmetic, as in the original */
                contribution = tr_bitfieldCountRange(&cp->blockBitfield, f, l + 1);
                contribution *= cp->tor->blockSize;

                /* the torrent's final block may be shorter than blockSize */
                if (l == cp->tor->blockCount - 1 && tr_bitfieldHas(&cp->blockBitfield, l))
                {
                    contribution -= cp->tor->blockSize - cp->tor->lastBlockSize;
                }
            }

            TR_ASSERT(contribution <= tr_torPieceCountBytes(tor, p));
            size += contribution;
        }
    }

    TR_ASSERT(size <= inf->totalSize);
    TR_ASSERT(size >= cp->sizeNow);

    cp->sizeWhenDoneLazy = size;
    cp->sizeWhenDoneIsDirty = false;

    return ccp->sizeWhenDoneLazy;
}
void tr_cpGetAmountDone( const tr_completion * cp, float * tab, int tabCount ) { int i; const bool seed = tr_cpHasAll( cp ); const float interval = cp->tor->info.pieceCount / (float)tabCount; for( i=0; i<tabCount; ++i ) { if( seed ) tab[i] = 1.0f; else { tr_block_index_t f, l; const tr_piece_index_t piece = (tr_piece_index_t)i * interval; tr_torGetPieceBlockRange( cp->tor, piece, &f, &l ); tab[i] = tr_bitfieldCountRange( &cp->blockBitfield, f, l+1 ) / (float)(l+1-f); } } }