Example #1
/* Opens the blob containing the given row id.
   If *blob_ref already covers the given id, does nothing;
   otherwise releases the current blob and opens a new one.
*/
static rc_t open_blob_for_current_id (
    int64_t id,
    VCursor const* cursor_ref, VBlob const** blob_ref, uint32_t col_idx,
    char* error_buf,
    size_t error_buf_size
    )
{
    rc_t rc = 0;
    if (*blob_ref == NULL)
    {
        rc = VCursorGetBlobDirect ( cursor_ref, blob_ref, id, col_idx);
        if ( rc != 0)
        {
            rc_t res = string_printf ( error_buf, error_buf_size, NULL,
                "ERROR: VCursorGetBlobDirect(init) failed with error: 0x%08x (%u) [%R]", rc, rc, rc);
            if ( GetRCObject ( res ) == ( enum RCObject ) rcBuffer && GetRCState ( res ) == rcInsufficient )
                error_buf [ error_buf_size - 1 ] = '\0';

            return rc;
        }
    }
    else
    {
        int64_t start_id;
        uint64_t count;
        rc = VBlobIdRange ( *blob_ref, & start_id, & count );
        if ( rc != 0)
        {
            rc_t res = string_printf ( error_buf, error_buf_size, NULL,
                "ERROR: VBlobIdRange failed with error: 0x%08x (%u) [%R]", rc, rc, rc);
            if ( GetRCObject ( res ) == ( enum RCObject ) rcBuffer && GetRCState ( res ) == rcInsufficient )
                error_buf [ error_buf_size - 1 ] = '\0';

            return rc;
        }

        if (id >= start_id + (int64_t)count || id < start_id)
        {
            VBlobRelease ( *blob_ref );
            *blob_ref = NULL;

            rc = VCursorGetBlobDirect ( cursor_ref, blob_ref, id, col_idx);
            if ( rc != 0)
            {
                rc_t res = string_printf ( error_buf, error_buf_size, NULL,
                    "ERROR: VCursorGetBlobDirect failed with error: 0x%08x (%u) [%R]", rc, rc, rc);
                if ( GetRCObject ( res ) == ( enum RCObject ) rcBuffer && GetRCState ( res ) == rcInsufficient )
                    error_buf [ error_buf_size - 1 ] = '\0';

                return rc;
            }
        }
    }
    return rc;
}
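
A minimal sketch of how a caller might drive this helper while scanning rows sequentially; the scan_rows function, the cursor setup, and the row range below are assumptions for illustration, not part of the original example:

/* Hypothetical caller: visit rows [first_id, first_id + row_count) and
   let open_blob_for_current_id reuse the cached blob for rows that fall
   into the same blob id range. */
static rc_t scan_rows ( VCursor const* cursor, uint32_t col_idx,
    int64_t first_id, uint64_t row_count )
{
    char error_buf [ 512 ];
    VBlob const* blob = NULL;   /* opened lazily, reused between rows */
    rc_t rc = 0;
    int64_t id;

    for ( id = first_id; id < first_id + (int64_t)row_count; ++ id )
    {
        rc = open_blob_for_current_id ( id, cursor, & blob, col_idx,
            error_buf, sizeof error_buf );
        if ( rc != 0 )
            break;  /* error_buf holds the formatted message */

        /* ... read the cell for row `id` out of the blob here ... */
    }

    if ( blob != NULL )
        VBlobRelease ( blob );

    return rc;
}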
Example #2
static
bool CC run_trigger_prods ( void *item, void *data )
{
    run_trigger_prod_data *pb = data;
    VProduction *prod = item;

    VBlob *blob;
    /* evaluate the trigger production for the requested row range */
    pb -> rc = VProductionReadBlob ( prod, & blob, pb -> id, pb -> cnt, NULL );
    if ( pb -> rc != 0 )
        return true;    /* a failure stops the iteration */
    if ( blob != NULL )
    {
        TRACK_BLOB ( VBlobRelease, blob );
        ( void ) VBlobRelease ( blob );
    }
    return false;
}
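
The bool CC ( void *item, void *data ) shape matches the klib Vector iteration callbacks, so a driver along the lines of the sketch below is plausible; the run_trigger_prod_data layout is inferred from the fields used above, and the trigger vector itself is an assumption:

#include <klib/vector.h>

/* Layout inferred from the usage above (pb->rc, pb->id, pb->cnt);
   the real definition lives elsewhere in the vdb sources. */
typedef struct run_trigger_prod_data
{
    rc_t rc;
    int64_t id;
    uint32_t cnt;
} run_trigger_prod_data;

/* Hypothetical driver: run every trigger production for one row range,
   stopping at the first failure (VectorDoUntil stops as soon as the
   callback returns true). */
static rc_t run_all_triggers ( const Vector *triggers, int64_t id, uint32_t cnt )
{
    run_trigger_prod_data pb;
    pb . rc = 0;
    pb . id = id;
    pb . cnt = cnt;

    VectorDoUntil ( triggers, false, run_trigger_prods, & pb );
    return pb . rc;
}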
Example #3
static void remove_unneeded_alignments (PileupIteratorState* pileup_state, uint64_t ref_pos, char* error_buf, size_t error_buf_size)
{
#if USE_BLOB_CACHE_FOR_ALIGNMENTS
    int64_t max_removed_id = 0;
#endif
#if CACHE_IMPL_AS_LIST == 1
    Alignment_CacheItem* item = ( Alignment_CacheItem* ) DLListHead ( & pileup_state->cache_alignment.list_alignments );

    for (; item != NULL; )
    {
        uint64_t local_ref_pos = ref_pos - item->seq_start;
        if ( item->start + item->len <= local_ref_pos ) /* "<=" rather than "<": local_ref_pos is the old position, which is incremented right after this function returns */
        {
            Alignment_CacheItem* item_cur = item;
            item = (Alignment_CacheItem*) DLNodeNext( & item_cur->node );
#if USE_BLOB_CACHE_FOR_ALIGNMENTS
            /* remember the highest row id removed so far */
            if ( item_cur->row_id > max_removed_id )
                max_removed_id = item_cur->row_id;
#endif

            DLListUnlink ( & pileup_state->cache_alignment.list_alignments, & item_cur->node);
            Alignment_CacheItemWhack ( & item_cur->node, NULL );
            -- pileup_state->cache_alignment.size;
        }
        else
            item = (Alignment_CacheItem*) DLNodeNext( & item->node );
    }
#else
#if 0
    /* Improved (?) inplace version */

    /* i - current index into the cache (the cache size can decrease
           during this algorithm, and so can i);
           all cache[j] with j < i have already been processed, i.e. they
           are present in the new cache state.

       size - updated size of the cache
       gap_start - index of the first of the consecutive elements found to be
                 removable from the current cache (so [gap_start, i) is the
                 semi-open interval that needs to be removed from the cache)
    */
    size_t i = 0;
    Alignment_Cache* cache = & pileup_state->cache_alignment;
    size_t size = cache->size;
    size_t gap_start = 0;
    for (; i < size;)
    {
        Alignment_CacheItem const* item = & cache->data [i];
        uint64_t local_ref_pos = ref_pos - item->seq_start;
        if ( item->start + item->len > local_ref_pos ) /* ">" rather than ">=": local_ref_pos is the old position, which is incremented right after this function returns */
        {
            /* if there is a gap before i, move the kept tail up over it */
            if ( gap_start != i )
            {
                memmove ( & cache->data[gap_start], & cache->data[i], (size-i)*sizeof(Alignment_CacheItem) );
                size -= i - gap_start;
                i = gap_start;
            }

            ++i;
            gap_start = i;
        }
        else
        {
            ++i;
        }
    }
    if ( gap_start != i )
        size = gap_start;

    cache->size = size;
#else
    size_t i_src, i_dst;
    Alignment_Cache* cache = & pileup_state->cache_alignment;
    size_t size = cache->size;

    for (i_src = 0; i_src < size; ++ i_src)
    {
        Alignment_CacheItem const* item = & cache->data [i_src];
        uint64_t local_ref_pos = ref_pos - item->seq_start;
        if ( item->start + item->len > local_ref_pos ) /* ">" rather than ">=": local_ref_pos is the old position, which is incremented right after this function returns */
            break;
    }

    for (i_dst = 0; i_src < size; ++ i_src)
    {
        Alignment_CacheItem const* item = & cache->data [i_src];
        uint64_t local_ref_pos = ref_pos - item->seq_start;
        if ( item->start + item->len > local_ref_pos ) /* ">" rather than ">=": local_ref_pos is the old position, which is incremented right after this function returns */
        {
            if (i_dst != i_src)
                cache->data [i_dst] = cache->data [i_src];
            ++ i_dst;
        }
    }

    cache->size = i_dst;

#endif
#endif

#if USE_BLOB_CACHE_FOR_ALIGNMENTS
    if ( max_removed_id > 0 )
    {
        size_t i_src, i_dst;
        BlobItem* blobs = pileup_state->blobs_alignments.blobs;
        size_t size = pileup_state->blobs_alignments.size;

        for (i_src = 0; i_src < size; ++ i_src)
        {
            BlobItem const* item = & blobs [i_src];
            int64_t start_id;
            uint64_t count;
            rc_t rc = VBlobIdRange ( item->blob, & start_id, & count );
            if ( rc != 0 )
            {
                rc_t res = string_printf ( error_buf, error_buf_size, NULL,
                    "ERROR: VBlobIdRange failed with error: 0x%08x (%u) [%R]", rc, rc, rc);
                if ( GetRCObject ( res ) == ( enum RCObject ) rcBuffer && GetRCState ( res ) == rcInsufficient )
                    error_buf [ error_buf_size - 1 ] = '\0';

                return; /* this function returns void: the error has been reported via error_buf */
            }
            if ( start_id + (int64_t)count > max_removed_id )
            {
                /* Current blob contains ids greater than last removed one
                   we don't want to look further because most probably
                   all the following blobs will also contain ids that we will
                   need to keep in the cache, so we exit immediately
                */
                break;
            }
            else
            {
                /* every id in this blob precedes the removed range: release it */
                VBlobRelease ( item->blob );
            }
        }

        /* compact the blob array: shift the remaining (kept) blobs down
           over the slots of the released ones */
        for (i_dst = 0; i_src < size; ++ i_src, ++ i_dst)
        {
            if (i_dst != i_src)
                blobs [i_dst] = blobs [i_src];
        }

        pileup_state->blobs_alignments.size = i_dst;
    }
#endif
}
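
The array branch above removes elements with a two-pass, in-place compaction: first skip the removable prefix, then shift every kept element down. Below is a self-contained sketch of the same idiom on a plain int array, with a hypothetical keep() predicate standing in for the position test:

#include <stddef.h>
#include <stdio.h>

/* stand-in for the "alignment still overlaps the position" test */
static int keep ( int value ) { return value >= 10; }

static size_t compact ( int* data, size_t size )
{
    size_t i_src, i_dst;

    /* pass 1: find the first element to keep */
    for ( i_src = 0; i_src < size; ++ i_src )
        if ( keep ( data [ i_src ] ) )
            break;

    /* pass 2: shift the remaining kept elements down */
    for ( i_dst = 0; i_src < size; ++ i_src )
    {
        if ( keep ( data [ i_src ] ) )
        {
            if ( i_dst != i_src )
                data [ i_dst ] = data [ i_src ];
            ++ i_dst;
        }
    }
    return i_dst; /* the new size */
}

int main ( void )
{
    int data [] = { 1, 2, 30, 4, 50 };
    size_t n = compact ( data, sizeof data / sizeof data [ 0 ] );
    printf ( "%zu kept\n", n ); /* prints "2 kept": 30 and 50 remain */
    return 0;
}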