Example #1
static void
test_To_Array(TestBatch *batch) {
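    // Draw 20 random integers from a small range, so duplicates are
    // possible; the dedup pass below strips them out.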
    uint64_t  *source_ints = TestUtils_random_u64s(NULL, 20, 0, 200);
    BitVector *bit_vec = BitVec_new(0);
    I32Array  *array;
    long       num_unique = 0;
    long       i;

    // Unique the random ints.
    Sort_quicksort(source_ints, 20, sizeof(uint64_t),
                   S_compare_u64s, NULL);
    for (i = 0; i < 19; i++) {
        if (source_ints[i] != source_ints[i + 1]) {
            source_ints[num_unique] = source_ints[i];
            num_unique++;
        }
    }
    // The loop above never keeps the final element, which is always the
    // last occurrence of its value, so append it now.
    source_ints[num_unique] = source_ints[19];
    num_unique++;

    // Set bits.
    for (i = 0; i < num_unique; i++) {
        BitVec_Set(bit_vec, (uint32_t)source_ints[i]);
    }

    // Create the array and compare it to the source.
    array = BitVec_To_Array(bit_vec);
    for (i = 0; i < num_unique; i++) {
        if (I32Arr_Get(array, i) != (int32_t)source_ints[i]) { break; }
    }
    TEST_INT_EQ(batch, i, num_unique, "To_Array (%ld == %ld)", i,
                num_unique);

    DECREF(array);
    DECREF(bit_vec);
    FREEMEM(source_ints);
}
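Note: S_compare_u64s is defined elsewhere in the test file and not shown
above. As a point of reference, here is a minimal sketch of what it could
look like, assuming the Sort_Compare_t convention these examples share (an
opaque context pointer, then pointers to the two elements, returning
negative/zero/positive):

// Sketch only, not the actual Lucy helper: a qsort-style comparator
// for uint64_t elements. The context argument is unused.
static int
S_compare_u64s(void *context, const void *va, const void *vb) {
    uint64_t a = *(const uint64_t*)va;
    uint64_t b = *(const uint64_t*)vb;
    (void)context;
    if (a < b)      { return -1; }
    else if (a > b) { return 1; }
    else            { return 0; }
}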
Example #2
// Create all the spans needed by HeatMap_Flatten_Spans, based on the source
// offsets and lengths... but leave the scores at 0.
static VArray*
S_flattened_but_empty_spans(VArray *spans) {
    const uint32_t num_spans = VA_Get_Size(spans);
    int32_t *bounds = (int32_t*)MALLOCATE((num_spans * 2) * sizeof(int32_t));

    // Assemble a list of all unique start/end boundaries.
    for (uint32_t i = 0; i < num_spans; i++) {
        Span *span            = (Span*)VA_Fetch(spans, i);
        bounds[i]             = span->offset;
        bounds[i + num_spans] = span->offset + span->length;
    }
    Sort_quicksort(bounds, num_spans * 2, sizeof(int32_t),
                   S_compare_i32, NULL);
    uint32_t num_bounds = 0;
    int32_t  last       = I32_MAX;
    for (uint32_t i = 0; i < num_spans * 2; i++) {
        if (bounds[i] != last) {
            bounds[num_bounds++] = bounds[i];
            last = bounds[i];
        }
    }

    // Create one Span for each zone between two bounds.
    VArray *flattened = VA_new(num_bounds - 1);
    for (uint32_t i = 0; i < num_bounds - 1; i++) {
        int32_t  start   = bounds[i];
        int32_t  length  = bounds[i + 1] - start;
        VA_Push(flattened, (Obj*)Span_new(start, length, 0.0f));
    }

    FREEMEM(bounds);
    return flattened;
}
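Note: S_compare_i32 is likewise defined elsewhere. A plausible sketch
under the same convention follows; the branchless return avoids the
classic "return a - b;" shortcut, which can overflow for widely separated
int32_t values:

// Sketch only: int32_t comparator for Sort_quicksort.
static int
S_compare_i32(void *context, const void *va, const void *vb) {
    int32_t a = *(const int32_t*)va;
    int32_t b = *(const int32_t*)vb;
    (void)context;
    return (a > b) - (a < b);
}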
Example #3
void
Inversion_Invert_IMP(Inversion *self) {
    InversionIVARS *const ivars = Inversion_IVARS(self);
    Token   **tokens = ivars->tokens;
    Token   **limit  = tokens + ivars->size;
    int32_t   token_pos = 0;

    // Thwart future attempts to append.
    if (ivars->inverted) {
        THROW(ERR, "Inversion has already been inverted");
    }
    ivars->inverted = true;

    // Assign token positions.
    for (; tokens < limit; tokens++) {
        TokenIVARS *const cur_token_ivars = Token_IVARS(*tokens);
        cur_token_ivars->pos = token_pos;
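        // Advance in unsigned arithmetic to sidestep signed-overflow UB;
        // the comparison below then catches wraparound or a backwards step.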
        token_pos = (int32_t)((uint32_t)token_pos
                              + (uint32_t)cur_token_ivars->pos_inc);
        if (token_pos < cur_token_ivars->pos) {
            THROW(ERR, "Token positions out of order: %i32 %i32",
                  cur_token_ivars->pos, token_pos);
        }
    }

    // Sort the tokens lexically, and hand off to cluster counting routine.
    Sort_quicksort(ivars->tokens, ivars->size, sizeof(Token*), Token_compare,
                   NULL);
    S_count_clusters(self, ivars);
}
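Note: Token_compare is Lucy's own comparator and is not reproduced here.
Purely as an illustration of a lexical sort under the same convention, a
hypothetical comparator over a simplified token type might look like the
following. The MyToken struct and its fields are invented for the sketch;
because the sorted elements are pointers, each void* argument points at a
pointer slot:

#include <stdint.h>
#include <string.h>

// Hypothetical sketch, not Lucy's Token_compare.
typedef struct MyToken {
    const char *text;  // NUL-terminated token text
    int32_t     pos;   // position assigned during inversion
} MyToken;

static int
S_compare_my_tokens(void *context, const void *va, const void *vb) {
    const MyToken *a = *(const MyToken *const*)va;
    const MyToken *b = *(const MyToken *const*)vb;
    int comparison = strcmp(a->text, b->text);
    (void)context;
    if (comparison == 0) {
        // Break ties on position so equal terms stay in document order.
        comparison = (a->pos > b->pos) - (a->pos < b->pos);
    }
    return comparison;
}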
Example #4
void
VA_Sort_IMP(VArray *self, CFISH_Sort_Compare_t compare, void *context) {
    if (!compare) { compare = S_default_compare; }
    Sort_quicksort(self->elems, self->size, sizeof(void*), compare, context);
}
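Usage note: passing NULL for compare falls back to S_default_compare. A
hypothetical caller-supplied comparator, sorting a VArray of Span objects
by offset (elements are stored as object-pointer slots, so the comparator
dereferences twice):

// Hypothetical sketch: order Spans by their offset field.
static int
S_compare_span_offsets(void *context, const void *va, const void *vb) {
    Span *a = *(Span**)va;
    Span *b = *(Span**)vb;
    (void)context;
    return (a->offset > b->offset) - (a->offset < b->offset);
}

// Invoked via the public method wrapper:
// VA_Sort(spans, S_compare_span_offsets, NULL);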