Example #1
/* Encode a sorted list of uint16 partition IDs: a 16-bit count followed by
 * each value. */
static void enc_seq_list(const void *list, char **buf)
{
    void *it = sorted_list_iterator(list);
    uint16_t *seq = NULL;

    enc_uint16((uint16_t) sorted_list_size(list), buf);
    seq = sorted_list_next(it);
    while (seq != NULL) {
        enc_uint16(*seq, buf);
        seq = sorted_list_next(it);
    }
    sorted_list_free_iterator(it);
}
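
The enc_uint16 helper is used throughout these examples but never shown. A minimal sketch, assuming the on-disk format stores integers in big-endian (network) byte order and that the helper advances the output cursor; this is hypothetical, not the library's actual implementation:

#include <stdint.h>

/* Hypothetical sketch of enc_uint16: write a 16-bit value big-endian and
 * advance the cursor. The real helper in the codebase may differ. */
static void enc_uint16(uint16_t u, char **buf)
{
    (*buf)[0] = (char) ((u >> 8) & 0xff);
    (*buf)[1] = (char) (u & 0xff);
    *buf += 2;
}
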
Example #2
/* Encode a sorted list of (partition ID, sequence) pairs: a 16-bit count,
 * then a 16-bit partition ID and a 48-bit sequence number per entry. */
static void enc_part_seq_list(const void *list, char **buf)
{
    void *it = sorted_list_iterator(list);
    part_seq_t *pseq = NULL;

    enc_uint16((uint16_t) sorted_list_size(list), buf);
    pseq = sorted_list_next(it);
    while (pseq != NULL) {
        enc_uint16(pseq->part_id, buf);
        enc_uint48(pseq->seq, buf);
        pseq = sorted_list_next(it);
    }
    sorted_list_free_iterator(it);
}
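
Example #2 also depends on enc_uint48 for the 48-bit sequence numbers. Under the same big-endian assumption, a sketch could look like the following; the uint64_t parameter type is a guess, and only the low 48 bits are written:

#include <stdint.h>

/* Hypothetical sketch of enc_uint48: write the low 48 bits of a value,
 * most-significant byte first, and advance the cursor. */
static void enc_uint48(uint64_t u, char **buf)
{
    int i;

    for (i = 5; i >= 0; --i) {
        (*buf)[i] = (char) (u & 0xff);
        u >>= 8;
    }
    *buf += 6;
}
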
Example #3
/* Encode a sorted list of partition versions: a 16-bit count, then for each
 * entry the partition ID, the failover log length, and each failover log
 * entry as an 8-byte UUID followed by a 64-bit sequence number. */
static void enc_part_versions_list(const void *list, char **buf)
{
    void *it = sorted_list_iterator(list);
    part_version_t *pver = NULL;
    uint16_t i;

    enc_uint16((uint16_t) sorted_list_size(list), buf);
    pver = sorted_list_next(it);
    while (pver != NULL) {
        enc_uint16(pver->part_id, buf);
        enc_uint16(pver->num_failover_log, buf);
        for (i = 0; i < pver->num_failover_log; ++i) {
            memcpy(*buf, &(pver->failover_log[i].uuid), 8);
            *buf += 8;
            enc_uint64(pver->failover_log[i].seq, buf);
        }
        pver = sorted_list_next(it);
    }
    sorted_list_free_iterator(it);
}
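
Note that the 8-byte failover UUID is copied verbatim with memcpy, with no byte-order conversion, while the sequence number goes through enc_uint64. A sketch of enc_uint64 under the same big-endian assumption as the helpers above (again hypothetical):

#include <stdint.h>

/* Hypothetical sketch of enc_uint64: write a 64-bit value big-endian and
 * advance the cursor. */
static void enc_uint64(uint64_t u, char **buf)
{
    int i;

    for (i = 7; i >= 0; --i) {
        (*buf)[i] = (char) (u & 0xff);
        u >>= 8;
    }
    *buf += 8;
}
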
Example #4
/* Serialize an index header: build the uncompressed body, snappy-compress it,
 * and prefix the result with the header's 16-byte signature. On success the
 * caller owns *buffer and must free() it. */
couchstore_error_t encode_index_header(const index_header_t *header,
                                       char **buffer,
                                       size_t *buffer_size)
{
    char *buf = NULL, *b = NULL;
    size_t sz = 0;
    uint16_t id_btree_state_size;
    int i;
    size_t comp_size;
    char *comp;
    snappy_status res;

    /* First pass: compute the size of the uncompressed body. */
    sz += 1;                     /* version */
    sz += 2;                     /* number of partitions */
    sz += 3 * BITMASK_BYTE_SIZE; /* active/passive/cleanup bitmasks */
    /* seqs */
    sz += 2;
    sz += sorted_list_size(header->seqs) * (2 + 6);
    /* id btree state */
    sz += 2;
    if (header->id_btree_state != NULL) {
        sz += sizeof(raw_btree_root);
        sz += header->id_btree_state->reduce_value.size;
    }
    /* view btree states */
    sz += 1;
    for (i = 0; i < header->num_views; ++i) {
        sz += 2;
        if (header->view_states[i] != NULL) {
            sz += sizeof(raw_btree_root);
            sz += header->view_states[i]->reduce_value.size;
        }
    }
    /* has_replicas */
    sz += 1;
    /* replicas_on_transfer */
    sz += 2;
    sz += sorted_list_size(header->replicas_on_transfer) * 2;
    /* pending transition active */
    sz += 2;
    sz += sorted_list_size(header->pending_transition.active) * 2;
    /* pending transition passive */
    sz += 2;
    sz += sorted_list_size(header->pending_transition.passive) * 2;
    /* pending transition unindexable */
    sz += 2;
    sz += sorted_list_size(header->pending_transition.unindexable) * 2;
    /* unindexable seqs */
    sz += 2;
    sz += sorted_list_size(header->unindexable_seqs) * (2 + 6);
    /* partition versions */
    if (header->version >= 2) {
        sz += size_of_partition_versions(header->part_versions);
    }

    b = buf = (char *) malloc(sz);
    if (buf == NULL) {
        goto alloc_error;
    }

    /* Second pass: write the fields in the same order they were sized. */
    b[0] = (char) header->version;
    b += 1;

    enc_uint16(header->num_partitions, &b);

    memcpy(b, &header->active_bitmask, BITMASK_BYTE_SIZE);
    b += BITMASK_BYTE_SIZE;
    memcpy(b, &header->passive_bitmask, BITMASK_BYTE_SIZE);
    b += BITMASK_BYTE_SIZE;
    memcpy(b, &header->cleanup_bitmask, BITMASK_BYTE_SIZE);
    b += BITMASK_BYTE_SIZE;

    enc_part_seq_list(header->seqs, &b);

    if (header->id_btree_state != NULL) {
        id_btree_state_size = (uint16_t) sizeof(raw_btree_root);
        id_btree_state_size += (uint16_t) header->id_btree_state->reduce_value.size;
    } else {
        id_btree_state_size = 0;
    }
    enc_uint16(id_btree_state_size, &b);

    encode_root(b, header->id_btree_state);
    b += id_btree_state_size;

    b[0] = (char) header->num_views;
    b += 1;
    for (i = 0; i < header->num_views; ++i) {
        uint16_t view_state_size = 0;

        if (header->view_states[i] != NULL) {
            view_state_size = (uint16_t) sizeof(raw_btree_root);
            view_state_size += (uint16_t) header->view_states[i]->reduce_value.size;
        }
        enc_uint16(view_state_size, &b);

        encode_root(b, header->view_states[i]);
        b += view_state_size;
    }

    b[0] = (char) (header->has_replica ? 1 : 0);
    b += 1;

    enc_seq_list(header->replicas_on_transfer, &b);
    enc_seq_list(header->pending_transition.active, &b);
    enc_seq_list(header->pending_transition.passive, &b);
    enc_seq_list(header->pending_transition.unindexable, &b);
    enc_part_seq_list(header->unindexable_seqs, &b);

    if (header->version >= 2) {
        enc_part_versions_list(header->part_versions, &b);
    }

    /* Compress the body with snappy; the first 16 bytes of the output are
       reserved for the uncompressed signature. */
    comp_size = snappy_max_compressed_length(sz);
    comp = (char *) malloc(16 + comp_size);

    if (comp == NULL) {
        goto alloc_error;
    }

    res = snappy_compress(buf, sz, comp + 16, &comp_size);

    if (res != SNAPPY_OK) {
        /* TODO: a new error for couchstore_error_t */
        free(comp);
        goto alloc_error;
    }

    memcpy(comp, header->signature, 16);
    *buffer = comp;
    *buffer_size = 16 + comp_size;
    free(buf);

    return COUCHSTORE_SUCCESS;

 alloc_error:
    free(buf);
    *buffer = NULL;
    *buffer_size = 0;
    return COUCHSTORE_ERROR_ALLOC_FAIL;
}
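
On success, encode_index_header hands the caller a malloc'd buffer (signature plus snappy-compressed body) that the caller must free; on failure it returns COUCHSTORE_ERROR_ALLOC_FAIL with *buffer set to NULL. A hypothetical caller illustrating that contract (write_header_to_disk is a made-up stand-in for whatever I/O the caller performs):

#include <stdlib.h>

static couchstore_error_t save_header(const index_header_t *header)
{
    char *buf = NULL;
    size_t size = 0;
    couchstore_error_t ret;

    ret = encode_index_header(header, &buf, &size);
    if (ret != COUCHSTORE_SUCCESS) {
        return ret; /* *buffer is NULL and *buffer_size is 0 on failure */
    }

    ret = write_header_to_disk(buf, size); /* hypothetical I/O helper */
    free(buf);
    return ret;
}
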
Example #5
/* Decode a binary index header and check every decoded field against the
 * values expected for this test fixture. Returns the decoded header. */
static index_header_t *test_index_header_decoding(const char *header_bin, size_t header_bin_size)
{
    uint16_t active[] = { 3, 7, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63 };
    uint16_t passive[] = {
        0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25,
        26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45, 46, 48, 49,
        50, 52, 53, 54, 56, 57, 60, 62
    };
    uint16_t cleanup[] = { 11, 15, 58, 61 };
    uint16_t unindexable[] = { 0, 63 };
    uint16_t replicas_on_transfer[] = { 5, 10, 60, 62 };
    uint16_t pending_active[] = { 11, 15 };
    uint16_t pending_passive[] = { 58, 61 };
    uint16_t pending_unindexable[] = { 15, 58 };
    index_header_t *header = NULL;
    bitmap_t expected_active, expected_passive, expected_cleanup;
    unsigned i;

    assert(decode_index_header(header_bin, header_bin_size, &header) == COUCHSTORE_SUCCESS);
    assert(header != NULL);

    assert(header->version == 1);
    assert(memcmp(header->signature, header_bin, 16) == 0);
    assert(header->num_partitions == 64);
    assert(header->num_views == 2);

    memset(&expected_active, 0, sizeof(expected_active));
    for (i = 0; i < (sizeof(active) / sizeof(active[0])); ++i) {
        set_bit(&expected_active, active[i]);
    }
    assert(memcmp(&header->active_bitmask, &expected_active, sizeof(expected_active)) == 0);

    memset(&expected_passive, 0, sizeof(expected_passive));
    for (i = 0; i < (sizeof(passive) / sizeof(passive[0])); ++i) {
        set_bit(&expected_passive, passive[i]);
    }
    assert(memcmp(&header->passive_bitmask, &expected_passive, sizeof(expected_passive)) == 0);

    memset(&expected_cleanup, 0, sizeof(expected_cleanup));
    for (i = 0; i < (sizeof(cleanup) / sizeof(cleanup[0])); ++i) {
        set_bit(&expected_cleanup, cleanup[i]);
    }
    assert(memcmp(&header->cleanup_bitmask, &expected_cleanup, sizeof(expected_cleanup)) == 0);

    assert(sorted_list_size(header->seqs) == 58);
    for (uint16_t i = 0; i < 64; ++i) {
        switch (i) {
            /* unindexable */
        case 0:
        case 63:
            /* cleanup */
        case 11:
        case 15:
        case 58:
        case 61:
            continue;
        default:
            break;
        }

        part_seq_t rs, *pseq;
        rs.part_id = i;

        pseq = (part_seq_t *) sorted_list_get(header->seqs, &rs);
        assert(pseq != NULL);
        assert(pseq->part_id == i);
        assert(pseq->seq == 1221);
    }

    int num_unindexable = sizeof(unindexable) / sizeof(unindexable[0]);
    assert(sorted_list_size(header->unindexable_seqs) == num_unindexable);
    for (int i = 0; i < num_unindexable; ++i) {
        part_seq_t rs, *pseq;
        rs.part_id = unindexable[i];

        pseq = (part_seq_t *) sorted_list_get(header->unindexable_seqs, &rs);
        assert(pseq != NULL);
        assert(pseq->part_id == unindexable[i]);
        assert(pseq->seq == 1221);
    }

    assert(header->id_btree_state->pointer == 1617507);
    assert(header->id_btree_state->subtreesize == 1286028);
    assert(header->id_btree_state->reduce_value.size == 133);
    /* TODO: once view reduction decoding is done, test the exact reduction value. */

    assert(header->view_states[0]->pointer == 2901853);
    assert(header->view_states[0]->subtreesize == 1284202);
    assert(header->view_states[0]->reduce_value.size == 140);
    /* TODO: once view reduction decoding is done, test the exact reduction value. */

    assert(header->view_states[1]->pointer == 4180175);
    assert(header->view_states[1]->subtreesize == 1278451);
    assert(header->view_states[1]->reduce_value.size == 140);
    /* TODO: once view reduction decoding is done, test the exact reduction value. */

    assert(header->has_replica == 1);
    assert(header->replicas_on_transfer != NULL);

    int num_reps = (sizeof(replicas_on_transfer) /
                    sizeof(replicas_on_transfer[0]));

    assert(sorted_list_size(header->replicas_on_transfer) == num_reps);
    for (int i = 0; i < num_reps; ++i) {
        uint16_t *part_id = sorted_list_get(header->replicas_on_transfer,
                                            &replicas_on_transfer[i]);
        assert(part_id != NULL);
        assert(*part_id == replicas_on_transfer[i]);
    }

    int num_pending_active = sizeof(pending_active) / sizeof(pending_active[0]);
    assert(sorted_list_size(header->pending_transition.active) == num_pending_active);
    for (int i = 0; i < num_pending_active; ++i) {
        uint16_t *part_id = sorted_list_get(header->pending_transition.active,
                                            &pending_active[i]);
        assert(part_id != NULL);
        assert(*part_id == pending_active[i]);
    }

    int num_pending_passive = sizeof(pending_passive) / sizeof(pending_passive[0]);
    assert(sorted_list_size(header->pending_transition.passive) == num_pending_passive);
    for (int i = 0; i < num_pending_passive; ++i) {
        uint16_t *part_id = sorted_list_get(header->pending_transition.passive,
                                            &pending_passive[i]);
        assert(part_id != NULL);
        assert(*part_id == pending_passive[i]);
    }

    int num_pending_unindexable = sizeof(pending_unindexable) / sizeof(pending_unindexable[0]);
    assert(sorted_list_size(header->pending_transition.unindexable) == num_pending_unindexable);
    for (int i = 0; i < num_pending_unindexable; ++i) {
        uint16_t *part_id = sorted_list_get(header->pending_transition.unindexable,
                                            &pending_unindexable[i]);
        assert(part_id != NULL);
        assert(*part_id == pending_unindexable[i]);
    }

    return header;
}
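
A natural companion check is a decode/encode round trip built from Example #4 and this decoder test. The sketch below assumes a free_index_header cleanup function exists in this codebase and that the snappy compressor output is deterministic, so re-encoding reproduces the original bytes; treat it as an illustration, not a verified test:

#include <assert.h>
#include <stdlib.h>
#include <string.h>

static void test_header_round_trip(const char *header_bin, size_t header_bin_size)
{
    index_header_t *header;
    char *enc = NULL;
    size_t enc_size = 0;

    header = test_index_header_decoding(header_bin, header_bin_size);
    assert(encode_index_header(header, &enc, &enc_size) == COUCHSTORE_SUCCESS);

    /* re-encoding the decoded header should reproduce the input exactly */
    assert(enc_size == header_bin_size);
    assert(memcmp(enc, header_bin, header_bin_size) == 0);

    free(enc);
    free_index_header(header); /* assumed cleanup function */
}
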