/* Decode a view group index header.
 *
 * Wire layout: bytes[0..15] hold the MD5 group signature, and
 * bytes[16..len-1] hold a snappy-compressed payload containing the header
 * fields (version, partition bitmasks, per-partition sequences, b-tree
 * roots, transition lists and, for version >= 2, failover logs).
 *
 * On success stores a newly allocated header in *header; the caller owns
 * it and must release it with free_index_header().
 *
 * Returns COUCHSTORE_SUCCESS, COUCHSTORE_ERROR_CORRUPT for a short or
 * non-decompressible buffer, or COUCHSTORE_ERROR_ALLOC_FAIL on
 * allocation failure.
 *
 * NOTE(review): decode errors after decompression (e.g. a failed
 * sorted_list_add) are reported as ALLOC_FAIL, matching the original
 * behavior.
 */
couchstore_error_t decode_index_header(const char *bytes, size_t len,
                                       index_header_t **header)
{
    index_header_t *h = NULL;
    char *b = NULL, *uncomp = NULL;
    uint16_t num_seqs, i, j, sz, num_part_versions;
    size_t uncompLen;

    /* First 16 bytes are md5 checksum (group signature). */
    if (len <= 16) {
        return COUCHSTORE_ERROR_CORRUPT;
    }

    /* Fix: the compressed payload is only len - 16 bytes; the original
     * passed the full buffer length, overstating it by the signature. */
    if (snappy_uncompressed_length(bytes + 16, len - 16, &uncompLen) != SNAPPY_OK) {
        return COUCHSTORE_ERROR_CORRUPT;
    }

    b = uncomp = (char *) malloc(uncompLen);
    if (b == NULL) {
        return COUCHSTORE_ERROR_ALLOC_FAIL;
    }

    if (snappy_uncompress(bytes + 16, len - 16, b, &uncompLen) != SNAPPY_OK) {
        goto alloc_error;
    }

    h = (index_header_t *) malloc(sizeof(index_header_t));
    if (h == NULL) {
        goto alloc_error;
    }
    /* NULL every owned pointer up front so free_index_header() is safe on
     * any error path.  Fix: part_versions was previously left
     * uninitialized, exposing a garbage pointer both on early error paths
     * and on successfully decoded version < 2 headers. */
    h->seqs = NULL;
    h->id_btree_state = NULL;
    h->view_states = NULL;
    h->replicas_on_transfer = NULL;
    h->pending_transition.active = NULL;
    h->pending_transition.passive = NULL;
    h->pending_transition.unindexable = NULL;
    h->unindexable_seqs = NULL;
    h->part_versions = NULL;

    /* Signature comes from the *compressed* prefix, not the payload. */
    memcpy(h->signature, bytes, 16);

    h->version = (uint8_t) b[0];
    b += 1;

    h->num_partitions = dec_uint16(b);
    b += 2;

    memcpy(&h->active_bitmask, b, BITMASK_BYTE_SIZE);
    b += BITMASK_BYTE_SIZE;
    memcpy(&h->passive_bitmask, b, BITMASK_BYTE_SIZE);
    b += BITMASK_BYTE_SIZE;
    memcpy(&h->cleanup_bitmask, b, BITMASK_BYTE_SIZE);
    b += BITMASK_BYTE_SIZE;

    /* Per-partition update sequences: (part_id:16, seq:48) pairs. */
    num_seqs = dec_uint16(b);
    b += 2;

    h->seqs = sorted_list_create(part_seq_cmp);
    if (h->seqs == NULL) {
        goto alloc_error;
    }

    for (i = 0; i < num_seqs; ++i) {
        part_seq_t pseq;

        pseq.part_id = dec_uint16(b);
        b += 2;
        pseq.seq = dec_uint48(b);
        b += 6;

        if (sorted_list_add(h->seqs, &pseq, sizeof(pseq)) != 0) {
            goto alloc_error;
        }
    }

    /* Length-prefixed id b-tree root. */
    sz = dec_uint16(b);
    b += 2;
    h->id_btree_state = read_root((void *) b, (int) sz);
    b += sz;

    /* One length-prefixed root per view. */
    h->num_views = (uint8_t) b[0];
    b += 1;

    h->view_states = (node_pointer **) malloc(sizeof(node_pointer *) * h->num_views);
    if (h->view_states == NULL) {
        goto alloc_error;
    }

    for (i = 0; i < (uint16_t) h->num_views; ++i) {
        sz = dec_uint16(b);
        b += 2;
        h->view_states[i] = read_root((void *) b, (int) sz);
        b += sz;
    }

    h->has_replica = b[0] == 0 ? 0 : 1;
    b += 1;

    /* Replica partitions currently being transferred. */
    sz = dec_uint16(b);
    b += 2;
    h->replicas_on_transfer = sorted_list_create(part_id_cmp);
    if (h->replicas_on_transfer == NULL) {
        goto alloc_error;
    }

    for (i = 0; i < sz; ++i) {
        uint16_t part_id = dec_uint16(b);
        b += 2;

        if (sorted_list_add(h->replicas_on_transfer, &part_id, sizeof(part_id)) != 0) {
            goto alloc_error;
        }
    }

    /* Pending transition lists: active, passive, unindexable. */
    sz = dec_uint16(b);
    b += 2;
    h->pending_transition.active = sorted_list_create(part_id_cmp);
    if (h->pending_transition.active == NULL) {
        goto alloc_error;
    }

    for (i = 0; i < sz; ++i) {
        uint16_t part_id = dec_uint16(b);
        b += 2;

        if (sorted_list_add(h->pending_transition.active,
                            &part_id, sizeof(part_id)) != 0) {
            goto alloc_error;
        }
    }

    sz = dec_uint16(b);
    b += 2;
    h->pending_transition.passive = sorted_list_create(part_id_cmp);
    if (h->pending_transition.passive == NULL) {
        goto alloc_error;
    }

    for (i = 0; i < sz; ++i) {
        uint16_t part_id = dec_uint16(b);
        b += 2;

        if (sorted_list_add(h->pending_transition.passive,
                            &part_id, sizeof(part_id)) != 0) {
            goto alloc_error;
        }
    }

    sz = dec_uint16(b);
    b += 2;
    h->pending_transition.unindexable = sorted_list_create(part_id_cmp);
    if (h->pending_transition.unindexable == NULL) {
        goto alloc_error;
    }

    for (i = 0; i < sz; ++i) {
        uint16_t part_id = dec_uint16(b);
        b += 2;

        if (sorted_list_add(h->pending_transition.unindexable,
                            &part_id, sizeof(part_id)) != 0) {
            goto alloc_error;
        }
    }

    /* Sequences of partitions marked unindexable. */
    num_seqs = dec_uint16(b);
    b += 2;

    h->unindexable_seqs = sorted_list_create(part_seq_cmp);
    if (h->unindexable_seqs == NULL) {
        goto alloc_error;
    }

    for (i = 0; i < num_seqs; ++i) {
        part_seq_t pseq;

        pseq.part_id = dec_uint16(b);
        b += 2;
        pseq.seq = dec_uint48(b);
        b += 6;

        if (sorted_list_add(h->unindexable_seqs, &pseq, sizeof(pseq)) != 0) {
            goto alloc_error;
        }
    }

    /* Version 2 headers additionally carry per-partition failover logs. */
    if (h->version >= 2) {
        num_part_versions = dec_uint16(b);
        b += 2;

        h->part_versions = sorted_list_create(part_versions_cmp);
        if (h->part_versions == NULL) {
            goto alloc_error;
        }

        for (i = 0; i < num_part_versions; ++i) {
            part_version_t pver;

            pver.part_id = dec_uint16(b);
            b += 2;
            pver.num_failover_log = dec_uint16(b);
            b += 2;
            pver.failover_log = (failover_log_t *) malloc(
                sizeof(failover_log_t) * pver.num_failover_log);
            if (pver.failover_log == NULL) {
                goto alloc_error;
            }

            for (j = 0; j < pver.num_failover_log; ++j) {
                memcpy(&pver.failover_log[j].uuid, b, 8);
                b += 8;
                pver.failover_log[j].seq = dec_uint64(b);
                b += 8;
            }

            if (sorted_list_add(h->part_versions, &pver, sizeof(pver)) != 0) {
                /* The list did not take ownership; avoid leaking the log. */
                free(pver.failover_log);
                goto alloc_error;
            }
        }
    }

    free(uncomp);
    *header = h;

    return COUCHSTORE_SUCCESS;

alloc_error:
    free_index_header(h);
    free(uncomp);
    return COUCHSTORE_ERROR_ALLOC_FAIL;
}
/* Decode a view b-tree reduction value.
 *
 * Wire layout: kv_count:40 | partitions bitmap | a sequence of
 * (size:16, payload:size) reduce values running to the end of the buffer.
 *
 * On success stores a newly allocated reduction in *reduction; the caller
 * owns it and must release it with free_view_btree_reduction().
 *
 * Returns COUCHSTORE_SUCCESS, COUCHSTORE_ERROR_CORRUPT for a truncated or
 * inconsistent buffer, or COUCHSTORE_ERROR_ALLOC_FAIL on allocation
 * failure.
 */
couchstore_error_t decode_view_btree_reduction(const char *bytes, size_t len,
                                               view_btree_reduction_t **reduction)
{
    view_btree_reduction_t *r = NULL;
    uint16_t sz;
    const char *bs;
    size_t length;
    size_t idx;  /* Fix: was uint8_t, which wraps past 255 values. */

    r = (view_btree_reduction_t *) malloc(sizeof(view_btree_reduction_t));
    if (r == NULL) {
        goto alloc_error;
    }
    /* Fix: initialize owned fields immediately so
     * free_view_btree_reduction() never sees garbage on an early error
     * path (the original corrupt path freed an uninitialized
     * reduce_values pointer). */
    r->num_values = 0;
    r->reduce_values = NULL;

    /* Fix: validate untrusted file data with explicit checks instead of
     * assert() — asserts vanish under NDEBUG, after which a truncated
     * buffer underflows the size_t length and causes a huge overread. */
    if (len < 5) {
        goto corrupt;
    }
    r->kv_count = dec_uint40(bytes);
    bytes += 5;
    len -= 5;

    if (len < BITMASK_BYTE_SIZE) {
        goto corrupt;
    }
    memcpy(&r->partitions_bitmap, bytes, BITMASK_BYTE_SIZE);
    bytes += BITMASK_BYTE_SIZE;
    len -= BITMASK_BYTE_SIZE;

    /* First pass: count and bounds-check the length-prefixed values. */
    bs = bytes;
    length = len;

    while (len > 0) {
        if (len < 2) {
            goto corrupt;
        }
        sz = dec_uint16(bs);
        bs += 2;
        len -= 2;
        if (len < sz) {
            goto corrupt;
        }
        bs += sz;
        len -= sz;
        r->num_values++;
    }

    if (r->num_values > 0) {
        r->reduce_values = (sized_buf *) malloc(r->num_values * sizeof(sized_buf));
        if (r->reduce_values == NULL) {
            goto alloc_error;
        }
    }

    /* NULL the buffers so a mid-loop allocation failure frees cleanly. */
    for (idx = 0; idx < r->num_values; ++idx) {
        r->reduce_values[idx].buf = NULL;
    }

    /* Second pass: copy each value into its own heap buffer.  Lengths
     * were fully validated above, so no re-checking is needed here. */
    idx = 0;
    len = length;

    while (len > 0) {
        sz = dec_uint16(bytes);
        bytes += 2;
        len -= 2;

        r->reduce_values[idx].size = sz;
        r->reduce_values[idx].buf = (char *) malloc(sz);
        if (r->reduce_values[idx].buf == NULL) {
            goto alloc_error;
        }

        memcpy(r->reduce_values[idx].buf, bytes, sz);
        bytes += sz;
        len -= sz;
        idx++;
    }

    *reduction = r;

    return COUCHSTORE_SUCCESS;

corrupt:
    free_view_btree_reduction(r);
    return COUCHSTORE_ERROR_CORRUPT;

alloc_error:
    free_view_btree_reduction(r);
    return COUCHSTORE_ERROR_ALLOC_FAIL;
}