/* Serialize a sorted list of uint16_t sequence numbers into *buf:
 * a uint16 element count followed by each value encoded as uint16.
 * Advances *buf past the bytes written. */
static void enc_seq_list(const void *list, char **buf)
{
    void *iter = sorted_list_iterator(list);
    uint16_t *entry;

    enc_uint16((uint16_t) sorted_list_size(list), buf);
    for (entry = sorted_list_next(iter);
         entry != NULL;
         entry = sorted_list_next(iter)) {
        enc_uint16(*entry, buf);
    }
    sorted_list_free_iterator(iter);
}
/* Serialize a sorted list of (partition id, sequence) pairs into *buf:
 * a uint16 element count, then for each entry a uint16 partition id
 * followed by a 48-bit sequence number. Advances *buf past the bytes
 * written. */
static void enc_part_seq_list(const void *list, char **buf)
{
    void *iter = sorted_list_iterator(list);
    part_seq_t *entry;

    enc_uint16((uint16_t) sorted_list_size(list), buf);
    for (entry = sorted_list_next(iter);
         entry != NULL;
         entry = sorted_list_next(iter)) {
        enc_uint16(entry->part_id, buf);
        enc_uint48(entry->seq, buf);
    }
    sorted_list_free_iterator(iter);
}
static void enc_part_versions_list(const void *list, char **buf) { void *it = sorted_list_iterator(list); part_version_t *pver = NULL; uint16_t i; enc_uint16((uint16_t) sorted_list_size(list), buf); pver = sorted_list_next(it); while (pver != NULL) { enc_uint16(pver->part_id, buf); enc_uint16(pver->num_failover_log, buf); for (i = 0; i < pver->num_failover_log; ++i) { memcpy(*buf, &(pver->failover_log[i].uuid), 8); *buf += 8; enc_uint64(pver->failover_log[i].seq, buf); } pver = sorted_list_next(it); } sorted_list_free_iterator(it); }
couchstore_error_t encode_view_btree_reduction(const view_btree_reduction_t *reduction, char *buffer, size_t *buffer_size) { char *b = NULL; size_t sz = 0; int i; sz += 5; /* kv_count */ sz += BITMASK_BYTE_SIZE; /* partitions bitmap */ /* reduce values */ for (i = 0; i < reduction->num_values; ++i) { sz += 2; /* size_t */ sz += reduction->reduce_values[i].size; } if (sz > MAX_REDUCTION_SIZE) { return COUCHSTORE_ERROR_REDUCTION_TOO_LARGE; } b = buffer; enc_raw40(reduction->kv_count, &b); memcpy(b, &reduction->partitions_bitmap, BITMASK_BYTE_SIZE); b += BITMASK_BYTE_SIZE; for (i = 0; i < reduction->num_values; ++i) { enc_uint16(reduction->reduce_values[i].size, &b); memcpy(b, reduction->reduce_values[i].buf, reduction->reduce_values[i].size); b += reduction->reduce_values[i].size; } *buffer_size = sz; return COUCHSTORE_SUCCESS; }
/* Serialize an index_header_t into a newly malloc'd buffer consisting of
 * the 16-byte header signature followed by the snappy-compressed body.
 *
 * The function is two-pass: a size pass accumulates `sz` with the exact
 * byte count of every field, then a write pass emits those same fields in
 * the same order. The two passes must stay in lockstep byte-for-byte.
 *
 * On success, *buffer owns malloc'd memory the caller must free and
 * *buffer_size is its length. On failure, *buffer is NULL, *buffer_size
 * is 0, and COUCHSTORE_ERROR_ALLOC_FAIL is returned (also used for
 * snappy failure — see TODO below). */
couchstore_error_t encode_index_header(const index_header_t *header,
                                       char **buffer,
                                       size_t *buffer_size)
{
    char *buf = NULL, *b = NULL;
    size_t sz = 0;
    uint16_t id_btree_state_size;
    int i;
    size_t comp_size;
    char *comp;
    snappy_status res;

    /* --- size pass: compute the uncompressed body size --- */
    sz += 1; /* version */
    sz += 2; /* number of partitions */
    sz += 3 * BITMASK_BYTE_SIZE; /* active/passive/cleanup bitmasks */
    /* seqs: uint16 count + per entry (uint16 part_id + uint48 seq) */
    sz += 2;
    sz += sorted_list_size(header->seqs) * (2 + 6);
    /* id btree state: uint16 size prefix + root + reduce value */
    sz += 2;
    if (header->id_btree_state != NULL) {
        sz += sizeof(raw_btree_root);
        sz += header->id_btree_state->reduce_value.size;
    }
    /* view btree states: 1-byte view count, then per view a uint16
     * size prefix + root + reduce value (0 size when state is NULL) */
    sz += 1;
    for (i = 0; i < header->num_views; ++i) {
        sz += 2;
        if (header->view_states[i] != NULL) {
            sz += sizeof(raw_btree_root);
            sz += header->view_states[i]->reduce_value.size;
        }
    }
    /* has_replicas: single flag byte */
    sz += 1;
    /* replicas_on_transfer: uint16 count + uint16 per partition id */
    sz += 2;
    sz += sorted_list_size(header->replicas_on_transfer) * 2;
    /* pending transition active */
    sz += 2;
    sz += sorted_list_size(header->pending_transition.active) * 2;
    /* pending transition passive */
    sz += 2;
    sz += sorted_list_size(header->pending_transition.passive) * 2;
    /* pending transition unindexable */
    sz += 2;
    sz += sorted_list_size(header->pending_transition.unindexable) * 2;
    /* unindexable seqs: same layout as seqs above */
    sz += 2;
    sz += sorted_list_size(header->unindexable_seqs) * (2 + 6);
    /* partition versions: only present from header version 2 on */
    if (header->version >= 2) {
        sz += size_of_partition_versions(header->part_versions);
    }

    b = buf = (char *) malloc(sz);
    if (buf == NULL) {
        goto alloc_error;
    }

    /* --- write pass: must mirror the size pass exactly --- */
    b[0] = (char) header->version;
    b += 1;
    enc_uint16(header->num_partitions, &b);
    memcpy(b, &header->active_bitmask, BITMASK_BYTE_SIZE);
    b += BITMASK_BYTE_SIZE;
    memcpy(b, &header->passive_bitmask, BITMASK_BYTE_SIZE);
    b += BITMASK_BYTE_SIZE;
    memcpy(b, &header->cleanup_bitmask, BITMASK_BYTE_SIZE);
    b += BITMASK_BYTE_SIZE;
    enc_part_seq_list(header->seqs, &b);
    if (header->id_btree_state != NULL) {
        id_btree_state_size = (uint16_t) sizeof(raw_btree_root);
        id_btree_state_size += (uint16_t) header->id_btree_state->reduce_value.size;
    } else {
        id_btree_state_size = 0;
    }
    enc_uint16(id_btree_state_size, &b);
    /* encode_root writes id_btree_state_size bytes at b (nothing when
     * the state is NULL, matching the 0 size prefix) — presumably; the
     * size/advance here relies on that contract. */
    encode_root(b, header->id_btree_state);
    b += id_btree_state_size;
    b[0] = (char) header->num_views;
    b += 1;
    for (i = 0; i < header->num_views; ++i) {
        uint16_t view_state_size = 0;
        if (header->view_states[i] != NULL) {
            view_state_size = (uint16_t) sizeof(raw_btree_root);
            view_state_size += (uint16_t) header->view_states[i]->reduce_value.size;
        }
        enc_uint16(view_state_size, &b);
        encode_root(b, header->view_states[i]);
        b += view_state_size;
    }
    b[0] = (char) (header->has_replica ? 1 : 0);
    b += 1;
    enc_seq_list(header->replicas_on_transfer, &b);
    enc_seq_list(header->pending_transition.active, &b);
    enc_seq_list(header->pending_transition.passive, &b);
    enc_seq_list(header->pending_transition.unindexable, &b);
    enc_part_seq_list(header->unindexable_seqs, &b);
    if (header->version >= 2) {
        enc_part_versions_list(header->part_versions, &b);
    }

    /* Compress the body; the 16 leading bytes are reserved for the
     * uncompressed signature. */
    comp_size = snappy_max_compressed_length(sz);
    comp = (char *) malloc(16 + comp_size);
    if (comp == NULL) {
        goto alloc_error;
    }
    res = snappy_compress(buf, sz, comp + 16, &comp_size);
    if (res != SNAPPY_OK) {
        /* TODO: a new error for couchstore_error_t — currently a snappy
         * failure is misreported as an allocation failure. */
        free(comp);
        goto alloc_error;
    }
    memcpy(comp, header->signature, 16);

    *buffer = comp;
    *buffer_size = 16 + comp_size;
    free(buf);

    return COUCHSTORE_SUCCESS;

alloc_error:
    free(buf); /* free(NULL) is a no-op, safe on the early path */
    *buffer = NULL;
    *buffer_size = 0;
    return COUCHSTORE_ERROR_ALLOC_FAIL;
}