extern LZMA_API(lzma_vli)
lzma_block_total_size(const lzma_block *block)
{
        lzma_vli unpadded_size = lzma_block_unpadded_size(block);

        if (unpadded_size != LZMA_VLI_UNKNOWN)
                unpadded_size = vli_ceil4(unpadded_size);

        return unpadded_size;
}
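// Illustration (not part of the original source): Total Size is simply
// Unpadded Size rounded up to the next multiple of four bytes, which
// accounts for the Block Padding that follows each Block on disk. A
// standalone sketch of that rounding, assuming vli_ceil4() behaves like
// the expression below (example_ceil4 is a hypothetical name used only
// here, and <lzma.h> is assumed to be in scope for lzma_vli and
// LZMA_VLI_C):
static lzma_vli
example_ceil4(lzma_vli unpadded_size)
{
        // Round up to the next multiple of four:
        // 21 -> 24, 24 -> 24, 25 -> 28.
        return (unpadded_size + 3) & ~LZMA_VLI_C(3);
}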
/// Set *info from i->current.
static void
set_info(const lzma_index *i, lzma_index_record *info)
{
        // First copy the cumulative sizes from the current Record of the
        // current group.
        info->unpadded_size
                        = i->current.group->unpadded_sums[i->current.record];
        info->total_size = vli_ceil4(info->unpadded_size);
        info->uncompressed_size = i->current.group->uncompressed_sums[
                        i->current.record];

        // Copy the start offsets of this group.
        info->stream_offset = i->current.stream_offset;
        info->uncompressed_offset = i->current.uncompressed_offset;

        // If it's not the first Record in this group, we need to do some
        // adjustments.
        if (i->current.record > 0) {
                // Since the _sums[] are cumulative, we subtract the sums of
                // the previous Record to get the sizes of the current
                // Record, and add the sums of the previous Record to the
                // offsets. With unpadded_sums[] we need to take into account
                // that it uses a slightly unusual way of doing the
                // cumulative summing.
                const lzma_vli total_sum
                                = vli_ceil4(i->current.group->unpadded_sums[
                                        i->current.record - 1]);

                const lzma_vli uncompressed_sum = i->current.group
                                ->uncompressed_sums[i->current.record - 1];

                info->total_size -= total_sum;
                info->unpadded_size -= total_sum;
                info->uncompressed_size -= uncompressed_sum;

                info->stream_offset += total_sum;
                info->uncompressed_offset += uncompressed_sum;
        }

        return;
}
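// Worked example (not part of the original source), using made-up sizes:
// suppose a group holds two Records, A with Unpadded Size 21 and
// Uncompressed Size 50, then B with Unpadded Size 30 and Uncompressed
// Size 60. The cumulative arrays then contain
//
//     unpadded_sums[0]     = 21
//     unpadded_sums[1]     = 30 + vli_ceil4(21) = 30 + 24 = 54
//     uncompressed_sums[0] = 50
//     uncompressed_sums[1] = 50 + 60 = 110
//
// For Record B (i->current.record == 1), set_info() first copies the
// cumulative values (unpadded 54, total vli_ceil4(54) = 56,
// uncompressed 110) and then subtracts total_sum = vli_ceil4(21) = 24
// and uncompressed_sum = 50, giving B's own sizes (30, 32, 60) while
// advancing the offsets by Record A's on-disk size (24) and
// uncompressed size (50).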
/// Go forward to the next group.
static void
next_group(lzma_index *i)
{
        assert(i->current.group->next != NULL);

        // Update the offsets first.
        i->current.stream_offset += vli_ceil4(i->current.group->unpadded_sums[
                        i->current.group->last]);
        i->current.uncompressed_offset += i->current.group
                        ->uncompressed_sums[i->current.group->last];

        // Then go to the next group.
        i->current.record = 0;
        i->current.group = i->current.group->next;

        return;
}
/// Go backward to the previous group.
static void
previous_group(lzma_index *i)
{
        assert(i->current.group->prev != NULL);

        // Go to the previous group first.
        i->current.group = i->current.group->prev;
        i->current.record = i->current.group->last;

        // Then update the offsets.
        i->current.stream_offset -= vli_ceil4(i->current.group->unpadded_sums[
                        i->current.group->last]);
        i->current.uncompressed_offset -= i->current.group->uncompressed_sums[
                        i->current.group->last];

        return;
}
extern LZMA_API(lzma_ret)
lzma_index_append(lzma_index *i, lzma_allocator *allocator,
                lzma_vli unpadded_size, lzma_vli uncompressed_size)
{
        if (unpadded_size < UNPADDED_SIZE_MIN
                        || unpadded_size > UNPADDED_SIZE_MAX
                        || uncompressed_size > LZMA_VLI_MAX)
                return LZMA_PROG_ERROR;

        // This looks a bit ugly. We want to first validate that the Index
        // and Stream stay within valid limits after adding this Record.
        // Only after validating may we need to allocate a new
        // lzma_index_group (it's slightly more correct to validate before
        // allocating, YMMV).
        lzma_ret ret;

        // First update the overall info so that we can validate it.
        const lzma_vli index_list_size_add = lzma_vli_size(unpadded_size)
                        + lzma_vli_size(uncompressed_size);

        const lzma_vli total_size = vli_ceil4(unpadded_size);

        i->total_size += total_size;
        i->uncompressed_size += uncompressed_size;
        ++i->count;
        i->index_list_size += index_list_size_add;

        if (i->total_size > LZMA_VLI_MAX
                        || i->uncompressed_size > LZMA_VLI_MAX
                        || lzma_index_size(i) > LZMA_BACKWARD_SIZE_MAX
                        || lzma_index_file_size(i) > LZMA_VLI_MAX)
                ret = LZMA_DATA_ERROR; // Would grow past the limits.
        else
                ret = index_append_real(i, allocator, unpadded_size,
                                uncompressed_size, false);

        if (ret != LZMA_OK) {
                // Something went wrong. Undo the updates.
                i->total_size -= total_size;
                i->uncompressed_size -= uncompressed_size;
                --i->count;
                i->index_list_size -= index_list_size_add;
        }

        return ret;
}
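// Illustration (not part of the original source): index_list_size tracks
// how many bytes the List of Records will take in the encoded Index
// field. Each size is stored in the variable-length integer encoding,
// seven payload bits per byte, which is what lzma_vli_size() measures:
//
//     lzma_vli_size(100)     == 1    // 100 < 2^7
//     lzma_vli_size(5000)    == 2    // 5000 < 2^14
//     lzma_vli_size(1000000) == 3    // 1000000 < 2^21
//
// So appending a Record with unpadded_size = 5000 and
// uncompressed_size = 1000000 grows index_list_size by 5 bytes.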
/// Appends a new Record to the Index. If needed, this allocates a new
/// Record group.
static lzma_ret
index_append_real(lzma_index *i, lzma_allocator *allocator,
                lzma_vli unpadded_size, lzma_vli uncompressed_size,
                bool is_padding)
{
        // Add the new record.
        if (i->tail == NULL || i->tail->last == INDEX_GROUP_SIZE - 1) {
                // Allocate a new group.
                lzma_index_group *g = lzma_alloc(sizeof(lzma_index_group),
                                allocator);
                if (g == NULL)
                        return LZMA_MEM_ERROR;

                // Initialize the group and set its first record.
                g->prev = i->tail;
                g->next = NULL;
                g->last = 0;
                g->unpadded_sums[0] = unpadded_size;
                g->uncompressed_sums[0] = uncompressed_size;
                g->paddings[0] = is_padding;

                // If this is the first group, make it the head.
                if (i->head == NULL)
                        i->head = g;
                else
                        i->tail->next = g;

                // Make it the new tail.
                i->tail = g;

        } else {
                // i->tail has space left for at least one record.
                i->tail->unpadded_sums[i->tail->last + 1]
                                = unpadded_size + vli_ceil4(
                                        i->tail->unpadded_sums[i->tail->last]);
                i->tail->uncompressed_sums[i->tail->last + 1]
                                = i->tail->uncompressed_sums[i->tail->last]
                                        + uncompressed_size;
                i->tail->paddings[i->tail->last + 1] = is_padding;
                ++i->tail->last;
        }

        return LZMA_OK;
}
static lzma_vli
index_file_size(lzma_vli compressed_base, lzma_vli unpadded_sum,
                lzma_vli record_count, lzma_vli index_list_size,
                lzma_vli stream_padding)
{
        // Earlier Streams and Stream Paddings + Stream Header
        // + Blocks + Index + Stream Footer + Stream Padding
        //
        // This might go over LZMA_VLI_MAX due to too big unpadded_sum
        // when this function is used in lzma_index_append().
        lzma_vli file_size = compressed_base + 2 * LZMA_STREAM_HEADER_SIZE
                        + stream_padding + vli_ceil4(unpadded_sum);
        if (file_size > LZMA_VLI_MAX)
                return LZMA_VLI_UNKNOWN;

        // The same applies here.
        file_size += index_size(record_count, index_list_size);
        if (file_size > LZMA_VLI_MAX)
                return LZMA_VLI_UNKNOWN;

        return file_size;
}
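// Worked example (not part of the original source), continuing the
// made-up two-Record Stream from above and assuming index_size()
// returns the encoded size of the whole Index field (Index Indicator,
// Number of Records, List of Records, Index Padding and CRC32):
//
//     compressed_base  = 0    (no earlier Streams)
//     unpadded_sum     = 54   -> vli_ceil4(54) = 56 bytes of Blocks
//     record_count     = 2
//     index_list_size  = 4    (each of the four VLIs fits in one byte)
//     stream_padding   = 0
//
//     Stream Header + Stream Footer    2 * 12 = 24
//     Blocks incl. Block Padding                56
//     Index: 1 + 1 + 4 + 2 (padding) + 4 =      12
//     --------------------------------------------
//     file_size                                 92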
extern LZMA_API(lzma_ret)
lzma_index_append(lzma_index *i, const lzma_allocator *allocator,
                lzma_vli unpadded_size, lzma_vli uncompressed_size)
{
        // Validate.
        if (i == NULL || unpadded_size < UNPADDED_SIZE_MIN
                        || unpadded_size > UNPADDED_SIZE_MAX
                        || uncompressed_size > LZMA_VLI_MAX)
                return LZMA_PROG_ERROR;

        index_stream *s = (index_stream *)(i->streams.rightmost);
        index_group *g = (index_group *)(s->groups.rightmost);

        const lzma_vli compressed_base = g == NULL ? 0
                        : vli_ceil4(g->records[g->last].unpadded_sum);
        const lzma_vli uncompressed_base = g == NULL ? 0
                        : g->records[g->last].uncompressed_sum;
        const uint32_t index_list_size_add = lzma_vli_size(unpadded_size)
                        + lzma_vli_size(uncompressed_size);

        // Check that the file size will stay within limits.
        if (index_file_size(s->node.compressed_base,
                        compressed_base + unpadded_size, s->record_count + 1,
                        s->index_list_size + index_list_size_add,
                        s->stream_padding) == LZMA_VLI_UNKNOWN)
                return LZMA_DATA_ERROR;

        // The size of the Index field must not exceed the maximum value
        // that can be stored in the Backward Size field.
        if (index_size(i->record_count + 1,
                        i->index_list_size + index_list_size_add)
                        > LZMA_BACKWARD_SIZE_MAX)
                return LZMA_DATA_ERROR;

        if (g != NULL && g->last + 1 < g->allocated) {
                // There is space in the last group at least for one Record.
                ++g->last;
        } else {
                // We need to allocate a new group.
                g = lzma_alloc(sizeof(index_group)
                                + i->prealloc * sizeof(index_record),
                                allocator);
                if (g == NULL)
                        return LZMA_MEM_ERROR;

                g->last = 0;
                g->allocated = i->prealloc;

                // Reset prealloc so that if the application happens to
                // add new Records, the allocation size will be sane.
                i->prealloc = INDEX_GROUP_SIZE;

                // Set the start offsets of this group.
                g->node.uncompressed_base = uncompressed_base;
                g->node.compressed_base = compressed_base;
                g->number_base = s->record_count + 1;

                // Add the new group to the Stream.
                index_tree_append(&s->groups, &g->node);
        }

        // Add the new Record to the group.
        g->records[g->last].uncompressed_sum
                        = uncompressed_base + uncompressed_size;
        g->records[g->last].unpadded_sum
                        = compressed_base + unpadded_size;

        // Update the totals.
        ++s->record_count;
        s->index_list_size += index_list_size_add;

        i->total_size += vli_ceil4(unpadded_size);
        i->uncompressed_size += uncompressed_size;
        ++i->record_count;
        i->index_list_size += index_list_size_add;

        return LZMA_OK;
}
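// A minimal usage sketch (not from the original source) showing how an
// encoder might drive lzma_index_append() through the public API. It
// assumes liblzma >= 5.0, where lzma_index_init() allocates and returns
// the lzma_index itself; the Block sizes below are made-up placeholders.
#include <inttypes.h>
#include <stdio.h>
#include <lzma.h>

int
main(void)
{
        lzma_index *idx = lzma_index_init(NULL);
        if (idx == NULL)
                return 1;

        // Pretend we just encoded two Blocks and know their sizes.
        static const lzma_vli unpadded[] = { 4096, 2048 };
        static const lzma_vli uncompressed[] = { 65536, 30000 };

        for (size_t n = 0; n < 2; ++n) {
                // One call per Block, in the order the Blocks appear
                // in the Stream.
                if (lzma_index_append(idx, NULL,
                                unpadded[n], uncompressed[n]) != LZMA_OK) {
                        lzma_index_end(idx, NULL);
                        return 1;
                }
        }

        // The accumulated totals are now available.
        printf("Blocks: %" PRIu64 "\n", lzma_index_block_count(idx));
        printf(".xz file size: %" PRIu64 "\n", lzma_index_file_size(idx));

        lzma_index_end(idx, NULL);
        return 0;
}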