Example #1
extern LZMA_API(lzma_ret)
lzma_block_header_size(lzma_block *block)
{
	if (block->version != 0)
		return LZMA_OPTIONS_ERROR;

	// Block Header Size + Block Flags + CRC32.
	uint32_t size = 1 + 1 + 4;

	// Compressed Size
	if (block->compressed_size != LZMA_VLI_UNKNOWN) {
		const uint32_t add = lzma_vli_size(block->compressed_size);
		if (add == 0 || block->compressed_size == 0)
			return LZMA_PROG_ERROR;

		size += add;
	}

	// Uncompressed Size
	if (block->uncompressed_size != LZMA_VLI_UNKNOWN) {
		const uint32_t add = lzma_vli_size(block->uncompressed_size);
		if (add == 0)
			return LZMA_PROG_ERROR;

		size += add;
	}

	// List of Filter Flags
	if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
		return LZMA_PROG_ERROR;

	for (size_t i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
		// Don't allow too many filters.
		if (i == LZMA_FILTERS_MAX)
			return LZMA_PROG_ERROR;

		uint32_t add;
		return_if_error(lzma_filter_flags_size(&add,
				block->filters + i));

		size += add;
	}

	// Pad to a multiple of four bytes.
	block->header_size = (size + 3) & ~UINT32_C(3);

	// NOTE: We don't verify that the encoded size of the Block stays
	// within limits. This is because it is possible that we are called
	// with exaggerated Compressed Size (e.g. LZMA_VLI_MAX) to reserve
	// space for Block Header, and later called again with lower,
	// real values.

	return LZMA_OK;
}
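For reference, a caller normally fills in a lzma_block, lets lzma_block_header_size() compute the padded header size, and only then encodes the header into a buffer of that size. The following is a minimal caller-side sketch using the public <lzma.h> API (lzma_lzma_preset(), lzma_block_header_encode()); the single-LZMA2 filter chain is illustrative and error handling is reduced to bare returns.

#include <lzma.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch (not part of liblzma): ask lzma_block_header_size() for the
 * padded size of the Block Header and then encode it. */
int
encode_block_header_example(void)
{
	lzma_options_lzma opt;
	if (lzma_lzma_preset(&opt, LZMA_PRESET_DEFAULT))
		return 1;

	lzma_filter filters[2] = {
		{ .id = LZMA_FILTER_LZMA2, .options = &opt },
		{ .id = LZMA_VLI_UNKNOWN, .options = NULL },
	};

	lzma_block block;
	memset(&block, 0, sizeof(block));
	block.version = 0;
	block.check = LZMA_CHECK_CRC32;
	block.filters = filters;

	// Leaving the sizes unknown simply omits them from the header,
	// exactly as the two "if" branches above show.
	block.compressed_size = LZMA_VLI_UNKNOWN;
	block.uncompressed_size = LZMA_VLI_UNKNOWN;

	// Fills block.header_size; always a multiple of four bytes.
	if (lzma_block_header_size(&block) != LZMA_OK)
		return 1;

	uint8_t buf[LZMA_BLOCK_HEADER_SIZE_MAX];
	if (lzma_block_header_encode(&block, buf) != LZMA_OK)
		return 1;

	printf("Block Header size: %u bytes\n", (unsigned)block.header_size);
	return 0;
}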
Example #2
extern LZMA_API(lzma_ret)
lzma_index_append(lzma_index *i, lzma_allocator *allocator,
		lzma_vli unpadded_size, lzma_vli uncompressed_size)
{
	if (unpadded_size < UNPADDED_SIZE_MIN
			|| unpadded_size > UNPADDED_SIZE_MAX
			|| uncompressed_size > LZMA_VLI_MAX)
		return LZMA_PROG_ERROR;

	// This looks a bit ugly. We want to first validate that the Index
	// and Stream stay in valid limits after adding this Record. After
	// validating, we may need to allocate a new lzma_index_group (it's
	// slightly more correct to validate before allocating, YMMV).
	lzma_ret ret;

	// First update the overall info so we can validate it.
	const lzma_vli index_list_size_add = lzma_vli_size(unpadded_size)
			+ lzma_vli_size(uncompressed_size);

	const lzma_vli total_size = vli_ceil4(unpadded_size);

	i->total_size += total_size;
	i->uncompressed_size += uncompressed_size;
	++i->count;
	i->index_list_size += index_list_size_add;

	if (i->total_size > LZMA_VLI_MAX
			|| i->uncompressed_size > LZMA_VLI_MAX
			|| lzma_index_size(i) > LZMA_BACKWARD_SIZE_MAX
			|| lzma_index_file_size(i) > LZMA_VLI_MAX)
		ret = LZMA_DATA_ERROR; // Would grow past the limits.
	else
		ret = index_append_real(i, allocator, unpadded_size,
				uncompressed_size, false);

	if (ret != LZMA_OK) {
		// Something went wrong. Undo the updates.
		i->total_size -= total_size;
		i->uncompressed_size -= uncompressed_size;
		--i->count;
		i->index_list_size -= index_list_size_add;
	}

	return ret;
}
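A minimal caller-side sketch of the same operation through the public API, assuming the current signatures where the allocator parameter is const and NULL means malloc()/free(); the Unpadded and Uncompressed Sizes passed in are made-up values for illustration.

#include <lzma.h>
#include <inttypes.h>
#include <stdio.h>

/* Sketch: build an in-memory Index from two hypothetical Blocks and
 * read back the totals that lzma_index_append() has been accumulating. */
int
build_index_example(void)
{
	lzma_index *idx = lzma_index_init(NULL);
	if (idx == NULL)
		return 1;

	// Unpadded Size excludes Block Padding; Uncompressed Size is the
	// amount of data the Block decodes to. Values are illustrative.
	if (lzma_index_append(idx, NULL, 4096, 16384) != LZMA_OK
			|| lzma_index_append(idx, NULL, 1234, 5000) != LZMA_OK) {
		lzma_index_end(idx, NULL);
		return 1;
	}

	printf("Records:             %" PRIu64 "\n", lzma_index_block_count(idx));
	printf("Index field size:    %" PRIu64 "\n", lzma_index_size(idx));
	printf("Total .xz file size: %" PRIu64 "\n", lzma_index_file_size(idx));

	lzma_index_end(idx, NULL);
	return 0;
}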
Example #3
extern LZMA_API(lzma_ret)
lzma_index_append(lzma_index *i, const lzma_allocator *allocator,
		lzma_vli unpadded_size, lzma_vli uncompressed_size)
{
	// Validate.
	if (i == NULL || unpadded_size < UNPADDED_SIZE_MIN
			|| unpadded_size > UNPADDED_SIZE_MAX
			|| uncompressed_size > LZMA_VLI_MAX)
		return LZMA_PROG_ERROR;

	index_stream *s = (index_stream *)(i->streams.rightmost);
	index_group *g = (index_group *)(s->groups.rightmost);

	const lzma_vli compressed_base = g == NULL ? 0
			: vli_ceil4(g->records[g->last].unpadded_sum);
	const lzma_vli uncompressed_base = g == NULL ? 0
			: g->records[g->last].uncompressed_sum;
	const uint32_t index_list_size_add = lzma_vli_size(unpadded_size)
			+ lzma_vli_size(uncompressed_size);

	// Check that the file size will stay within limits.
	if (index_file_size(s->node.compressed_base,
			compressed_base + unpadded_size, s->record_count + 1,
			s->index_list_size + index_list_size_add,
			s->stream_padding) == LZMA_VLI_UNKNOWN)
		return LZMA_DATA_ERROR;

	// The size of the Index field must not exceed the maximum value
	// that can be stored in the Backward Size field.
	if (index_size(i->record_count + 1,
			i->index_list_size + index_list_size_add)
			> LZMA_BACKWARD_SIZE_MAX)
		return LZMA_DATA_ERROR;

	if (g != NULL && g->last + 1 < g->allocated) {
		// There is space in the last group at least for one Record.
		++g->last;
	} else {
		// We need to allocate a new group.
		g = lzma_alloc(sizeof(index_group)
				+ i->prealloc * sizeof(index_record),
				allocator);
		if (g == NULL)
			return LZMA_MEM_ERROR;

		g->last = 0;
		g->allocated = i->prealloc;

		// Reset prealloc so that if the application happens to
		// add new Records, the allocation size will be sane.
		i->prealloc = INDEX_GROUP_SIZE;

		// Set the start offsets of this group.
		g->node.uncompressed_base = uncompressed_base;
		g->node.compressed_base = compressed_base;
		g->number_base = s->record_count + 1;

		// Add the new group to the Stream.
		index_tree_append(&s->groups, &g->node);
	}

	// Add the new Record to the group.
	g->records[g->last].uncompressed_sum
			= uncompressed_base + uncompressed_size;
	g->records[g->last].unpadded_sum
			= compressed_base + unpadded_size;

	// Update the totals.
	++s->record_count;
	s->index_list_size += index_list_size_add;

	i->total_size += vli_ceil4(unpadded_size);
	i->uncompressed_size += uncompressed_size;
	++i->record_count;
	i->index_list_size += index_list_size_add;

	return LZMA_OK;
}
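To show where the appended Records end up, here is a hedged sketch that walks them back out with the public iterator API (lzma_index_iter_init()/lzma_index_iter_next()); the values it prints come from the cumulative sums that the function above stores in each group. The Record sizes are again illustrative.

#include <lzma.h>
#include <inttypes.h>
#include <stdio.h>

/* Sketch: append a few Records, then iterate over them Block by Block. */
int
iterate_index_example(void)
{
	lzma_index *idx = lzma_index_init(NULL);
	if (idx == NULL)
		return 1;

	for (lzma_vli n = 1; n <= 3; ++n) {
		if (lzma_index_append(idx, NULL, 1000 * n, 4000 * n)
				!= LZMA_OK) {
			lzma_index_end(idx, NULL);
			return 1;
		}
	}

	lzma_index_iter iter;
	lzma_index_iter_init(&iter, idx);

	// lzma_index_iter_next() returns false while a Block is left.
	while (!lzma_index_iter_next(&iter, LZMA_INDEX_ITER_BLOCK))
		printf("Block %" PRIu64 ": unpadded %" PRIu64 ", "
				"uncompressed %" PRIu64 "\n",
				iter.block.number_in_file,
				iter.block.unpadded_size,
				iter.block.uncompressed_size);

	lzma_index_end(idx, NULL);
	return 0;
}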