lzma_filter_flags_decode(
		lzma_filter *filter, lzma_allocator *allocator,
		const uint8_t *in, size_t *in_pos, size_t in_size)
{
	// Clear the options pointer right away so that the caller can
	// always safely free it, even when we bail out below.
	filter->options = NULL;

	// Decode the Filter ID.
	return_if_error(lzma_vli_decode(
			&filter->id, NULL, in, in_pos, in_size));

	// IDs in the reserved range are not valid in Filter Flags.
	if (filter->id >= LZMA_FILTER_RESERVED_START)
		return LZMA_DATA_ERROR;

	// Decode the Size of Properties field.
	lzma_vli props_size;
	return_if_error(lzma_vli_decode(
			&props_size, NULL, in, in_pos, in_size));

	// The remaining input must be able to hold the whole
	// Filter Properties field.
	if (in_size - *in_pos < props_size)
		return LZMA_DATA_ERROR;

	// Decode the Filter Properties. The input position is advanced
	// past the Properties even if decoding them failed.
	const lzma_ret ret = lzma_properties_decode(
			filter, allocator, in + *in_pos, props_size);
	*in_pos += props_size;

	return ret;
}
// Encode the Block Header field into *out. The caller must have set
// block->header_size (e.g. with lzma_block_header_size()) and provide
// at least block->header_size bytes of space at out.
lzma_block_header_encode(const lzma_block *block, uint8_t *out)
{
	// Validate everything but filters.
	if (lzma_block_unpadded_size(block) == 0
			|| !lzma_vli_is_valid(block->uncompressed_size))
		return LZMA_PROG_ERROR;

	// Indicate the size of the buffer _excluding_ the CRC32 field.
	const size_t out_size = block->header_size - 4;

	// Store the Block Header Size. The format stores the real size
	// divided by four (header size is always a multiple of four).
	out[0] = out_size / 4;

	// We write Block Flags in pieces.
	out[1] = 0x00;
	size_t out_pos = 2;

	// Compressed Size
	if (block->compressed_size != LZMA_VLI_UNKNOWN) {
		return_if_error(lzma_vli_encode(block->compressed_size, NULL,
				out, &out_pos, out_size));

		// Flag bit 0x40: Compressed Size field is present.
		out[1] |= 0x40;
	}

	// Uncompressed Size
	if (block->uncompressed_size != LZMA_VLI_UNKNOWN) {
		return_if_error(lzma_vli_encode(block->uncompressed_size, NULL,
				out, &out_pos, out_size));

		// Flag bit 0x80: Uncompressed Size field is present.
		out[1] |= 0x80;
	}

	// Filter Flags
	if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
		return LZMA_PROG_ERROR;

	size_t filter_count = 0;
	do {
		// There can be a maximum of four filters.
		if (filter_count == LZMA_FILTERS_MAX)
			return LZMA_PROG_ERROR;

		return_if_error(lzma_filter_flags_encode(
				block->filters + filter_count,
				out, &out_pos, out_size));

	} while (block->filters[++filter_count].id != LZMA_VLI_UNKNOWN);

	// The low two bits of Block Flags hold (number of filters - 1).
	out[1] |= filter_count - 1;

	// Padding — zero-fill up to the CRC32 field.
	memzero(out + out_pos, out_size - out_pos);

	// CRC32 over everything encoded so far.
	unaligned_write32le(out + out_size, lzma_crc32(out, out_size, 0));

	return LZMA_OK;
}
// Calculate the size of the Block Header field and store it, rounded up
// to a multiple of four bytes, into block->header_size.
lzma_block_header_size(lzma_block *block)
{
	// Only version 0 of the lzma_block structure is understood here.
	if (block->version != 0)
		return LZMA_OPTIONS_ERROR;

	// Block Header Size + Block Flags + CRC32.
	uint32_t size = 1 + 1 + 4;

	// Compressed Size
	if (block->compressed_size != LZMA_VLI_UNKNOWN) {
		const uint32_t add = lzma_vli_size(block->compressed_size);
		// add == 0 indicates an invalid VLI; a Compressed Size of
		// zero is not allowed either.
		if (add == 0 || block->compressed_size == 0)
			return LZMA_PROG_ERROR;

		size += add;
	}

	// Uncompressed Size
	if (block->uncompressed_size != LZMA_VLI_UNKNOWN) {
		const uint32_t add = lzma_vli_size(block->uncompressed_size);
		if (add == 0)
			return LZMA_PROG_ERROR;

		size += add;
	}

	// List of Filter Flags
	if (block->filters == NULL || block->filters[0].id == LZMA_VLI_UNKNOWN)
		return LZMA_PROG_ERROR;

	for (size_t i = 0; block->filters[i].id != LZMA_VLI_UNKNOWN; ++i) {
		// Don't allow too many filters.
		if (i == LZMA_FILTERS_MAX)
			return LZMA_PROG_ERROR;

		uint32_t add;
		return_if_error(lzma_filter_flags_size(&add,
				block->filters + i));

		size += add;
	}

	// Pad to a multiple of four bytes.
	block->header_size = (size + 3) & ~UINT32_C(3);

	// NOTE: We don't verify that the encoded size of the Block stays
	// within limits. This is because it is possible that we are called
	// with exaggerated Compressed Size (e.g. LZMA_VLI_MAX) to reserve
	// space for Block Header, and later called again with lower,
	// real values.
	return LZMA_OK;
}
extern lzma_ret
lzma_raw_coder_init(lzma_next_coder *next, lzma_allocator *allocator,
		const lzma_filter *options,
		lzma_filter_find coder_find, bool is_encoder)
{
	// Do some basic validation and get the number of filters.
	size_t count;
	return_if_error(validate_chain(options, &count));

	// Build the lzma_filter_info array. Encoders process the chain in
	// reverse order, which allows more efficient handling of the
	// uncompressed data, so the destination index is mirrored for them.
	lzma_filter_info filters[LZMA_FILTERS_MAX + 1];

	for (size_t i = 0; i < count; ++i) {
		const size_t dest = is_encoder ? count - i - 1 : i;

		// Look up the init function for this Filter ID.
		const lzma_filter_coder *const fc = coder_find(options[i].id);
		if (fc == NULL || fc->init == NULL)
			return LZMA_OPTIONS_ERROR;

		filters[dest].id = options[i].id;
		filters[dest].init = fc->init;
		filters[dest].options = options[i].options;
	}

	// Terminate the array.
	filters[count].id = LZMA_VLI_UNKNOWN;
	filters[count].init = NULL;

	// Initialize the filters; on failure, free whatever was set up.
	const lzma_ret ret = lzma_next_filter_init(next, allocator, filters);
	if (ret != LZMA_OK)
		lzma_next_end(next, allocator);

	return ret;
}
lzma_index_stream_flags(lzma_index *i, const lzma_stream_flags *stream_flags)
{
	// Both arguments are required.
	if (i == NULL)
		return LZMA_PROG_ERROR;

	if (stream_flags == NULL)
		return LZMA_PROG_ERROR;

	// Validate the Stream Flags. Comparing the structure against
	// itself exercises the validation done by the compare function
	// without needing a separate validator.
	return_if_error(lzma_stream_flags_compare(
			stream_flags, stream_flags));

	// Store a copy of the flags into the most recently added Stream.
	((index_stream *)(i->streams.rightmost))->stream_flags
			= *stream_flags;

	return LZMA_OK;
}
/*
 * Destructively cons *op1 onto *op2 (nil, array, list, or pair).
 * On success *op2 holds the extended container and nil is returned;
 * on allocation failure or a type error, an error value is returned.
 */
value value_cons_now2(value *op1, value *op2)
{
	if (op2->type == VALUE_NIL) {
		// Turn nil into a one-element pair: (op1 . nil).
		op2->type = VALUE_PAR;
		value_malloc(op2, 2);
		if (op2->type == VALUE_ERROR)
			// BUG FIX: this was a bare "return;" in a function
			// returning `value` (undefined behavior). Propagate
			// an error value like the other branches do.
			return value_init_error();
		op2->core.u_p->head = *op1;
		op2->core.u_p->tail = value_init_nil();
	} else if (op2->type == VALUE_ARY) {
		value res;
		res.type = VALUE_ARY;
		size_t i, length = value_length(*op2);
		if (resize_p(length+1)) {
			// NOTE(review): reallocating op1 here looks suspicious;
			// presumably the RESULT's backing array should be grown
			// to hold length+1 elements — confirm against the
			// allocator's contract for value_realloc().
			value_realloc(op1, next_size(length+1) + 1);
			if (op1->type == VALUE_ERROR)
				return value_init_error();
		}
		// BUG FIX: the loop previously started at i = 1, which
		// skipped op2's first element and left res slot 1
		// uninitialized. Copy all `length` elements, each shifted
		// up by one slot to make room for the new head.
		for (i = 0; i < length; ++i)
			res.core.u_a.a[i+1] = op2->core.u_a.a[i];
		res.core.u_a.a[0] = *op1;
		res.core.u_a.length = op2->core.u_a.length + 1;
		*op2 = res;
	} else if (op2->type == VALUE_LST) {
		// Build a fresh 2-cell list node: [op1, old op2].
		value res;
		res.type = VALUE_LST;
		value_malloc(&res, 2);
		if (res.type == VALUE_ERROR) {
			return value_init_error();
		}
		res.core.u_l[0] = *op1;
		res.core.u_l[1] = *op2;
		*op2 = res;
	} else if (op2->type == VALUE_PAR) {
		// Build a fresh pair node: (op1 . old op2).
		value res;
		res.type = VALUE_PAR;
		value_malloc(&res, 2);
		return_if_error(res);
		res.core.u_l[0] = *op1;
		res.core.u_l[1] = *op2;
		*op2 = res;
	} else {
		value_error(1, "Type Error: cons() is undefined where op2 is %ts (nil, array or list expected).", *op2);
		return value_init_error();
	}

	return value_init_nil();
}
static lzma_ret
block_encoder_init(lzma_coder *coder, const lzma_allocator *allocator)
{
	lzma_block *const options = &coder->block_options;

	// The Block encoder itself doesn't need compressed_size,
	// uncompressed_size, or header_size to be initialized, but setting
	// them here lets lzma_block_header_size() reject Filter IDs that
	// cannot be used in Blocks/Streams.
	options->compressed_size = LZMA_VLI_UNKNOWN;
	options->uncompressed_size = LZMA_VLI_UNKNOWN;
	return_if_error(lzma_block_header_size(options));

	// Initialize the actual Block encoder.
	return lzma_block_encoder_init(&coder->block_encoder,
			allocator, options);
}
// Initialize (or re-initialize) an output queue for threaded coding.
// Existing allocations are reused when buf_size_max and the buffer count
// are unchanged; otherwise the old memory is freed and reallocated.
extern lzma_ret
lzma_outq_init(lzma_outq *outq, const lzma_allocator *allocator,
		uint64_t buf_size_max, uint32_t threads)
{
	uint64_t bufs_alloc_size;
	uint32_t bufs_count;

	// Set bufs_count and bufs_alloc_size.
	return_if_error(get_options(&bufs_alloc_size, &bufs_count,
			buf_size_max, threads));

	// Allocate memory if needed.
	if (outq->buf_size_max != buf_size_max
			|| outq->bufs_allocated != bufs_count) {
		lzma_outq_end(outq, allocator);

#if SIZE_MAX < UINT64_MAX
		// On 32-bit systems the combined buffer size may not fit
		// into size_t.
		if (bufs_alloc_size > SIZE_MAX)
			return LZMA_MEM_ERROR;
#endif

		outq->bufs = lzma_alloc(bufs_count * sizeof(lzma_outbuf),
				allocator);
		outq->bufs_mem = lzma_alloc((size_t)(bufs_alloc_size),
				allocator);

		if (outq->bufs == NULL || outq->bufs_mem == NULL) {
			// Frees whichever of the two allocations succeeded.
			lzma_outq_end(outq, allocator);
			return LZMA_MEM_ERROR;
		}
	}

	// Initialize the rest of the main structure. Initialization of
	// outq->bufs[] is done when they are actually needed.
	outq->buf_size_max = (size_t)(buf_size_max);
	outq->bufs_allocated = bufs_count;
	outq->bufs_pos = 0;
	outq->bufs_used = 0;
	outq->read_pos = 0;

	return LZMA_OK;
}
lzma_raw_buffer_encode(const lzma_filter *filters, lzma_allocator *allocator,
		const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
{
	// Validate what isn't validated later in filter_common.c.
	if ((in == NULL && in_size != 0) || out == NULL || out_pos == NULL
			|| *out_pos > out_size)
		return LZMA_PROG_ERROR;

	// Initialize the raw encoder with the given filter chain.
	lzma_next_coder coder = LZMA_NEXT_CODER_INIT;
	return_if_error(lzma_raw_encoder_init(&coder, allocator, filters));

	// Remember where the output started so it can be rolled back
	// on failure.
	const size_t out_start = *out_pos;

	// Run the whole input through the encoder in one call, then
	// release the encoder's memory.
	size_t in_pos = 0;
	lzma_ret ret = coder.code(coder.coder, allocator, in, &in_pos,
			in_size, out, out_pos, out_size, LZMA_FINISH);
	lzma_next_end(&coder, allocator);

	// LZMA_STREAM_END means everything was encoded successfully.
	if (ret == LZMA_STREAM_END)
		return LZMA_OK;

	if (ret == LZMA_OK) {
		// The encoder stopped without finishing: the output buffer
		// was too small.
		assert(*out_pos == out_size);
		ret = LZMA_BUF_ERROR;
	}

	// Roll back any partial output before reporting the error.
	*out_pos = out_start;
	return ret;
}
// Change the filter chain of a running Stream encoder. Between Blocks the
// whole chain may be replaced; in the middle of a Block only the
// filter-specific options can be updated.
static lzma_ret
stream_encoder_update(lzma_coder *coder, const lzma_allocator *allocator,
		const lzma_filter *filters,
		const lzma_filter *reversed_filters)
{
	if (coder->sequence <= SEQ_BLOCK_INIT) {
		// There is no incomplete Block waiting to be finished,
		// thus we can change the whole filter chain. Start by
		// trying to initialize the Block encoder with the new
		// chain. This way we detect if the chain is valid.
		coder->block_encoder_is_initialized = false;
		coder->block_options.filters = (lzma_filter *)(filters);
		const lzma_ret ret = block_encoder_init(coder, allocator);
		// Point block_options back at the coder's own copy of the
		// chain regardless of whether the init succeeded.
		coder->block_options.filters = coder->filters;
		if (ret != LZMA_OK)
			return ret;

		coder->block_encoder_is_initialized = true;
	} else if (coder->sequence <= SEQ_BLOCK_ENCODE) {
		// We are in the middle of a Block. Try to update only
		// the filter-specific options.
		return_if_error(coder->block_encoder.update(
				coder->block_encoder.coder, allocator,
				filters, reversed_filters));
	} else {
		// Trying to update the filter chain when we are already
		// encoding Index or Stream Footer.
		return LZMA_PROG_ERROR;
	}

	// Free the copy of the old chain and make a copy of the new chain.
	for (size_t i = 0; coder->filters[i].id != LZMA_VLI_UNKNOWN; ++i)
		lzma_free(coder->filters[i].options, allocator);

	return lzma_filters_copy(filters, coder->filters, allocator);
}
// Single-call raw decoding: decode *in into *out using the given filter
// chain. On failure both positions are restored to their input values.
lzma_raw_buffer_decode(const lzma_filter *filters, lzma_allocator *allocator,
		const uint8_t *in, size_t *in_pos, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
{
	// Validate what isn't validated later in filter_common.c.
	if (in == NULL || in_pos == NULL || *in_pos > in_size || out == NULL
			|| out_pos == NULL || *out_pos > out_size)
		return LZMA_PROG_ERROR;

	// Initialize the decoder.
	lzma_next_coder next = LZMA_NEXT_CODER_INIT;
	return_if_error(lzma_raw_decoder_init(&next, allocator, filters));

	// Store the positions so that we can restore them if something
	// goes wrong.
	const size_t in_start = *in_pos;
	const size_t out_start = *out_pos;

	// Do the actual decoding and free decoder's memory.
	lzma_ret ret = next.code(next.coder, allocator, in, in_pos, in_size,
			out, out_pos, out_size, LZMA_FINISH);

	if (ret == LZMA_STREAM_END) {
		ret = LZMA_OK;
	} else {
		if (ret == LZMA_OK) {
			// Either the input was truncated or the
			// output buffer was too small.
			assert(*in_pos == in_size || *out_pos == out_size);

			if (*in_pos != in_size) {
				// Since input wasn't consumed completely,
				// the output buffer became full and is
				// too small.
				ret = LZMA_BUF_ERROR;
			} else if (*out_pos != out_size) {
				// Since output didn't became full, the input
				// has to be truncated.
				ret = LZMA_DATA_ERROR;
			} else {
				// All the input was consumed and output
				// buffer is full. Now we don't immediately
				// know the reason for the error. Try
				// decoding one more byte. If it succeeds,
				// then the output buffer was too small. If
				// we cannot get a new output byte, the input
				// is truncated.
				uint8_t tmp[1];
				size_t tmp_pos = 0;
				(void)next.code(next.coder, allocator,
						in, in_pos, in_size,
						tmp, &tmp_pos, 1, LZMA_FINISH);

				if (tmp_pos == 1)
					ret = LZMA_BUF_ERROR;
				else
					ret = LZMA_DATA_ERROR;
			}
		}

		// Restore the positions.
		*in_pos = in_start;
		*out_pos = out_start;
	}

	// Free the decoder's memory even on success.
	lzma_next_end(&next, allocator);

	return ret;
}
// Decode a Block Header from *in. The caller must have already read the
// first byte, set block->header_size accordingly, and set block->check;
// `in` must hold the whole header (block->header_size bytes).
lzma_block_header_decode(lzma_block *block,
		lzma_allocator *allocator, const uint8_t *in)
{
	// The low two bits of Block Flags encode (filter count - 1).
	const size_t filter_count = (in[1] & 3) + 1;
	size_t in_size;
	size_t i;

	// Start after the Block Header Size and Block Flags fields.
	size_t in_pos = 2;

	// NOTE: We consider the header to be corrupt not only when the
	// CRC32 doesn't match, but also when variable-length integers
	// are invalid or over 63 bits, or if the header is too small
	// to contain the claimed information.

	// Initialize the filter options array. This way the caller can
	// safely free() the options even if an error occurs in this function.
	for (i = 0; i <= LZMA_FILTERS_MAX; ++i) {
		block->filters[i].id = LZMA_VLI_UNKNOWN;
		block->filters[i].options = NULL;
	}

	// Always zero for now.
	block->version = 0;

	// Validate Block Header Size and Check type. The caller must have
	// already set these, so it is a programming error if this test fails.
	if (lzma_block_header_size_decode(in[0]) != block->header_size
			|| (unsigned int)(block->check) > LZMA_CHECK_ID_MAX)
		return LZMA_PROG_ERROR;

	// Exclude the CRC32 field.
	in_size = block->header_size - 4;

	// Verify CRC32
	if (lzma_crc32(in, in_size, 0) != unaligned_read32le(in + in_size))
		return LZMA_DATA_ERROR;

	// Check for unsupported flags.
	if (in[1] & 0x3C)
		return LZMA_OPTIONS_ERROR;

	// Compressed Size
	if (in[1] & 0x40) {
		return_if_error(lzma_vli_decode(&block->compressed_size,
				NULL, in, &in_pos, in_size));

		// Validate Compressed Size. This checks that it isn't zero
		// and that the total size of the Block is a valid VLI.
		if (lzma_block_unpadded_size(block) == 0)
			return LZMA_DATA_ERROR;
	} else {
		block->compressed_size = LZMA_VLI_UNKNOWN;
	}

	// Uncompressed Size
	if (in[1] & 0x80)
		return_if_error(lzma_vli_decode(&block->uncompressed_size,
				NULL, in, &in_pos, in_size));
	else
		block->uncompressed_size = LZMA_VLI_UNKNOWN;

	// Filter Flags
	for (i = 0; i < filter_count; ++i) {
		const lzma_ret ret = lzma_filter_flags_decode(
				&block->filters[i], allocator,
				in, &in_pos, in_size);
		if (ret != LZMA_OK) {
			// Free any filter options decoded so far.
			free_properties(block, allocator);
			return ret;
		}
	}

	// Padding — everything up to the CRC32 must be zero bytes.
	while (in_pos < in_size) {
		if (in[in_pos++] != 0x00) {
			free_properties(block, allocator);

			// Possibly some new field present so use
			// LZMA_OPTIONS_ERROR instead of LZMA_DATA_ERROR.
			return LZMA_OPTIONS_ERROR;
		}
	}

	return LZMA_OK;
}
// Initialize (or re-initialize) the .xz Stream encoder: allocate the
// coder on first use, set up the Index, encode the Stream Header into the
// internal buffer, and validate the filter chain by initializing the
// Block encoder.
static lzma_ret
stream_encoder_init(lzma_next_coder *next, const lzma_allocator *allocator,
		const lzma_filter *filters, lzma_check check)
{
	lzma_next_coder_init(&stream_encoder_init, next, allocator);

	if (filters == NULL)
		return LZMA_PROG_ERROR;

	if (next->coder == NULL) {
		next->coder = lzma_alloc(sizeof(lzma_coder), allocator);
		if (next->coder == NULL)
			return LZMA_MEM_ERROR;

		next->code = &stream_encode;
		next->end = &stream_encoder_end;
		next->update = &stream_encoder_update;

		// Mark sub-coders as uninitialized so that a failure here
		// doesn't free garbage pointers later.
		next->coder->filters[0].id = LZMA_VLI_UNKNOWN;
		next->coder->block_encoder = LZMA_NEXT_CODER_INIT;
		next->coder->index_encoder = LZMA_NEXT_CODER_INIT;
		next->coder->index = NULL;
	}

	// Basic initializations
	next->coder->sequence = SEQ_STREAM_HEADER;
	next->coder->block_options.version = 0;
	next->coder->block_options.check = check;

	// Initialize the Index
	lzma_index_end(next->coder->index, allocator);
	next->coder->index = lzma_index_init(allocator);
	if (next->coder->index == NULL)
		return LZMA_MEM_ERROR;

	// Encode the Stream Header
	lzma_stream_flags stream_flags = {
		.version = 0,
		.check = check,
	};
	return_if_error(lzma_stream_header_encode(
			&stream_flags, next->coder->buffer));

	next->coder->buffer_pos = 0;
	next->coder->buffer_size = LZMA_STREAM_HEADER_SIZE;

	// Initialize the Block encoder. This way we detect unsupported
	// filter chains when initializing the Stream encoder instead of
	// giving an error after Stream Header has already been written out.
	return stream_encoder_update(
			next->coder, allocator, filters, NULL);
}


// Public entry point: set up *strm as a .xz Stream encoder and declare
// which actions the application may use.
extern LZMA_API(lzma_ret)
lzma_stream_encoder(lzma_stream *strm,
		const lzma_filter *filters, lzma_check check)
{
	lzma_next_strm_init(stream_encoder_init, strm, filters, check);

	strm->internal->supported_actions[LZMA_RUN] = true;
	strm->internal->supported_actions[LZMA_SYNC_FLUSH] = true;
	strm->internal->supported_actions[LZMA_FULL_FLUSH] = true;
	strm->internal->supported_actions[LZMA_FINISH] = true;

	return LZMA_OK;
}
// Single-call Block encoding: compress *in into a complete Block
// (Header + compressed data + Padding + Check) at out + *out_pos.
// Falls back to LZMA2 uncompressed chunks if the data doesn't compress.
lzma_block_buffer_encode(lzma_block *block, lzma_allocator *allocator,
		const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
{
	size_t check_size;
	lzma_ret ret;
	size_t i;

	// Validate the arguments.
	if (block == NULL || (in == NULL && in_size != 0) || out == NULL
			|| out_pos == NULL || *out_pos > out_size)
		return LZMA_PROG_ERROR;

	// The contents of the structure may depend on the version so
	// check the version before validating the contents of *block.
	if (block->version != 0)
		return LZMA_OPTIONS_ERROR;

	if ((unsigned int)(block->check) > LZMA_CHECK_ID_MAX
			|| block->filters == NULL)
		return LZMA_PROG_ERROR;

	if (!lzma_check_is_supported(block->check))
		return LZMA_UNSUPPORTED_CHECK;

	// Size of a Block has to be a multiple of four, so limit the size
	// here already. This way we don't need to check it again when adding
	// Block Padding.
	out_size -= (out_size - *out_pos) & 3;

	// Get the size of the Check field.
	check_size = lzma_check_size(block->check);
	assert(check_size != UINT32_MAX);

	// Reserve space for the Check field.
	if (out_size - *out_pos <= check_size)
		return LZMA_BUF_ERROR;

	out_size -= check_size;

	// Do the actual compression.
	ret = block_encode_normal(block, allocator,
			in, in_size, out, out_pos, out_size);
	if (ret != LZMA_OK) {
		// If the error was something else than output buffer
		// becoming full, return the error now.
		if (ret != LZMA_BUF_ERROR)
			return ret;

		// The data was uncompressible (at least with the options
		// given to us) or the output buffer was too small. Use the
		// uncompressed chunks of LZMA2 to wrap the data into a valid
		// Block. If we haven't been given enough output space, even
		// this may fail.
		return_if_error(block_encode_uncompressed(block, in, in_size,
				out, out_pos, out_size));
	}

	assert(*out_pos <= out_size);

	// Block Padding. No buffer overflow here, because we already adjusted
	// out_size so that (out_size - out_start) is a multiple of four.
	// Thus, if the buffer is full, the loop body can never run.
	for (i = (size_t)(block->compressed_size); i & 3; ++i) {
		assert(*out_pos < out_size);
		out[(*out_pos)++] = 0x00;
	}

	// If there's no Check field, we are done now.
	if (check_size > 0) {
		// Calculate the integrity check. We reserved space for
		// the Check field earlier so we don't need to check for
		// available output space here.
		lzma_check_state check;
		lzma_check_init(&check, block->check);
		lzma_check_update(&check, block->check, in, in_size);
		lzma_check_finish(&check, block->check);

		// Store the check both in the Block structure and the output.
		memcpy(block->raw_check, check.buffer.u8, check_size);
		memcpy(out + *out_pos, check.buffer.u8, check_size);
		*out_pos += check_size;
	}

	return LZMA_OK;
}
// Compress *in into a Block at out + *out_pos using the normal (raw
// encoder) path. Writes the Block Header after compression succeeds,
// when the real Compressed Size is known.
static lzma_ret
block_encode_normal(lzma_block *block, lzma_allocator *allocator,
		const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
{
	size_t out_start;
	lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT;
	lzma_ret ret;

	// Find out the size of the Block Header. A worst-case Compressed
	// Size is used here; the real value is filled in after encoding.
	block->compressed_size = lzma2_bound(in_size);
	if (block->compressed_size == 0)
		return LZMA_DATA_ERROR;

	block->uncompressed_size = in_size;
	return_if_error(lzma_block_header_size(block));

	// Reserve space for the Block Header and skip it for now.
	if (out_size - *out_pos <= block->header_size)
		return LZMA_BUF_ERROR;

	out_start = *out_pos;
	*out_pos += block->header_size;

	// Limit out_size so that we stop encoding if the output would grow
	// bigger than what uncompressed Block would be.
	if (out_size - *out_pos > block->compressed_size)
		out_size = *out_pos + block->compressed_size;

	// TODO: In many common cases this could be optimized to use
	// significantly less memory.
	ret = lzma_raw_encoder_init(
			&raw_encoder, allocator, block->filters);

	if (ret == LZMA_OK) {
		size_t in_pos = 0;
		ret = raw_encoder.code(raw_encoder.coder, allocator,
				in, &in_pos, in_size,
				out, out_pos, out_size, LZMA_FINISH);
	}

	// NOTE: This needs to be run even if lzma_raw_encoder_init() failed.
	lzma_next_end(&raw_encoder, allocator);

	if (ret == LZMA_STREAM_END) {
		// Compression was successful. Write the Block Header.
		block->compressed_size
				= *out_pos - (out_start + block->header_size);
		ret = lzma_block_header_encode(block, out + out_start);
		if (ret != LZMA_OK)
			ret = LZMA_PROG_ERROR;

	} else if (ret == LZMA_OK) {
		// Output buffer became full.
		ret = LZMA_BUF_ERROR;
	}

	// Reset *out_pos if something went wrong.
	if (ret != LZMA_OK)
		*out_pos = out_start;

	return ret;
}
// Single-call .xz Stream encoding: write Stream Header, at most one
// Block, Index, and Stream Footer into *out. *out_pos_ptr is updated
// only when the whole Stream was written successfully.
lzma_stream_buffer_encode(lzma_filter *filters, lzma_check check,
		lzma_allocator *allocator, const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos_ptr, size_t out_size)
{
	// Sanity checks
	if (filters == NULL || (unsigned int)(check) > LZMA_CHECK_ID_MAX
			|| (in == NULL && in_size != 0) || out == NULL
			|| out_pos_ptr == NULL || *out_pos_ptr > out_size)
		return LZMA_PROG_ERROR;

	if (!lzma_check_is_supported(check))
		return LZMA_UNSUPPORTED_CHECK;

	// Note for the paranoids: Index encoder prevents the Stream from
	// getting too big and still being accepted with LZMA_OK, and Block
	// encoder catches if the input is too big. So we don't need to
	// separately check if the buffers are too big.

	// Use a local copy. We update *out_pos_ptr only if everything
	// succeeds.
	size_t out_pos = *out_pos_ptr;

	// Check that there's enough space for both Stream Header and
	// Stream Footer.
	if (out_size - out_pos <= 2 * LZMA_STREAM_HEADER_SIZE)
		return LZMA_BUF_ERROR;

	// Reserve space for Stream Footer so we don't need to check for
	// available space again before encoding Stream Footer.
	out_size -= LZMA_STREAM_HEADER_SIZE;

	// Encode the Stream Header.
	lzma_stream_flags stream_flags = {
		.version = 0,
		.check = check,
	};
	if (lzma_stream_header_encode(&stream_flags, out + out_pos)
			!= LZMA_OK)
		return LZMA_PROG_ERROR;

	out_pos += LZMA_STREAM_HEADER_SIZE;

	// Encode a Block but only if there is at least one byte of input.
	lzma_block block = {
		.version = 0,
		.check = check,
		.filters = filters,
	};
	if (in_size > 0)
		return_if_error(lzma_block_buffer_encode(&block, allocator,
				in, in_size, out, &out_pos, out_size));

	// Index
	{
		// Create an Index. It will have one Record if there was
		// at least one byte of input to encode. Otherwise the
		// Index will be empty.
		lzma_index *i = lzma_index_init(allocator);
		if (i == NULL)
			return LZMA_MEM_ERROR;

		lzma_ret ret = LZMA_OK;
		if (in_size > 0)
			ret = lzma_index_append(i, allocator,
					lzma_block_unpadded_size(&block),
					block.uncompressed_size);

		// If adding the Record was successful, encode the Index
		// and get its size which will be stored into Stream Footer.
		if (ret == LZMA_OK) {
			ret = lzma_index_buffer_encode(
					i, out, &out_pos, out_size);
			stream_flags.backward_size = lzma_index_size(i);
		}

		lzma_index_end(i, allocator);

		if (ret != LZMA_OK)
			return ret;
	}

	// Stream Footer. We have already reserved space for this.
	if (lzma_stream_footer_encode(&stream_flags, out + out_pos)
			!= LZMA_OK)
		return LZMA_PROG_ERROR;

	out_pos += LZMA_STREAM_HEADER_SIZE;

	// Everything went fine, make the new output position available
	// to the application.
	*out_pos_ptr = out_pos;
	return LZMA_OK;
}