/**
 * Serialize the Hap frame header into dst.
 *
 * Single-chunk frames get a simple long header carrying the chunk's
 * compressor and the texture format. Multi-chunk frames get a complex
 * header (HAP_COMP_COMPLEX) followed by a Decode Instructions Container
 * holding a per-chunk compressor table and a per-chunk size table.
 */
static void hap_write_frame_header(HapContext *ctx, uint8_t *dst, int frame_length)
{
    PutByteContext out;

    bytestream2_init_writer(&out, dst, frame_length);

    if (ctx->chunk_count != 1) {
        int n;

        /* Complex header: top section announces a Decode Instructions Container */
        hap_write_section_header(&out, HAP_HDR_LONG, frame_length - 8,
                                 HAP_COMP_COMPLEX | ctx->opt_tex_fmt);
        hap_write_section_header(&out, HAP_HDR_SHORT,
                                 hap_decode_instructions_length(ctx),
                                 HAP_ST_DECODE_INSTRUCTIONS);

        /* Second-stage compressor table: one byte per chunk */
        hap_write_section_header(&out, HAP_HDR_SHORT, ctx->chunk_count,
                                 HAP_ST_COMPRESSOR_TABLE);
        for (n = 0; n < ctx->chunk_count; n++)
            bytestream2_put_byte(&out, ctx->chunks[n].compressor >> 4);

        /* Chunk size table: one little-endian 32-bit size per chunk */
        hap_write_section_header(&out, HAP_HDR_SHORT, ctx->chunk_count * 4,
                                 HAP_ST_SIZE_TABLE);
        for (n = 0; n < ctx->chunk_count; n++)
            bytestream2_put_le32(&out, ctx->chunks[n].compressed_size);
    } else {
        /* One chunk: a simple header is enough */
        hap_write_section_header(&out, HAP_HDR_LONG, frame_length - 8,
                                 ctx->chunks[0].compressor | ctx->opt_tex_fmt);
    }
}
/**
 * Return the byte length of the frame header hap_write_frame_header()
 * will emit for the current chunk configuration.
 */
static int hap_header_length(HapContext *ctx)
{
    /* The long top section header is always present */
    int total = HAP_HDR_LONG;

    /* Multi-chunk frames additionally carry a short section header plus
       the Decode Instructions Container itself */
    if (ctx->chunk_count > 1)
        total += HAP_HDR_SHORT + hap_decode_instructions_length(ctx);

    return total;
}
/**
 * Worst-case encoded size for a frame of input_bytes, used to size the
 * output buffer before compression has actually been performed.
 *
 * For Snappy we sum snappy's worst-case bound over all chunks; otherwise
 * the payload can never exceed the input itself.
 */
static size_t hap_max_encoded_length(size_t input_bytes, unsigned int texture_format, unsigned int compressor, unsigned int chunk_count)
{
    size_t di_length;
    size_t payload_worst_case;

    chunk_count = hap_limited_chunk_count_for_frame(input_bytes, texture_format, chunk_count);
    di_length = hap_decode_instructions_length(chunk_count);

    if (compressor != HapCompressorSnappy)
    {
        payload_worst_case = input_bytes;
    }
    else
    {
        size_t bytes_per_chunk = input_bytes / chunk_count;
        payload_worst_case = snappy_max_compressed_length(bytes_per_chunk) * chunk_count;
    }

    // top section header (8) + decode instructions section header (4)
    // + decode instructions + compressed data
    return payload_worst_case + 8U + di_length + 4U;
}
unsigned int HapEncode(unsigned int count, const void **inputBuffers, unsigned long *inputBuffersBytes, unsigned int *textureFormats, unsigned int *compressors, unsigned int *chunkCounts, void *outputBuffer, unsigned long outputBufferBytes, unsigned long *outputBufferBytesUsed) { size_t top_section_header_length; size_t top_section_length; unsigned long section_length; if (count == 0 || count > 2 // A frame must contain one or two textures || inputBuffers == NULL || inputBuffersBytes == NULL || textureFormats == NULL || compressors == NULL || chunkCounts == NULL || outputBuffer == NULL || outputBufferBytes == 0 || outputBufferBytesUsed == NULL) { return HapResult_Bad_Arguments; } for (int i = 0; i < count; i++) { if (chunkCounts[i] == 0) { return HapResult_Bad_Arguments; } } if (count == 1) { // Encode without the multi-image layout return hap_encode_texture(inputBuffers[0], inputBuffersBytes[0], textureFormats[0], compressors[0], chunkCounts[0], outputBuffer, outputBufferBytes, outputBufferBytesUsed); } else if ((textureFormats[0] != HapTextureFormat_YCoCg_DXT5 && textureFormats[1] != HapTextureFormat_YCoCg_DXT5) && (textureFormats[0] != HapTextureFormat_A_RGTC1 && textureFormats[1] != HapTextureFormat_A_RGTC1)) { /* Permitted combinations: HapTextureFormat_YCoCg_DXT5 + HapTextureFormat_A_RGTC1 */ return HapResult_Bad_Arguments; } else { // Calculate the worst-case size for the top section and choose a header-length based on that top_section_length = 0; for (int i = 0; i < count; i++) { top_section_length += inputBuffersBytes[i] + hap_decode_instructions_length(chunkCounts[i]) + 4; } if (top_section_length > kHapUInt24Max) { top_section_header_length = 8U; } else { top_section_header_length = 4U; } // Encode each texture top_section_length = 0; for (int i = 0; i < count; i++) { void *section = ((uint8_t *)outputBuffer) + top_section_header_length + top_section_length; unsigned int result = hap_encode_texture(inputBuffers[i], inputBuffersBytes[i], 
textureFormats[i], compressors[i], chunkCounts[i], section, outputBufferBytes - (top_section_header_length + top_section_length), §ion_length); if (result != HapResult_No_Error) { return result; } top_section_length += section_length; } hap_write_section_header(outputBuffer, top_section_header_length, top_section_length, kHapSectionMultipleImages); *outputBufferBytesUsed = top_section_length + top_section_header_length; return HapResult_No_Error; } }
/**
 * Encode a single texture as one Hap top section into outputBuffer.
 *
 * With HapCompressorSnappy the input is split into chunkCount chunks and
 * each chunk is Snappy-compressed individually; any chunk that does not
 * shrink is stored raw, and if the whole compressed frame ends up no
 * smaller than the input, the frame is stored uncompressed instead.
 *
 * On success, writes the encoded size to *outputBufferBytesUsed and
 * returns HapResult_No_Error; otherwise returns HapResult_Bad_Arguments,
 * HapResult_Buffer_Too_Small or HapResult_Internal_Error.
 */
static unsigned int hap_encode_texture(const void *inputBuffer, unsigned long inputBufferBytes, unsigned int textureFormat, unsigned int compressor, unsigned int chunkCount, void *outputBuffer, unsigned long outputBufferBytes, unsigned long *outputBufferBytesUsed)
{
    size_t top_section_header_length;
    size_t top_section_length;
    unsigned int storedCompressor;
    unsigned int storedFormat;

    /* Check arguments */
    if (inputBuffer == NULL
        || inputBufferBytes == 0
        || (textureFormat != HapTextureFormat_RGB_DXT1
            && textureFormat != HapTextureFormat_RGBA_DXT5
            && textureFormat != HapTextureFormat_YCoCg_DXT5
            && textureFormat != HapTextureFormat_A_RGTC1
            )
        || (compressor != HapCompressorNone
            && compressor != HapCompressorSnappy
            )
        || outputBuffer == NULL
        || outputBufferBytesUsed == NULL
        )
    {
        return HapResult_Bad_Arguments;
    }
    else if (outputBufferBytes < hap_max_encoded_length(inputBufferBytes, textureFormat, compressor, chunkCount))
    {
        return HapResult_Buffer_Too_Small;
    }

    /*
     To store frames of length greater than can be expressed in three bytes, we use an eight byte
     header (the last four bytes are the frame size). We don't know the compressed size until we
     have performed compression, but we know the worst-case size (the uncompressed size), so
     choose header-length based on that.

     A simpler encoder could always use the eight-byte header variation.
     */
    if (inputBufferBytes > kHapUInt24Max)
    {
        top_section_header_length = 8U;
    }
    else
    {
        top_section_header_length = 4U;
    }

    if (compressor == HapCompressorSnappy)
    {
        /*
         We attempt to chunk as requested, and if resulting frame is larger than it is
         uncompressed then store frame uncompressed
         */
        size_t decode_instructions_length;
        size_t chunk_size, compress_buffer_remaining;
        uint8_t *second_stage_compressor_table;
        void *chunk_size_table;
        char *compressed_data;
        unsigned int i;

        // NOTE(review): presumably this also makes inputBufferBytes / chunkCount divide
        // evenly on texture-block boundaries — confirm against its definition
        chunkCount = hap_limited_chunk_count_for_frame(inputBufferBytes, textureFormat, chunkCount);
        decode_instructions_length = hap_decode_instructions_length(chunkCount);

        // Check we have space for the Decode Instructions Container
        if ((inputBufferBytes + decode_instructions_length + 4) > kHapUInt24Max)
        {
            top_section_header_length = 8U;
        }

        /*
         Layout after the top section header:
           4 bytes  Decode Instructions Container section header
           4 bytes  Second Stage Compressor Table section header
           chunkCount bytes  compressor table entries (one byte per chunk)
           4 bytes  Chunk Size Table section header
           chunkCount * 4 bytes  chunk sizes (filled in as we compress)
           ... compressed chunk data
         */
        second_stage_compressor_table = ((uint8_t *)outputBuffer) + top_section_header_length + 4 + 4;
        chunk_size_table = ((uint8_t *)outputBuffer) + top_section_header_length + 4 + 4 + chunkCount + 4;

        chunk_size = inputBufferBytes / chunkCount;

        // write the Decode Instructions section header
        hap_write_section_header(((uint8_t *)outputBuffer) + top_section_header_length, 4U, decode_instructions_length, kHapSectionDecodeInstructionsContainer);
        // write the Second Stage Compressor Table section header
        hap_write_section_header(((uint8_t *)outputBuffer) + top_section_header_length + 4U, 4U, chunkCount, kHapSectionChunkSecondStageCompressorTable);
        // write the Chunk Size Table section header
        hap_write_section_header(((uint8_t *)outputBuffer) + top_section_header_length + 4U + 4U + chunkCount, 4U, chunkCount * 4U, kHapSectionChunkSizeTable);

        compressed_data = (char *)(((uint8_t *)outputBuffer) + top_section_header_length + 4 + decode_instructions_length);
        compress_buffer_remaining = outputBufferBytes - top_section_header_length - 4 - decode_instructions_length;

        // Top section so far: DI section header + DI container; chunk data is added below
        top_section_length = 4 + decode_instructions_length;

        for (i = 0; i < chunkCount; i++)
        {
            // snappy_compress uses this as the in/out "available space" parameter
            size_t chunk_packed_length = compress_buffer_remaining;
            const char *chunk_input_start = (const char *)(((uint8_t *)inputBuffer) + (chunk_size * i));
            if (compressor == HapCompressorSnappy)
            {
                snappy_status result = snappy_compress(chunk_input_start, chunk_size, (char *)compressed_data, &chunk_packed_length);
                if (result != SNAPPY_OK)
                {
                    return HapResult_Internal_Error;
                }
            }
            if (compressor == HapCompressorNone || chunk_packed_length >= chunk_size)
            {
                // store the chunk uncompressed (compression didn't shrink it)
                memcpy(compressed_data, chunk_input_start, chunk_size);
                chunk_packed_length = chunk_size;
                second_stage_compressor_table[i] = kHapCompressorNone;
            }
            else
            {
                // ie we used snappy and saved some space
                second_stage_compressor_table[i] = kHapCompressorSnappy;
            }
            hap_write_4_byte_uint(((uint8_t *)chunk_size_table) + (i * 4), chunk_packed_length);
            compressed_data += chunk_packed_length;
            top_section_length += chunk_packed_length;
            compress_buffer_remaining -= chunk_packed_length;
        }

        if (top_section_length < inputBufferBytes + top_section_header_length)
        {
            // use the complex storage because snappy compression saved space
            storedCompressor = kHapCompressorComplex;
        }
        else
        {
            // Signal to store the frame uncompressed
            compressor = HapCompressorNone;
        }
    }

    if (compressor == HapCompressorNone)
    {
        // Raw copy of the texture straight after the top section header
        memcpy(((uint8_t *)outputBuffer) + top_section_header_length, inputBuffer, inputBufferBytes);
        top_section_length = inputBufferBytes;
        storedCompressor = kHapCompressorNone;
    }

    storedFormat = hap_texture_format_identifier_for_format_constant(textureFormat);

    // Now the final size is known, write the top section header
    hap_write_section_header(outputBuffer, top_section_header_length, top_section_length, hap_4_bit_packed_byte(storedCompressor, storedFormat));

    *outputBufferBytesUsed = top_section_length + top_section_header_length;

    return HapResult_No_Error;
}