unsigned int hap_decode_single_texture(const void *texture_section, uint32_t texture_section_length,
                                       unsigned int texture_section_type,
                                       HapDecodeCallback callback, void *info,
                                       void *outputBuffer, unsigned long outputBufferBytes,
                                       unsigned long *outputBufferBytesUsed,
                                       unsigned int *outputBufferTextureFormat)
{
    int result = HapResult_No_Error;
    unsigned int textureFormat;
    unsigned int compressor;
    size_t bytesUsed = 0;

    /*
     One top-level section type describes texture-format and second-stage compression.
     Hap compressor/format constants can be unpacked by reading the top and bottom four bits.
     */
    compressor = hap_top_4_bits(texture_section_type);
    textureFormat = hap_bottom_4_bits(texture_section_type);

    /* Pass the texture format out */
    *outputBufferTextureFormat = hap_texture_format_constant_for_format_identifier(textureFormat);
    if (*outputBufferTextureFormat == 0) {
        return HapResult_Bad_Frame;
    }

    if (compressor == kHapCompressorComplex) {
        /*
         The top-level section should contain a Decode Instructions Container followed by frame data
         */
        int chunk_count = 0;
        const void *compressors = NULL;
        const void *chunk_sizes = NULL;
        const void *chunk_offsets = NULL;
        const char *frame_data = NULL;

        result = hap_decode_header_complex_instructions(texture_section, texture_section_length,
                                                        &chunk_count, &compressors, &chunk_sizes,
                                                        &chunk_offsets, &frame_data);
        if (result != HapResult_No_Error) {
            return result;
        }

        if (chunk_count > 0) {
            /*
             Step through the chunks, storing information for their decompression
             */
            HapChunkDecodeInfo *chunk_info = (HapChunkDecodeInfo *)malloc(sizeof(HapChunkDecodeInfo) * chunk_count);
            size_t running_compressed_chunk_size = 0;
            size_t running_uncompressed_chunk_size = 0;
            int i;

            if (chunk_info == NULL) {
                return HapResult_Internal_Error;
            }

            for (i = 0; i < chunk_count; i++) {
                chunk_info[i].compressor = *(((uint8_t *)compressors) + i);
                chunk_info[i].compressed_chunk_size = hap_read_4_byte_uint(((uint8_t *)chunk_sizes) + (i * 4));

                if (chunk_offsets) {
                    chunk_info[i].compressed_chunk_data = frame_data + hap_read_4_byte_uint(((uint8_t *)chunk_offsets) + (i * 4));
                } else {
                    chunk_info[i].compressed_chunk_data = frame_data + running_compressed_chunk_size;
                }
                running_compressed_chunk_size += chunk_info[i].compressed_chunk_size;

                if (chunk_info[i].compressor == kHapCompressorSnappy) {
                    snappy_status snappy_result = snappy_uncompressed_length(chunk_info[i].compressed_chunk_data,
                                                                             chunk_info[i].compressed_chunk_size,
                                                                             &(chunk_info[i].uncompressed_chunk_size));
                    if (snappy_result != SNAPPY_OK) {
                        switch (snappy_result) {
                            case SNAPPY_INVALID_INPUT:
                                result = HapResult_Bad_Frame;
                                break;
                            default:
                                result = HapResult_Internal_Error;
                                break;
                        }
                        break;
                    }
                } else {
                    chunk_info[i].uncompressed_chunk_size = chunk_info[i].compressed_chunk_size;
                }

                chunk_info[i].uncompressed_chunk_data = (char *)(((uint8_t *)outputBuffer) + running_uncompressed_chunk_size);
                running_uncompressed_chunk_size += chunk_info[i].uncompressed_chunk_size;
            }

            if (result == HapResult_No_Error && running_uncompressed_chunk_size > outputBufferBytes) {
                result = HapResult_Buffer_Too_Small;
            }

            if (result == HapResult_No_Error) {
                /*
                 Perform decompression
                 */
                bytesUsed = running_uncompressed_chunk_size;

                if (chunk_count == 1) {
                    /* We don't invoke the callback for one chunk, just decode it directly */
                    hap_decode_chunk(chunk_info, 0);
                } else {
                    callback((HapDecodeWorkFunction)hap_decode_chunk, chunk_info, chunk_count, info);
                }

                /*
                 Check to see if we encountered any errors and report one of them
                 */
                for (i = 0; i < chunk_count; i++) {
                    if (chunk_info[i].result != HapResult_No_Error) {
                        result = chunk_info[i].result;
                        break;
                    }
                }
            }

            free(chunk_info);

            if (result != HapResult_No_Error) {
                return result;
            }
        }
    } else if (compressor == kHapCompressorSnappy) {
        /*
         Only one section is present containing a single block of snappy-compressed texture data
         */
        snappy_status snappy_result = snappy_uncompressed_length((const char *)texture_section, texture_section_length, &bytesUsed);
        if (snappy_result != SNAPPY_OK) {
            return HapResult_Internal_Error;
        }
        if (bytesUsed > outputBufferBytes) {
            return HapResult_Buffer_Too_Small;
        }
        snappy_result = snappy_uncompress((const char *)texture_section, texture_section_length, (char *)outputBuffer, &bytesUsed);
        if (snappy_result != SNAPPY_OK) {
            return HapResult_Internal_Error;
        }
    } else if (compressor == kHapCompressorNone) {
        /*
         Only one section is present containing a single block of uncompressed texture data
         */
        bytesUsed = texture_section_length;
        if (texture_section_length > outputBufferBytes) {
            return HapResult_Buffer_Too_Small;
        }
        memcpy(outputBuffer, texture_section, texture_section_length);
    } else {
        return HapResult_Bad_Frame;
    }

    /*
     Fill out the remaining return value
     */
    if (outputBufferBytesUsed != NULL) {
        *outputBufferBytesUsed = bytesUsed;
    }

    return HapResult_No_Error;
}
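/*
 Illustrative sketch, not part of the Hap source: the section-type value used above packs the
 second-stage compressor in its top four bits and the texture-format identifier in its bottom
 four bits, which is presumably what hap_top_4_bits()/hap_bottom_4_bits() (defined elsewhere in
 this file) extract. The helper names below are hypothetical and exist only to show the bit
 arithmetic.
 */
static unsigned int example_unpack_compressor(unsigned int section_type)
{
    /* Top four bits of the low byte select the second-stage compressor */
    return (section_type & 0xF0) >> 4;
}

static unsigned int example_unpack_texture_format(unsigned int section_type)
{
    /* Bottom four bits select the texture-format identifier */
    return section_type & 0x0F;
}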
unsigned int HapDecode(const void *inputBuffer, unsigned long inputBufferBytes,
                       HapDecodeCallback callback, void *info,
                       void *outputBuffer, unsigned long outputBufferBytes,
                       unsigned long *outputBufferBytesUsed,
                       unsigned int *outputBufferTextureFormat)
{
    int result = HapResult_No_Error;
    uint32_t sectionHeaderLength;
    uint32_t sectionLength;
    unsigned int sectionType;
    unsigned int textureFormat;
    unsigned int compressor;
    const void *sectionStart;
    size_t bytesUsed = 0;

    /* Check arguments */
    if (inputBuffer == NULL || outputBuffer == NULL || outputBufferTextureFormat == NULL) {
        return HapResult_Bad_Arguments;
    }

    /*
     One top-level section type describes texture-format and second-stage compression
     */
    result = hap_read_section_header(inputBuffer, (uint32_t)inputBufferBytes, &sectionHeaderLength, &sectionLength, &sectionType);
    if (result != HapResult_No_Error) {
        return result;
    }

    /*
     Hap compressor/format constants can be unpacked by reading the top and bottom four bits.
     */
    compressor = hap_top_4_bits(sectionType);
    textureFormat = hap_bottom_4_bits(sectionType);

    if (compressor == kHapCompressorComplex && callback == NULL) {
        return HapResult_Bad_Arguments;
    }

    /* Pass the texture format out */
    *outputBufferTextureFormat = hap_texture_format_constant_for_format_identifier(textureFormat);
    if (*outputBufferTextureFormat == 0) {
        return HapResult_Bad_Frame;
    }

    sectionStart = ((uint8_t *)inputBuffer) + sectionHeaderLength;

    if (compressor == kHapCompressorComplex) {
        /*
         The top-level section should contain a Decode Instructions Container followed by frame data
         */
        const char *frame_data = NULL;
        size_t bytes_remaining = 0;
        int chunk_count = 0;
        const void *compressors = NULL;
        const void *chunk_sizes = NULL;
        const void *chunk_offsets = NULL;

        result = hap_read_section_header(sectionStart, inputBufferBytes - sectionHeaderLength, &sectionHeaderLength, &sectionLength, &sectionType);

        if (result == HapResult_No_Error && sectionType != kHapSectionDecodeInstructionsContainer) {
            result = HapResult_Bad_Frame;
        }

        if (result != HapResult_No_Error) {
            return result;
        }

        /*
         Frame data follows immediately after the Decode Instructions Container
         */
        frame_data = ((const char *)sectionStart) + sectionHeaderLength + sectionLength;

        /*
         Step through the sections inside the Decode Instructions Container
         */
        sectionStart = ((uint8_t *)sectionStart) + sectionHeaderLength;
        bytes_remaining = sectionLength;

        while (bytes_remaining > 0) {
            unsigned int section_chunk_count = 0;

            result = hap_read_section_header(sectionStart, bytes_remaining, &sectionHeaderLength, &sectionLength, &sectionType);
            if (result != HapResult_No_Error) {
                return result;
            }

            sectionStart = ((uint8_t *)sectionStart) + sectionHeaderLength;

            switch (sectionType) {
                case kHapSectionChunkSecondStageCompressorTable:
                    compressors = sectionStart;
                    section_chunk_count = sectionLength;
                    break;
                case kHapSectionChunkSizeTable:
                    chunk_sizes = sectionStart;
                    section_chunk_count = sectionLength / 4;
                    break;
                case kHapSectionChunkOffsetTable:
                    chunk_offsets = sectionStart;
                    section_chunk_count = sectionLength / 4;
                    break;
                default:
                    /* Ignore unrecognized sections */
                    break;
            }

            /*
             If we calculated a chunk count and already have one, make sure they match
             */
            if (section_chunk_count != 0) {
                if (chunk_count != 0 && section_chunk_count != chunk_count) {
                    return HapResult_Bad_Frame;
                }
                chunk_count = section_chunk_count;
            }

            sectionStart = ((uint8_t *)sectionStart) + sectionLength;
            bytes_remaining -= sectionHeaderLength + sectionLength;
        }

        /*
         The Chunk Second-Stage Compressor Table and Chunk Size Table are required
         */
        if (compressors == NULL || chunk_sizes == NULL) {
            return HapResult_Bad_Frame;
        }

        if (chunk_count > 0) {
            /*
             Step through the chunks, storing information for their decompression
             */
            HapChunkDecodeInfo *chunk_info = (HapChunkDecodeInfo *)malloc(sizeof(HapChunkDecodeInfo) * chunk_count);
            size_t running_compressed_chunk_size = 0;
            size_t running_uncompressed_chunk_size = 0;
            int i;

            if (chunk_info == NULL) {
                return HapResult_Internal_Error;
            }

            for (i = 0; i < chunk_count; i++) {
                chunk_info[i].compressor = *(((uint8_t *)compressors) + i);
                chunk_info[i].compressed_chunk_size = hap_read_4_byte_uint(((uint8_t *)chunk_sizes) + (i * 4));

                if (chunk_offsets) {
                    chunk_info[i].compressed_chunk_data = frame_data + hap_read_4_byte_uint(((uint8_t *)chunk_offsets) + (i * 4));
                } else {
                    chunk_info[i].compressed_chunk_data = frame_data + running_compressed_chunk_size;
                }
                running_compressed_chunk_size += chunk_info[i].compressed_chunk_size;

                if (chunk_info[i].compressor == kHapCompressorSnappy) {
                    snappy_status snappy_result = snappy_uncompressed_length(chunk_info[i].compressed_chunk_data,
                                                                             chunk_info[i].compressed_chunk_size,
                                                                             &(chunk_info[i].uncompressed_chunk_size));
                    if (snappy_result != SNAPPY_OK) {
                        switch (snappy_result) {
                            case SNAPPY_INVALID_INPUT:
                                result = HapResult_Bad_Frame;
                                break;
                            default:
                                result = HapResult_Internal_Error;
                                break;
                        }
                        break;
                    }
                } else {
                    chunk_info[i].uncompressed_chunk_size = chunk_info[i].compressed_chunk_size;
                }

                chunk_info[i].uncompressed_chunk_data = (char *)(((uint8_t *)outputBuffer) + running_uncompressed_chunk_size);
                running_uncompressed_chunk_size += chunk_info[i].uncompressed_chunk_size;
            }

            if (result == HapResult_No_Error && running_uncompressed_chunk_size > outputBufferBytes) {
                result = HapResult_Buffer_Too_Small;
            }

            if (result == HapResult_No_Error) {
                /*
                 Perform decompression
                 */
                bytesUsed = running_uncompressed_chunk_size;

                callback((HapDecodeWorkFunction)hap_decode_chunk, chunk_info, chunk_count, info);

                /*
                 Check to see if we encountered any errors and report one of them
                 */
                for (i = 0; i < chunk_count; i++) {
                    if (chunk_info[i].result != HapResult_No_Error) {
                        result = chunk_info[i].result;
                        break;
                    }
                }
            }

            free(chunk_info);

            if (result != HapResult_No_Error) {
                return result;
            }
        }
    } else if (compressor == kHapCompressorSnappy) {
        /*
         Only one section is present containing a single block of snappy-compressed S3 data
         */
        snappy_status snappy_result = snappy_uncompressed_length((const char *)sectionStart, sectionLength, &bytesUsed);
        if (snappy_result != SNAPPY_OK) {
            return HapResult_Internal_Error;
        }
        if (bytesUsed > outputBufferBytes) {
            return HapResult_Buffer_Too_Small;
        }
        snappy_result = snappy_uncompress((const char *)sectionStart, sectionLength, (char *)outputBuffer, &bytesUsed);
        if (snappy_result != SNAPPY_OK) {
            return HapResult_Internal_Error;
        }
    } else if (compressor == kHapCompressorNone) {
        /*
         Only one section is present containing a single block of uncompressed S3 data
         */
        bytesUsed = sectionLength;
        if (sectionLength > outputBufferBytes) {
            return HapResult_Buffer_Too_Small;
        }
        memcpy(outputBuffer, sectionStart, sectionLength);
    } else {
        return HapResult_Bad_Frame;
    }

    /*
     Fill out the remaining return value
     */
    if (outputBufferBytesUsed != NULL) {
        *outputBufferBytesUsed = bytesUsed;
    }

    return HapResult_No_Error;
}
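/*
 Usage sketch, not part of the Hap source: for frames using the "complex" (chunked) second-stage
 compression, HapDecode hands the supplied HapDecodeCallback a work function to run once per
 chunk index, as seen in the callback invocation above. A caller that does not want to thread
 the work can simply run the chunks serially, as below. The function and buffer names here are
 hypothetical; only the Hap API calls themselves appear in this file, and the callback parameter
 order is assumed from the way it is invoked above.
 */
static void example_serial_decode_callback(HapDecodeWorkFunction function, void *p, unsigned int count, void *info)
{
    unsigned int i;
    /* Decode every chunk on the calling thread; a real callback could dispatch these to a thread pool. */
    for (i = 0; i < count; i++) {
        function(p, i);
    }
}

static unsigned int example_decode_frame(const void *frame, unsigned long frame_bytes,
                                         void *dxt_buffer, unsigned long dxt_buffer_bytes)
{
    unsigned long bytes_used = 0;
    unsigned int texture_format = 0;
    unsigned int result = HapDecode(frame, frame_bytes,
                                    example_serial_decode_callback, NULL,
                                    dxt_buffer, dxt_buffer_bytes,
                                    &bytes_used, &texture_format);
    /* On success, dxt_buffer holds bytes_used bytes of texture data described by texture_format. */
    return result;
}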
unsigned int HapGetFrameTextureChunkCount(const void *inputBuffer, unsigned long inputBufferBytes, unsigned int index, int *chunk_count)
{
    unsigned int result = HapResult_No_Error;
    const void *section;
    uint32_t section_length;
    unsigned int section_type;

    *chunk_count = 0;

    /* Check arguments */
    if (inputBuffer == NULL || index > 1) {
        return HapResult_Bad_Arguments;
    }

    /*
     Locate the section at the given index, which will either be the top-level section in a
     single texture image, or one of the sections inside a multi-image top-level section.
     */
    result = hap_get_section_at_index(inputBuffer, inputBufferBytes, index, &section, &section_length, &section_type);

    if (result == HapResult_No_Error) {
        unsigned int compressor;

        /*
         One top-level section type describes texture-format and second-stage compression.
         Hap compressor/format constants can be unpacked by reading the top and bottom four bits.
         */
        compressor = hap_top_4_bits(section_type);

        if (compressor == kHapCompressorComplex) {
            /*
             The top-level section should contain a Decode Instructions Container followed by frame data
             */
            const void *compressors = NULL;
            const void *chunk_sizes = NULL;
            const void *chunk_offsets = NULL;
            const char *frame_data = NULL;

            result = hap_decode_header_complex_instructions(section, section_length, chunk_count,
                                                            &compressors, &chunk_sizes, &chunk_offsets, &frame_data);
            if (result != HapResult_No_Error) {
                return result;
            }
        } else if ((compressor == kHapCompressorSnappy) || (compressor == kHapCompressorNone)) {
            *chunk_count = 1;
        } else {
            return HapResult_Bad_Frame;
        }
    }

    return result;
}
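/*
 Usage sketch, not part of the Hap source: the chunk count reported by
 HapGetFrameTextureChunkCount can be queried before calling HapDecode to decide whether a
 multithreaded HapDecodeCallback is worth the dispatch overhead. The function name below is
 hypothetical and shown only to illustrate the call.
 */
static int example_frame_benefits_from_threading(const void *frame, unsigned long frame_bytes)
{
    int chunks = 0;
    /* Query the chunk count for the first (index 0) texture in the frame */
    if (HapGetFrameTextureChunkCount(frame, frame_bytes, 0, &chunks) != HapResult_No_Error) {
        return 0;
    }
    /* Only frames split into more than one chunk can have their chunks decoded in parallel */
    return chunks > 1;
}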