static uint64_t compress_latc_block(const uint8_t pixels[]) { // Collect unique pixels int nUniquePixels = 0; uint8_t uniquePixels[kLATCPixelsPerBlock]; for (int i = 0; i < kLATCPixelsPerBlock; ++i) { bool foundPixel = false; for (int j = 0; j < nUniquePixels; ++j) { foundPixel = foundPixel || uniquePixels[j] == pixels[i]; } if (!foundPixel) { uniquePixels[nUniquePixels] = pixels[i]; ++nUniquePixels; } } // If there's only one unique pixel, then our compression is easy. if (1 == nUniquePixels) { return SkEndian_SwapLE64(pixels[0] | (pixels[0] << 8)); // Similarly, if there are only two unique pixels, then our compression is // easy again: place the pixels in the block header, and assign the indices // with one or zero depending on which pixel they belong to. } else if (2 == nUniquePixels) { uint64_t outBlock = 0; for (int i = kLATCPixelsPerBlock - 1; i >= 0; --i) { int idx = 0; if (pixels[i] == uniquePixels[1]) { idx = 1; } outBlock <<= 3; outBlock |= idx; } outBlock <<= 16; outBlock |= (uniquePixels[0] | (uniquePixels[1] << 8)); return SkEndian_SwapLE64(outBlock); } // Count non-maximal pixel values int nonExtremalPixels = 0; for (int i = 0; i < nUniquePixels; ++i) { if (!is_extremal(uniquePixels[i])) { ++nonExtremalPixels; } } // If all the pixels are nonmaximal then compute the palette using // the bounding box of all the pixels. if (nonExtremalPixels == nUniquePixels) { // This is really just for correctness, in all of my tests we // never take this step. We don't lose too much perf here because // most of the processing in this function is worth it for the // 1 == nUniquePixels optimization. return compress_latc_block_bb(pixels); } else { return compress_latc_block_bb_ignore_extremal(pixels); } }
// Compress a block by using the bounding box of the pixels without taking into // account the extremal values. The generated palette will contain extremal values // and fewer points along the line segment to interpolate. static uint64_t compress_latc_block_bb_ignore_extremal(const uint8_t pixels[]) { uint8_t minVal = 255; uint8_t maxVal = 0; for (int i = 0; i < kLATCPixelsPerBlock; ++i) { if (is_extremal(pixels[i])) { continue; } minVal = SkTMin(pixels[i], minVal); maxVal = SkTMax(pixels[i], maxVal); } SkASSERT(!is_extremal(minVal)); SkASSERT(!is_extremal(maxVal)); uint8_t palette[kLATCPaletteSize]; generate_latc_palette(palette, minVal, maxVal); uint64_t indices = 0; for (int i = kLATCPixelsPerBlock - 1; i >= 0; --i) { // Find the best palette index uint8_t idx = 0; if (is_extremal(pixels[i])) { if (0xFF == pixels[i]) { idx = 7; } else if (0 == pixels[i]) { idx = 6; } else { SkFAIL("Pixel is extremal but not really?!"); } } else { uint8_t bestError = abs_diff(pixels[i], palette[0]); for (int j = 1; j < kLATCPaletteSize - 2; ++j) { uint8_t error = abs_diff(pixels[i], palette[j]); if (error < bestError) { bestError = error; idx = j; } } } indices <<= 3; indices |= idx; } return SkEndian_SwapLE64( static_cast<uint64_t>(minVal) | (static_cast<uint64_t>(maxVal) << 8) | (indices << 16)); }
// Compress a block by using the bounding box of the pixels. It is assumed that // there are no extremal pixels in this block otherwise we would have used // compressBlockBBIgnoreExtremal. static uint64_t compress_latc_block_bb(const uint8_t pixels[]) { uint8_t minVal = 255; uint8_t maxVal = 0; for (int i = 0; i < kLATCPixelsPerBlock; ++i) { minVal = SkTMin(pixels[i], minVal); maxVal = SkTMax(pixels[i], maxVal); } SkASSERT(!is_extremal(minVal)); SkASSERT(!is_extremal(maxVal)); uint8_t palette[kLATCPaletteSize]; generate_latc_palette(palette, maxVal, minVal); uint64_t indices = 0; for (int i = kLATCPixelsPerBlock - 1; i >= 0; --i) { // Find the best palette index uint8_t bestError = abs_diff(pixels[i], palette[0]); uint8_t idx = 0; for (int j = 1; j < kLATCPaletteSize; ++j) { uint8_t error = abs_diff(pixels[i], palette[j]); if (error < bestError) { bestError = error; idx = j; } } indices <<= 3; indices |= idx; } return SkEndian_SwapLE64( static_cast<uint64_t>(maxVal) | (static_cast<uint64_t>(minVal) << 8) | (indices << 16)); }
static uint64_t compress_latc_block(uint8_t block[16]) { // Just do a simple min/max but choose which of the // two palettes is better uint8_t maxVal = 0; uint8_t minVal = 255; for (int i = 0; i < 16; ++i) { maxVal = SkMax32(maxVal, block[i]); minVal = SkMin32(minVal, block[i]); } // Generate palettes uint8_t palettes[2][8]; // Straight linear ramp palettes[0][0] = maxVal; palettes[0][1] = minVal; for (int i = 1; i < 7; ++i) { palettes[0][i+1] = ((7-i)*maxVal + i*minVal) / 7; } // Smaller linear ramp with min and max byte values at the end. palettes[1][0] = minVal; palettes[1][1] = maxVal; for (int i = 1; i < 5; ++i) { palettes[1][i+1] = ((5-i)*maxVal + i*minVal) / 5; } palettes[1][6] = 0; palettes[1][7] = 255; // Figure out which of the two is better: // - accumError holds the accumulated error for each pixel from // the associated palette // - indices holds the best indices for each palette in the // bottom 48 (16*3) bits. uint32_t accumError[2] = { 0, 0 }; uint64_t indices[2] = { 0, 0 }; for (int i = 15; i >= 0; --i) { // For each palette: // 1. Retreive the result of this pixel // 2. Store the error in accumError // 3. Store the minimum palette index in indices. for (int p = 0; p < 2; ++p) { uint32_t result = compute_error(block[i], palettes[p]); accumError[p] += (result >> 8); indices[p] <<= 3; indices[p] |= result & 7; } } SkASSERT(indices[0] < (static_cast<uint64_t>(1) << 48)); SkASSERT(indices[1] < (static_cast<uint64_t>(1) << 48)); uint8_t paletteIdx = (accumError[0] > accumError[1]) ? 0 : 1; // Assemble the compressed block. uint64_t result = 0; // Jam the first two palette entries into the bottom 16 bits of // a 64 bit integer. Based on the palette that we chose, one will // be larger than the other and it will select the proper palette. result |= static_cast<uint64_t>(palettes[paletteIdx][0]); result |= static_cast<uint64_t>(palettes[paletteIdx][1]) << 8; // Jam the indices into the top 48 bits. 
result |= indices[paletteIdx] << 16; // We assume everything is little endian, if it's not then make it so. return SkEndian_SwapLE64(result); }
/**
 * Return the first 8 bytes of a bytearray, decoded as a little-endian uint64.
 *
 * The value is assembled byte by byte instead of reinterpret_cast'ing the
 * pointer to a uint64_t: that cast was undefined behavior for pointers not
 * aligned to 8 bytes and violated strict aliasing. The manual assembly is
 * portable and endian-independent, and yields the exact same value that
 * SkEndian_SwapLE64(*(const uint64_t*)bytearray) produced on both little-
 * and big-endian hosts.
 */
static inline uint64_t first_8_bytes_as_uint64(const uint8_t *bytearray) {
    uint64_t value = 0;
    // Fold in bytes from most- to least-significant so byte 0 of the input
    // lands in the low byte of the result (little-endian interpretation).
    for (int i = 7; i >= 0; --i) {
        value = (value << 8) | bytearray[i];
    }
    return value;
}
/**
 * Make sure that if we pass in a solid color bitmap that we get the appropriate results
 */
DEF_TEST(CompressLATC, reporter) {
    // Each encoded LATC block is 8 bytes and covers a 4x4 group of pixels.
    const SkTextureCompressor::Format kLATCFormat = SkTextureCompressor::kLATC_Format;
    static const int kLATCEncodedBlockSize = 8;

    // Build an 8x8 alpha-only (A8) bitmap to feed the compressor.
    SkBitmap bitmap;
    static const int kWidth = 8;
    static const int kHeight = 8;
    SkImageInfo info = SkImageInfo::MakeA8(kWidth, kHeight);

    bool setInfoSuccess = bitmap.setInfo(info);
    REPORTER_ASSERT(reporter, setInfoSuccess);

    bool allocPixelsSuccess = bitmap.allocPixels(info);
    REPORTER_ASSERT(reporter, allocPixelsSuccess);
    bitmap.unlockPixels();

    // The bitmap dimensions must be a whole number of compressor blocks.
    int latcDimX, latcDimY;
    SkTextureCompressor::GetBlockDimensions(kLATCFormat, &latcDimX, &latcDimY);
    REPORTER_ASSERT(reporter, kWidth % latcDimX == 0);
    REPORTER_ASSERT(reporter, kHeight % latcDimY == 0);
    const size_t kSizeToBe =
        SkTextureCompressor::GetCompressedDataSize(kLATCFormat, kWidth, kHeight);
    // 16 pixels compress into one kLATCEncodedBlockSize-byte block.
    REPORTER_ASSERT(reporter, kSizeToBe == ((kWidth*kHeight*kLATCEncodedBlockSize)/16));
    REPORTER_ASSERT(reporter, (kSizeToBe % kLATCEncodedBlockSize) == 0);

    // Exercise every possible luminance value as a solid-color bitmap.
    for (int lum = 0; lum < 256; ++lum) {
        bitmap.lockPixels();
        uint8_t* pixels = reinterpret_cast<uint8_t*>(bitmap.getPixels());
        REPORTER_ASSERT(reporter, NULL != pixels);

        if (NULL == pixels) {
            bitmap.unlockPixels();
            continue;
        }

        // Fill the whole bitmap with the current luminance value.
        for (int i = 0; i < kWidth*kHeight; ++i) {
            pixels[i] = lum;
        }
        bitmap.unlockPixels();

        SkAutoDataUnref latcData(
            SkTextureCompressor::CompressBitmapToFormat(bitmap, kLATCFormat));
        REPORTER_ASSERT(reporter, NULL != latcData);
        if (NULL == latcData) {
            continue;
        }
        REPORTER_ASSERT(reporter, kSizeToBe == latcData->size());

        // Make sure that it all matches a given block encoding. Since we have
        // COMPRESS_LATC_FAST defined in SkTextureCompressor_LATC.cpp, we are using
        // an approximation scheme that optimizes for speed against coverage maps.
        // That means that each palette in the encoded block is exactly the same,
        // and that the three bits saved per pixel are computed from the top three
        // bits of the luminance value.
        const uint64_t kIndexEncodingMap[8] = { 1, 7, 6, 5, 4, 3, 2, 0 };
        // Only the top three bits of the luminance pick the palette index.
        const uint64_t kIndex = kIndexEncodingMap[lum >> 5];
        // Expected block: palette header (255 in the low byte, 0 in the next)
        // followed by the same 3-bit index repeated for all 16 pixels.
        // NOTE(review): only `255` appears here, so the second palette byte is
        // presumably 0 - confirm against the fast encoder's palette layout.
        const uint64_t kConstColorEncoding =
            SkEndian_SwapLE64(
                255 |
                (kIndex << 16) | (kIndex << 19) | (kIndex << 22) | (kIndex << 25) |
                (kIndex << 28) | (kIndex << 31) | (kIndex << 34) | (kIndex << 37) |
                (kIndex << 40) | (kIndex << 43) | (kIndex << 46) | (kIndex << 49) |
                (kIndex << 52) | (kIndex << 55) | (kIndex << 58) | (kIndex << 61));

        // Every 8-byte block in the compressed output must match exactly.
        const uint64_t* blockPtr = reinterpret_cast<const uint64_t*>(latcData->data());
        for (size_t i = 0; i < (kSizeToBe/8); ++i) {
            REPORTER_ASSERT(reporter, blockPtr[i] == kConstColorEncoding);
        }
    }
}