void setUpInterlaceBuffer(int height) {
    // Allocate a buffer large enough to hold every row of the image; interlaced decoding
    // fills it in across multiple passes.
    fPng_rowbytes = png_get_rowbytes(this->png_ptr(), this->info_ptr());
    fInterlaceBuffer.reset(fPng_rowbytes * height);
    fInterlacedComplete = false;
}
SkCodec::Result SkPngCodec::onGetPixels(const SkImageInfo& requestedInfo, void* dst,
                                        size_t dstRowBytes, const Options& options,
                                        SkPMColor ctable[], int* ctableCount,
                                        int* rowsDecoded) {
    if (!conversion_possible(requestedInfo, this->getInfo())) {
        return kInvalidConversion;
    }
    if (options.fSubset) {
        // Subsets are not supported.
        return kUnimplemented;
    }

    // Note that ctable and ctableCount may be modified if there is a color table.
    const Result result = this->initializeSwizzler(requestedInfo, options, ctable, ctableCount);
    if (result != kSuccess) {
        return result;
    }

    const int width = requestedInfo.width();
    const int height = requestedInfo.height();
    const int bpp = bytes_per_pixel(this->getEncodedInfo().bitsPerPixel());
    const size_t srcRowBytes = width * bpp;

    // FIXME: Could we use the return value of setjmp to specify the type of error?
    int row = 0;
    // This must be declared above the call to setjmp to avoid memory leaks on incomplete images.
    SkAutoTMalloc<uint8_t> storage;
    if (setjmp(png_jmpbuf(fPng_ptr))) {
        // Assume that any error that occurs while reading rows is caused by an incomplete input.
        if (fNumberPasses > 1) {
            // FIXME (msarett): Handle incomplete interlaced pngs.
            return (row == height) ? kSuccess : kInvalidInput;
        }
        // FIXME: We do a poor job on incomplete pngs compared to other decoders (ex: Chromium,
        // Ubuntu Image Viewer). This is because we use the default buffer size in libpng (8192
        // bytes), and if we can't fill the buffer, we immediately fail.
        // For example, if we try to read 8192 bytes and the image (incorrectly) only contains
        // half that, those bytes may still have held a non-zero number of lines; we fail
        // immediately instead of decoding those lines first and failing afterwards.
        // The read function that we provide for libpng has no way of indicating that we have
        // made a partial read.
        // Making our buffer size smaller improves our incomplete decodes, but what impact does
        // it have on regular decode performance? Should we investigate using a different API
        // instead of png_read_row? Chromium uses png_process_data.
        *rowsDecoded = row;
        return (row == height) ? kSuccess : kIncompleteInput;
    }

    // FIXME: We could split these out based on subclass.
    void* dstRow = dst;
    if (fNumberPasses > 1) {
        // Interlaced: read every row for each pass into a temporary buffer. With interlace
        // handling enabled, libpng de-interlaces the image into this buffer across the passes.
        storage.reset(height * srcRowBytes);
        uint8_t* const base = storage.get();

        for (int i = 0; i < fNumberPasses; i++) {
            uint8_t* srcRow = base;
            for (int y = 0; y < height; y++) {
                png_read_row(fPng_ptr, srcRow, nullptr);
                srcRow += srcRowBytes;
            }
        }

        // Now swizzle it.
        uint8_t* srcRow = base;
        for (; row < height; row++) {
            fSwizzler->swizzle(dstRow, srcRow);
            dstRow = SkTAddOffset<void>(dstRow, dstRowBytes);
            srcRow += srcRowBytes;
        }
    } else {
        // Non-interlaced: decode and swizzle one row at a time.
        storage.reset(srcRowBytes);
        uint8_t* srcRow = storage.get();
        for (; row < height; row++) {
            png_read_row(fPng_ptr, srcRow, nullptr);
            fSwizzler->swizzle(dstRow, srcRow);
            dstRow = SkTAddOffset<void>(dstRow, dstRowBytes);
        }
    }

    // Read the rest of the file, and get additional comment and time chunks in info_ptr.
    png_read_end(fPng_ptr, fInfo_ptr);

    return kSuccess;
}