Code example #1
void WEBPImageDecoder::applyColorProfile(const uint8_t* data, size_t size, ImageFrame& buffer)
{
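    // Color-correct the rows decoded since the last call: run each new row
    // through the qcms color transform in place, then write the corrected
    // RGBA values back into the frame buffer.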
    int width;
    int decodedHeight;
    if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
        return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
    if (decodedHeight <= 0)
        return;

    if (!m_haveReadProfile) {
        readColorProfile(data, size);
        m_haveReadProfile = true;
    }

    ASSERT(width == scaledSize().width());
    ASSERT(decodedHeight <= scaledSize().height());

    for (int y = m_decodedHeight; y < decodedHeight; ++y) {
        uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(0, y));
        if (qcms_transform* transform = colorTransform())
            qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX);
        uint8_t* pixel = row;
        for (int x = 0; x < width; ++x, pixel += 4)
            buffer.setRGBA(x, y, pixel[0], pixel[1], pixel[2], pixel[3]);
    }

    m_decodedHeight = decodedHeight;
}
Code example #2
File: tif_webp.c  Project: OSGeo/gdal
static int
TWebPDecode(TIFF* tif, uint8* op, tmsize_t occ, uint16 s)
{
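  /* Append the raw strip/tile bytes to the incremental WebP decoder, then
     copy `occ` bytes of decoded scanlines (starting at the first unread row)
     into the libtiff output buffer `op`. */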
  static const char module[] = "WebPDecode";
  VP8StatusCode status = VP8_STATUS_OK;
  WebPState *sp = DecoderState(tif);
  (void) s;  

  assert(sp != NULL);
  assert(sp->state == LSTATE_INIT_DECODE);
  
  if (occ % sp->sDecBuffer.u.RGBA.stride)
  {
    TIFFErrorExt(tif->tif_clientdata, module,
                 "Fractional scanlines cannot be read");
    return 0;
  }

  status = WebPIAppend(sp->psDecoder, tif->tif_rawcp, tif->tif_rawcc);

  if (status != VP8_STATUS_OK && status != VP8_STATUS_SUSPENDED) {
    if (status == VP8_STATUS_INVALID_PARAM) {
       TIFFErrorExt(tif->tif_clientdata, module,
         "Invalid parameter used.");      
    } else if (status == VP8_STATUS_OUT_OF_MEMORY) {
      TIFFErrorExt(tif->tif_clientdata, module,
        "Out of memory.");         
    } else {
      TIFFErrorExt(tif->tif_clientdata, module,
        "Unrecognized error.");   
    }
    return 0;
  } else {
    int current_y, stride;
    uint8_t* buf;

    /* Returns the RGB/A image decoded so far */
    buf = WebPIDecGetRGB(sp->psDecoder, &current_y, NULL, NULL, &stride);
    
    if ((buf != NULL) &&
        (occ <= stride * (current_y - sp->last_y))) {
      memcpy(op, buf + (sp->last_y * stride), occ);

      tif->tif_rawcp += tif->tif_rawcc;
      tif->tif_rawcc = 0;
      sp->last_y += occ / sp->sDecBuffer.u.RGBA.stride;
      return 1;
    } else {
      TIFFErrorExt(tif->tif_clientdata, module, "Unable to decode WebP data."); 
      return 0;
    }
  }
}
Code example #3
void WEBPImageDecoder::applyPostProcessing(size_t frameIndex)
{
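    // Post-process the rows decoded since the last call: apply the color
    // transform (if any) and, for animation frames that blend atop the
    // previous frame, fix up transparent pixels by alpha-blending.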
    ImageFrame& buffer = m_frameBufferCache[frameIndex];
    int width;
    int decodedHeight;
    if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
        return; // See also https://bugs.webkit.org/show_bug.cgi?id=74062
    if (decodedHeight <= 0)
        return;

    const IntRect& frameRect = buffer.originalFrameRect();
    ASSERT_WITH_SECURITY_IMPLICATION(width == frameRect.width());
    ASSERT_WITH_SECURITY_IMPLICATION(decodedHeight <= frameRect.height());
    const int left = frameRect.x();
    const int top = frameRect.y();

#if USE(QCMSLIB)
    if (qcms_transform* transform = colorTransform()) {
        for (int y = m_decodedHeight; y < decodedHeight; ++y) {
            const int canvasY = top + y;
            uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(left, canvasY));
            qcms_transform_data_type(transform, row, row, width, QCMS_OUTPUT_RGBX);
            uint8_t* pixel = row;
            for (int x = 0; x < width; ++x, pixel += 4) {
                const int canvasX = left + x;
                buffer.setRGBA(canvasX, canvasY, pixel[0], pixel[1], pixel[2], pixel[3]);
            }
        }
    }
#endif // USE(QCMSLIB)

    // During the decoding of current frame, we may have set some pixels to be transparent (i.e. alpha < 255).
    // However, the value of each of these pixels should have been determined by blending it against the value
    // of that pixel in the previous frame if alpha blend source was 'BlendAtopPreviousFrame'. So, we correct these
    // pixels based on disposal method of the previous frame and the previous frame buffer.
    // FIXME: This could be avoided if libwebp decoder had an API that used the previous required frame
    // to do the alpha-blending by itself.
    if ((m_formatFlags & ANIMATION_FLAG) && frameIndex && buffer.alphaBlendSource() == ImageFrame::BlendAtopPreviousFrame && buffer.requiredPreviousFrameIndex() != kNotFound) {
        ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1];
        ASSERT(prevBuffer.status() == ImageFrame::FrameComplete);
        ImageFrame::DisposalMethod prevDisposalMethod = prevBuffer.disposalMethod();
        if (prevDisposalMethod == ImageFrame::DisposeKeep) { // Blend transparent pixels with pixels in previous canvas.
            for (int y = m_decodedHeight; y < decodedHeight; ++y) {
                m_blendFunction(buffer, prevBuffer, top + y, left, width);
            }
        } else if (prevDisposalMethod == ImageFrame::DisposeOverwriteBgcolor) {
            const IntRect& prevRect = prevBuffer.originalFrameRect();
            // We need to blend a transparent pixel with its value just after initFrame() call. That is:
            //   * Blend with fully transparent pixel if it belongs to prevRect <-- This is a no-op.
            //   * Blend with the pixel in the previous canvas otherwise <-- Needs alpha-blending.
            for (int y = m_decodedHeight; y < decodedHeight; ++y) {
                int canvasY = top + y;
                int left1, width1, left2, width2;
                findBlendRangeAtRow(frameRect, prevRect, canvasY, left1, width1, left2, width2);
                if (width1 > 0)
                    m_blendFunction(buffer, prevBuffer, canvasY, left1, width1);
                if (width2 > 0)
                    m_blendFunction(buffer, prevBuffer, canvasY, left2, width2);
            }
        }
    }

    m_decodedHeight = decodedHeight;
    buffer.setPixelsChanged(true);
}
Code example #4
File: WEBPImageDecoder.cpp  Project: mirror/chromium
void WEBPImageDecoder::applyPostProcessing(size_t frameIndex) {
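  // As in the previous example, post-process the rows decoded since the last
  // call, but perform color conversion with SkColorSpaceXform rather than
  // qcms.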
  ImageFrame& buffer = m_frameBufferCache[frameIndex];
  int width;
  int decodedHeight;
  if (!WebPIDecGetRGB(m_decoder, &decodedHeight, &width, 0, 0))
    return;  // See also https://bugs.webkit.org/show_bug.cgi?id=74062
  if (decodedHeight <= 0)
    return;

  const IntRect& frameRect = buffer.originalFrameRect();
  SECURITY_DCHECK(width == frameRect.width());
  SECURITY_DCHECK(decodedHeight <= frameRect.height());
  const int left = frameRect.x();
  const int top = frameRect.y();

  // TODO (msarett):
  // Here we apply the color space transformation to the dst space.
  // It does not really make sense to transform to a gamma-encoded
  // space and then immediately after, perform a linear premultiply
  // and linear blending.  Can we find a way to perform the
  // premultiplication and blending in a linear space?
  SkColorSpaceXform* xform = colorTransform();
  if (xform) {
    const SkColorSpaceXform::ColorFormat srcFormat =
        SkColorSpaceXform::kBGRA_8888_ColorFormat;
    const SkColorSpaceXform::ColorFormat dstFormat =
        SkColorSpaceXform::kRGBA_8888_ColorFormat;
    for (int y = m_decodedHeight; y < decodedHeight; ++y) {
      const int canvasY = top + y;
      uint8_t* row = reinterpret_cast<uint8_t*>(buffer.getAddr(left, canvasY));
      xform->apply(dstFormat, row, srcFormat, row, width,
                   kUnpremul_SkAlphaType);

      uint8_t* pixel = row;
      for (int x = 0; x < width; ++x, pixel += 4) {
        const int canvasX = left + x;
        buffer.setRGBA(canvasX, canvasY, pixel[0], pixel[1], pixel[2],
                       pixel[3]);
      }
    }
  }

  // During the decoding of the current frame, we may have set some pixels to be
  // transparent (i.e. alpha < 255). If the alpha blend source was
  // 'BlendAtopPreviousFrame', the values of these pixels should be determined
  // by blending them against the pixels of the corresponding previous frame.
  // Compute the correct opaque values now.
  // FIXME: This could be avoided if libwebp decoder had an API that used the
  // previous required frame to do the alpha-blending by itself.
  if ((m_formatFlags & ANIMATION_FLAG) && frameIndex &&
      buffer.getAlphaBlendSource() == ImageFrame::BlendAtopPreviousFrame &&
      buffer.requiredPreviousFrameIndex() != kNotFound) {
    ImageFrame& prevBuffer = m_frameBufferCache[frameIndex - 1];
    ASSERT(prevBuffer.getStatus() == ImageFrame::FrameComplete);
    ImageFrame::DisposalMethod prevDisposalMethod =
        prevBuffer.getDisposalMethod();
    if (prevDisposalMethod == ImageFrame::DisposeKeep) {
      // Blend transparent pixels with pixels in previous canvas.
      for (int y = m_decodedHeight; y < decodedHeight; ++y) {
        m_blendFunction(buffer, prevBuffer, top + y, left, width);
      }
    } else if (prevDisposalMethod == ImageFrame::DisposeOverwriteBgcolor) {
      const IntRect& prevRect = prevBuffer.originalFrameRect();
      // We need to blend a transparent pixel with the starting value (from just
      // after the initFrame() call). If the pixel belongs to prevRect, the
      // starting value was fully transparent, so this is a no-op. Otherwise, we
      // need to blend against the pixel from the previous canvas.
      for (int y = m_decodedHeight; y < decodedHeight; ++y) {
        int canvasY = top + y;
        int left1, width1, left2, width2;
        findBlendRangeAtRow(frameRect, prevRect, canvasY, left1, width1, left2,
                            width2);
        if (width1 > 0)
          m_blendFunction(buffer, prevBuffer, canvasY, left1, width1);
        if (width2 > 0)
          m_blendFunction(buffer, prevBuffer, canvasY, left2, width2);
      }
    }
  }

  m_decodedHeight = decodedHeight;
  buffer.setPixelsChanged(true);
}
Code example #5
File: WEBPImageDecoder.cpp  Project: 1833183060/wke
bool WEBPImageDecoder::decode(bool onlySize)
{
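    // Parse the header for the image size, then decode the bitstream into the
    // frame buffer: one-shot via WebPDecodeRGBInto() when all data has been
    // received, otherwise incrementally via WebPIUpdate()/WebPIDecGetRGB().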
    // Minimum number of bytes needed to ensure one can parse size information.
    static const size_t sizeOfHeader = 30;
    // Number of bytes per pixel.
    static const int bytesPerPixel = 3;

    if (failed())
        return false;

    const size_t dataSize = m_data->size();
    if (dataSize < sizeOfHeader)
        return true;

    int width, height;
    const uint8_t* dataBytes = reinterpret_cast<const uint8_t*>(m_data->data());
    if (!WebPGetInfo(dataBytes, dataSize, &width, &height))
        return setFailed();
    if (!ImageDecoder::isSizeAvailable() && !setSize(width, height))
        return setFailed();
    if (onlySize)
        return true;

    bool allDataReceived = isAllDataReceived();
    int stride = width * bytesPerPixel;
    ASSERT(!m_frameBufferCache.isEmpty());
    ImageFrame& buffer = m_frameBufferCache[0];
    if (buffer.status() == ImageFrame::FrameEmpty) {
        ASSERT(width == size().width());
        ASSERT(height == size().height());
        if (!buffer.setSize(width, height))
            return setFailed();
        buffer.setStatus(allDataReceived ? ImageFrame::FrameComplete : ImageFrame::FramePartial);
        // FIXME: We currently hard code false below because libwebp doesn't support alpha yet.
        buffer.setHasAlpha(false);
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
        m_rgbOutput.resize(height * stride);
    }
    int newLastVisibleRow = 0; // Last completed row.
    if (allDataReceived) {
        if (!WebPDecodeRGBInto(dataBytes, dataSize, m_rgbOutput.data(), m_rgbOutput.size(), stride))
            return setFailed();
        newLastVisibleRow = height;
    } else {
        if (!m_decoder) {
            m_decoder = WebPINewRGB(MODE_RGB, m_rgbOutput.data(), m_rgbOutput.size(), stride);
            if (!m_decoder)
                return setFailed();
        }
        const VP8StatusCode status = WebPIUpdate(m_decoder, dataBytes, dataSize);
        if (status != VP8_STATUS_OK && status != VP8_STATUS_SUSPENDED)
            return setFailed();
        if (!WebPIDecGetRGB(m_decoder, &newLastVisibleRow, 0, 0, 0))
            return setFailed();
        ASSERT(newLastVisibleRow >= 0);
        ASSERT(newLastVisibleRow <= height);
    }
    // FIXME: remove this data copy.
    for (int y = m_lastVisibleRow; y < newLastVisibleRow; ++y) {
        const uint8_t* const src = &m_rgbOutput[y * stride];
        for (int x = 0; x < width; ++x)
            buffer.setRGBA(x, y, src[bytesPerPixel * x + 0], src[bytesPerPixel * x + 1], src[bytesPerPixel * x + 2], 0xff);
    }
    m_lastVisibleRow = newLastVisibleRow;
    if (m_lastVisibleRow == height)
        buffer.setStatus(ImageFrame::FrameComplete);
    return m_lastVisibleRow == height;
}
Code example #6
File: nsWEBPDecoder.cpp  Project: AOSC-Dev/Pale-Moon
void
nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount)
{
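  // Append the new input to the incremental decoder, then copy any newly
  // completed rows from the decoder's RGBA output into mImageData,
  // swapping the red and blue channels (RGBA -> BGRA).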
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);

  // The only valid format for WebP decoding for both alpha and non-alpha
  // images is BGRA, where Opaque images have an A of 255.
  // Assume transparency for all images.
  // XXX: This could be compositor-optimized by doing a one-time check for
  // all-255 alpha pixels, but that might interfere with progressive
  // decoding. Probably not worth it?
  PostHasTransparency();
  
  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Transfer from mData to mImageData
  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      for (int pix = 0; pix < width; pix++) {
        // RGBA -> BGRA
        uint32_t DataOffset = 4 * (line * width + pix);
        mImageData[DataOffset+0] = mData[DataOffset+2];
        mImageData[DataOffset+1] = mData[DataOffset+1];
        mImageData[DataOffset+2] = mData[DataOffset+0];
        mImageData[DataOffset+3] = mData[DataOffset+3];
      }
    } 

    // Invalidate the newly decoded rows. nsIntRect takes (x, y, width, height),
    // so the height is the number of new rows, not the last row index.
    nsIntRect r(0, mLastLine, width, lastLineRead - mLastLine);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}
Code example #7
static gboolean
gdk_pixbuf__webp_image_load_increment (gpointer context,
                                       const guchar *buf, guint size,
                                       GError **error)
{
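        /* On the first call, read the header and allocate the pixbuf, the
           RGB scratch buffer, and the incremental decoder; on every call,
           append the new bytes and copy all rows decoded so far into the
           pixbuf, honouring its 4-byte row alignment. */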
        gint w, h, stride;
        WebPContext *data = (WebPContext *) context;
        g_return_val_if_fail(data != NULL, FALSE);

        if (!data->got_header) {
                gint rc;
                rc = WebPGetInfo (buf, size, &w, &h);
                if (rc == 0) {
                        g_set_error (error,
                                     GDK_PIXBUF_ERROR,
                                     GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
                                     "Cannot read WebP image header.");
                        return FALSE;
                }
                stride = w * 3;  /* TODO Update when alpha support released */
                data->got_header = TRUE;
                if (data->size_func) {
                        (* data->size_func) (&w, &h,
                                             data->user_data);
                }
                data->pixbuf = gdk_pixbuf_new (GDK_COLORSPACE_RGB,
                                               FALSE,
                                               8,
                                               w,
                                               h);
                data->decbuf = g_try_malloc (h * stride);
                if (!data->decbuf) {
                        g_set_error (error,
                                     GDK_PIXBUF_ERROR,
                                     GDK_PIXBUF_ERROR_INSUFFICIENT_MEMORY,
                                     "Cannot allocate memory for decoded image data.");
                        return FALSE;
                }
                data->idec = WebPINewRGB (MODE_RGB,
                                          data->decbuf,
                                          h * stride,
                                          stride);
                if (!data->idec) {
                        g_set_error (error,
                                     GDK_PIXBUF_ERROR,
                                     GDK_PIXBUF_ERROR_FAILED,
                                     "Cannot create WebP decoder.");
                        return FALSE;
                }
                if (data->prepare_func) {
                        (* data->prepare_func) (data->pixbuf,
                                                NULL,
                                                data->user_data);
                }
        }

        /* Append size bytes to decoder's buffer */
        const VP8StatusCode status = WebPIAppend (data->idec, buf, size);
        if (status != VP8_STATUS_SUSPENDED && status != VP8_STATUS_OK) {
                g_set_error (error,
                             GDK_PIXBUF_ERROR,
                             GDK_PIXBUF_ERROR_CORRUPT_IMAGE,
                             "WebP decoder failed with status code %d.",
                             status);
                return FALSE;
        }

        /* Decode decoder's updated buffer */
        guint8 *dec_output;
        dec_output = WebPIDecGetRGB (data->idec, &data->last_y, &w, &h, &stride);
        if (dec_output == NULL && status != VP8_STATUS_SUSPENDED) {
                g_set_error(error,
                            GDK_PIXBUF_ERROR,
                            GDK_PIXBUF_ERROR_FAILED,
                            "Bad inputs to WebP decoder.");
                return FALSE;
        }

        /* Copy decoder output to pixbuf */
        gint y, row;
        guchar *dptr;
        dptr = gdk_pixbuf_get_pixels (data->pixbuf);
        /* Pixbuf rows are 4-byte aligned while decoder rows are w * 3 bytes
           wide, so each pixbuf row ends with w % 4 padding bytes. */
        const guint8 offset = w % 4;
        for (y = 0; y < data->last_y; ++y, dptr += offset) {
                row = y * stride;
                g_memmove (dptr + row, dec_output + row, stride);
        }

        if (data->update_func) {
                (* data->update_func) (data->pixbuf, 0, 0,
                                       w,
                                       data->last_y,
                                       data->user_data);
        }
        return TRUE;
}
Code example #8
File: nsWEBPDecoder.cpp  Project: Pulfer/Pale-Moon
void
nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount, DecodeStrategy)
{
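  // Append the new input to the incremental decoder; once the first rows
  // have been decoded, allocate the destination frame, then repack each new
  // row into packed 32-bit pixels.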
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);

  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  uint32_t imagelength;
  // First incremental Image data chunk. Special handling required.
  if (mLastLine == 0 && lastLineRead > 0) {
    imgFrame* aFrame;
    nsresult res = mImage.EnsureFrame(0, 0, 0, width, height,
                                       gfxASurface::ImageFormatARGB32,
                                       (uint8_t**)&mImageData, &imagelength, &aFrame);
    if (NS_FAILED(res) || !mImageData) {
      PostDecoderError(NS_ERROR_FAILURE);
      return;
    }
  }

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      // Each ARGB32 row holds `width` packed 32-bit pixels, so index the
      // destination in 32-bit units.
      uint32_t *cptr32 = (uint32_t*)mImageData + (line * width);
      uint8_t *cptr8 = mData + (line * stride);
      for (int pix = 0; pix < width; pix++, cptr8 += 4) {
        // Repack RGBA bytes into the platform's packed pixel format.
        *cptr32++ = gfxPackedPixel(cptr8[3], cptr8[0], cptr8[1], cptr8[2]);
      }
    }

    // Invalidate the newly decoded rows. nsIntRect takes (x, y, width, height),
    // so the height is the number of new rows, not the last row index.
    nsIntRect r(0, mLastLine, width, lastLineRead - mLastLine);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}
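
All eight examples follow the same incremental pattern: create a decoder over caller-owned output memory with WebPINewRGB(), feed each newly arrived chunk with WebPIAppend(), and call WebPIDecGetRGB() to learn how many rows are complete, treating VP8_STATUS_SUSPENDED as "more input needed" rather than as an error. The sketch below distills that pattern into one self-contained function. It is not taken from any of the projects above; the function name, chunk size, and error handling are illustrative, and it assumes only libwebp's public incremental API from webp/decode.h.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <webp/decode.h>

/* Decode `data` in small slices to simulate incremental arrival. Returns a
   malloc'ed width x height RGBA buffer (caller frees) or NULL on failure. */
static uint8_t *
decode_webp_incrementally(const uint8_t *data, size_t size,
                          int *out_width, int *out_height)
{
    enum { CHUNK = 4096 };  /* illustrative slice size */
    int width, height;

    /* WebPGetInfo() needs only the header bytes to report the dimensions. */
    if (!WebPGetInfo(data, size, &width, &height))
        return NULL;

    const size_t stride = (size_t)width * 4;  /* MODE_RGBA: 4 bytes per pixel */
    uint8_t *rgba = malloc(stride * height);
    if (!rgba)
        return NULL;

    WebPIDecoder *idec = WebPINewRGB(MODE_RGBA, rgba, stride * height, (int)stride);
    if (!idec) {
        free(rgba);
        return NULL;
    }

    int last_y = 0;
    for (size_t offset = 0; offset < size;) {
        const size_t n = (size - offset < CHUNK) ? (size - offset) : CHUNK;
        const VP8StatusCode status = WebPIAppend(idec, data + offset, n);
        if (status != VP8_STATUS_OK && status != VP8_STATUS_SUSPENDED) {
            WebPIDelete(idec);  /* corrupt or unsupported bitstream */
            free(rgba);
            return NULL;
        }
        offset += n;

        /* last_y is the count of fully decoded rows: rows [0, last_y) of
           `rgba` are now safe to read, e.g. for progressive display. */
        if (WebPIDecGetRGB(idec, &last_y, NULL, NULL, NULL))
            fprintf(stderr, "decoded %d/%d rows\n", last_y, height);
    }

    WebPIDelete(idec);
    if (last_y != height) {  /* the stream was truncated */
        free(rgba);
        return NULL;
    }
    *out_width = width;
    *out_height = height;
    return rgba;
}

Because WebPIDecGetRGB() reports progress as a row count, every example above can hand partially decoded images to its caller (PostInvalidation(), update_func, setPixelsChanged(), and so on) without waiting for the full bitstream.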