void
TextureImageTextureSourceOGL::EnsureBuffer(const nsIntSize& aSize,
                                           gfxContentType aContentType)
{
  if (!mTexImage ||
      mTexImage->GetSize() != aSize.ToIntSize() ||
      mTexImage->GetContentType() != aContentType) {
    mTexImage = CreateTextureImage(mCompositor->gl(),
                                   aSize.ToIntSize(),
                                   aContentType,
                                   LOCAL_GL_CLAMP_TO_EDGE,
                                   FlagsToGLFlags(mFlags));
  }
  mTexImage->Resize(aSize.ToIntSize());
}
Example #2
void
TextureImageTextureSourceOGL::EnsureBuffer(const nsIntSize& aSize,
                                           gfxContentType aContentType)
{
  if (!mTexImage ||
      mTexImage->GetSize() != aSize.ToIntSize() ||
      mTexImage->GetContentType() != aContentType) {
    mTexImage = CreateTextureImage(mGL,
                                   aSize.ToIntSize(),
                                   aContentType,
                                   WrapMode(mGL, mFlags & TEXTURE_ALLOW_REPEAT),
                                   FlagsToGLFlags(mFlags));
  }
  mTexImage->Resize(aSize.ToIntSize());
}
Example #3
NS_IMETHODIMP_(DrawResult)
DynamicImage::Draw(gfxContext* aContext,
                   const nsIntSize& aSize,
                   const ImageRegion& aRegion,
                   uint32_t aWhichFrame,
                   GraphicsFilter aFilter,
                   const Maybe<SVGImageContext>& aSVGContext,
                   uint32_t aFlags)
{
  MOZ_ASSERT(!aSize.IsEmpty(), "Unexpected empty size");

  gfxIntSize drawableSize(mDrawable->Size());

  if (aSize == drawableSize) {
    gfxUtils::DrawPixelSnapped(aContext, mDrawable, drawableSize, aRegion,
                               SurfaceFormat::B8G8R8A8, aFilter);
    return DrawResult::SUCCESS;
  }

  gfxSize scale(double(aSize.width) / drawableSize.width,
                double(aSize.height) / drawableSize.height);

  ImageRegion region(aRegion);
  region.Scale(1.0 / scale.width, 1.0 / scale.height);

  gfxContextMatrixAutoSaveRestore saveMatrix(aContext);
  aContext->Multiply(gfxMatrix::Scaling(scale.width, scale.height));

  gfxUtils::DrawPixelSnapped(aContext, mDrawable, drawableSize, region,
                             SurfaceFormat::B8G8R8A8, aFilter);
  return DrawResult::SUCCESS;
}
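A quick worked example of the scale bookkeeping may help: the region is divided by the scale so it is expressed in the drawable's own coordinate space, while the context transform multiplies by the same scale so the final output still covers aSize. The numbers below are purely illustrative.

// Illustrative numbers (not from the snippet): drawable is 100x50 and the
// caller asks for aSize = 200x100.
//   scale  = (200.0 / 100, 100.0 / 50) = (2.0, 2.0)
//   region = aRegion scaled by (1/2, 1/2)   -> now in drawable space
//   aContext->Multiply(Scaling(2.0, 2.0))   -> drawable space maps to 200x100
// DrawPixelSnapped() then draws the drawable at its native 100x50 size, and
// the context transform stretches the result to the requested 200x100.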
Example #4
already_AddRefed<Image>
CreateSharedRGBImage(ImageContainer *aImageContainer,
                     nsIntSize aSize,
                     gfxImageFormat aImageFormat)
{
  NS_ASSERTION(aImageFormat == gfxImageFormatARGB32 ||
               aImageFormat == gfxImageFormatRGB24 ||
               aImageFormat == gfxImageFormatRGB16_565,
               "RGB formats supported only");

  if (!aImageContainer) {
    NS_WARNING("No ImageContainer to allocate DeprecatedSharedRGBImage");
    return nullptr;
  }

  ImageFormat format = SHARED_RGB;
  nsRefPtr<Image> image = aImageContainer->CreateImage(&format, 1);

  if (!image) {
    NS_WARNING("Failed to create DeprecatedSharedRGBImage");
    return nullptr;
  }

  if (gfxPlatform::GetPlatform()->UseDeprecatedTextures()) {
    nsRefPtr<DeprecatedSharedRGBImage> rgbImageDep = static_cast<DeprecatedSharedRGBImage*>(image.get());
    rgbImageDep->mSize = aSize.ToIntSize();
    rgbImageDep->mImageFormat = aImageFormat;

    if (!rgbImageDep->AllocateBuffer(aSize, aImageFormat)) {
      NS_WARNING("Failed to allocate shared memory for DeprecatedSharedRGBImage");
      return nullptr;
    }
    return rgbImageDep.forget();
  }
  nsRefPtr<SharedRGBImage> rgbImage = static_cast<SharedRGBImage*>(image.get());
  if (!rgbImage->Allocate(gfx::ToIntSize(aSize),
                          gfx::ImageFormatToSurfaceFormat(aImageFormat))) {
    NS_WARNING("Failed to allocate a shared image");
    return nullptr;
  }
  return image.forget();
}
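A hedged call-site sketch follows; aContainer is assumed to be an ImageContainer obtained elsewhere, and the 640x480 size and RGB24 format are arbitrary examples.

// Hypothetical call site; size and format are arbitrary examples.
nsRefPtr<Image> image =
  CreateSharedRGBImage(aContainer, nsIntSize(640, 480), gfxImageFormatRGB24);
if (!image) {
  // Shared-memory allocation failed (or aContainer was null); callers are
  // expected to fall back to a non-shared image path.
}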
Example #5
/* static */
nsresult
ImageEncoder::ExtractDataInternal(const nsAString& aType,
                                  const nsAString& aOptions,
                                  uint8_t* aImageBuffer,
                                  int32_t aFormat,
                                  const nsIntSize aSize,
                                  layers::Image* aImage,
                                  nsICanvasRenderingContextInternal* aContext,
                                  nsIInputStream** aStream,
                                  imgIEncoder* aEncoder)
{
  if (aSize.IsEmpty()) {
    return NS_ERROR_INVALID_ARG;
  }

  nsCOMPtr<nsIInputStream> imgStream;

  // get image bytes
  nsresult rv;
  if (aImageBuffer) {
    rv = ImageEncoder::GetInputStream(
      aSize.width,
      aSize.height,
      aImageBuffer,
      aFormat,
      aEncoder,
      nsPromiseFlatString(aOptions).get(),
      getter_AddRefs(imgStream));
  } else if (aContext) {
    NS_ConvertUTF16toUTF8 encoderType(aType);
    rv = aContext->GetInputStream(encoderType.get(),
                                  nsPromiseFlatString(aOptions).get(),
                                  getter_AddRefs(imgStream));
  } else if (aImage) {
    // It is safe to convert a PlanarYCbCr image from YUV to RGB off the main
    // thread. Other image formats may not convert safely off the main thread,
    // so the helper GetBRGADataSourceSurfaceSync() is used to do the
    // conversion on the main thread.
    if (aImage->GetFormat() == ImageFormat::PLANAR_YCBCR) {
      nsTArray<uint8_t> data;
      layers::PlanarYCbCrImage* ycbcrImage = static_cast<layers::PlanarYCbCrImage*> (aImage);
      gfxImageFormat format = gfxImageFormat::ARGB32;
      uint32_t stride = GetAlignedStride<16>(aSize.width * 4);
      size_t length = BufferSizeFromStrideAndHeight(stride, aSize.height);
      data.SetCapacity(length);

      gfxUtils::ConvertYCbCrToRGB(*ycbcrImage->GetData(),
                                  format,
                                  aSize,
                                  data.Elements(),
                                  stride);

      rv = aEncoder->InitFromData(data.Elements(),
                                  aSize.width * aSize.height * 4,
                                  aSize.width,
                                  aSize.height,
                                  aSize.width * 4,
                                  imgIEncoder::INPUT_FORMAT_HOSTARGB,
                                  aOptions);
    } else {
      RefPtr<gfx::DataSourceSurface> dataSurface;
      dataSurface = GetBRGADataSourceSurfaceSync(aImage);

      DataSourceSurface::MappedSurface map;
      if (!dataSurface->Map(gfx::DataSourceSurface::MapType::READ, &map)) {
        return NS_ERROR_INVALID_ARG;
      }
      rv = aEncoder->InitFromData(map.mData,
                                  aSize.width * aSize.height * 4,
                                  aSize.width,
                                  aSize.height,
                                  aSize.width * 4,
                                  imgIEncoder::INPUT_FORMAT_HOSTARGB,
                                  aOptions);
      dataSurface->Unmap();
    }

    if (NS_SUCCEEDED(rv)) {
      imgStream = do_QueryInterface(aEncoder);
    }
  } else {
    // no context, so we have to encode an empty image
    // note that if we didn't have a current context, the spec says we're
    // supposed to just return transparent black pixels of the canvas
    // dimensions.
    RefPtr<DataSourceSurface> emptyCanvas =
      Factory::CreateDataSourceSurfaceWithStride(IntSize(aSize.width, aSize.height),
                                                 SurfaceFormat::B8G8R8A8,
                                                 4 * aSize.width, true);
    if (NS_WARN_IF(!emptyCanvas)) {
      return NS_ERROR_INVALID_ARG;
    }

    DataSourceSurface::MappedSurface map;
    if (!emptyCanvas->Map(DataSourceSurface::MapType::WRITE, &map)) {
      return NS_ERROR_INVALID_ARG;
    }
    rv = aEncoder->InitFromData(map.mData,
                                aSize.width * aSize.height * 4,
                                aSize.width,
                                aSize.height,
                                aSize.width * 4,
                                imgIEncoder::INPUT_FORMAT_HOSTARGB,
                                aOptions);
    emptyCanvas->Unmap();
    if (NS_SUCCEEDED(rv)) {
      imgStream = do_QueryInterface(aEncoder);
    }
  }
  NS_ENSURE_SUCCESS(rv, rv);

  imgStream.forget(aStream);
  return rv;
}
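The sketch below shows how a caller might drive this method for a layers::Image, assuming the standard imgIEncoder contract ID for PNG. ExtractDataInternal is normally reached through ImageEncoder's public entry points; it is called directly here only to make the argument order concrete, and the function name, size, and the zero aFormat argument are illustrative assumptions.

// Hypothetical caller sketch: encode a layers::Image to PNG.
static nsresult
EncodeImageToPNG(layers::Image* aImage, nsIntSize aSize,
                 nsIInputStream** aStream)
{
  nsCOMPtr<imgIEncoder> encoder =
    do_CreateInstance("@mozilla.org/image/encoder;2?type=image/png");
  if (!encoder) {
    return NS_ERROR_FAILURE;
  }
  return ImageEncoder::ExtractDataInternal(NS_LITERAL_STRING("image/png"),
                                           EmptyString(), // no encoder options
                                           nullptr,       // no raw image buffer
                                           0,             // aFormat unused here
                                           aSize,
                                           aImage,
                                           nullptr,       // no canvas context
                                           aStream,
                                           encoder);
}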
Example #6
RawAccessFrameRef
Decoder::InternalAddFrame(uint32_t aFrameNum,
                          const nsIntSize& aTargetSize,
                          const nsIntRect& aFrameRect,
                          uint32_t aDecodeFlags,
                          SurfaceFormat aFormat,
                          uint8_t aPaletteDepth,
                          imgFrame* aPreviousFrame)
{
  MOZ_ASSERT(aFrameNum <= mFrameCount, "Invalid frame index!");
  if (aFrameNum > mFrameCount) {
    return RawAccessFrameRef();
  }

  if (aTargetSize.width <= 0 || aTargetSize.height <= 0 ||
      aFrameRect.width <= 0 || aFrameRect.height <= 0) {
    NS_WARNING("Trying to add frame with zero or negative size");
    return RawAccessFrameRef();
  }

  if (!SurfaceCache::CanHold(aTargetSize.ToIntSize())) {
    NS_WARNING("Trying to add frame that's too large for the SurfaceCache");
    return RawAccessFrameRef();
  }

  nsRefPtr<imgFrame> frame = new imgFrame();
  bool nonPremult =
    aDecodeFlags & imgIContainer::FLAG_DECODE_NO_PREMULTIPLY_ALPHA;
  if (NS_FAILED(frame->InitForDecoder(aTargetSize, aFrameRect, aFormat,
                                      aPaletteDepth, nonPremult))) {
    NS_WARNING("imgFrame::Init should succeed");
    return RawAccessFrameRef();
  }

  RawAccessFrameRef ref = frame->RawAccessRef();
  if (!ref) {
    frame->Abort();
    return RawAccessFrameRef();
  }

  InsertOutcome outcome =
    SurfaceCache::Insert(frame, ImageKey(mImage.get()),
                         RasterSurfaceKey(aTargetSize.ToIntSize(),
                                          aDecodeFlags,
                                          aFrameNum),
                         Lifetime::Persistent);
  if (outcome != InsertOutcome::SUCCESS) {
    // We either hit InsertOutcome::FAILURE, which is a temporary failure due to
    // low memory (we know it's not permanent because we checked CanHold()
    // above), or InsertOutcome::FAILURE_ALREADY_PRESENT, which means that
    // another decoder beat us to decoding this frame. Either way, we should
    // abort this decoder rather than treat this as a real error.
    mDecodeAborted = true;
    ref->Abort();
    return RawAccessFrameRef();
  }

  nsIntRect refreshArea;

  if (aFrameNum == 1) {
    MOZ_ASSERT(aPreviousFrame, "Must provide a previous frame when animated");
    aPreviousFrame->SetRawAccessOnly();

    // If we dispose of the first frame by clearing it, then the first frame's
    // refresh area is all of itself.
    // RESTORE_PREVIOUS is invalid (assumed to be DISPOSE_CLEAR).
    AnimationData previousFrameData = aPreviousFrame->GetAnimationData();
    if (previousFrameData.mDisposalMethod == DisposalMethod::CLEAR ||
        previousFrameData.mDisposalMethod == DisposalMethod::CLEAR_ALL ||
        previousFrameData.mDisposalMethod == DisposalMethod::RESTORE_PREVIOUS) {
      refreshArea = previousFrameData.mRect;
    }
  }

  if (aFrameNum > 0) {
    ref->SetRawAccessOnly();

    // Some GIFs are huge but only have a small area that they animate. We only
    // need to refresh that small area when frame 0 comes around again.
    refreshArea.UnionRect(refreshArea, frame->GetRect());
  }

  mFrameCount++;
  mImage->OnAddedFrame(mFrameCount, refreshArea);

  return ref;
}
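The refresh-area bookkeeping above is easiest to see with a concrete animation; the frame rects below are made up, but the disposal methods are the ones the code checks.

// Illustrative walk-through of the refreshArea logic:
//   Frame 1 of a GIF whose frame 0 (the previous frame) is disposed with
//   DISPOSE_CLEAR: refreshArea starts as frame 0's full rect, then
//   UnionRect() adds frame 1's own rect, so the whole cleared area is
//   invalidated when the animation loops.
//   Frame 2+ where the previous frame is kept: refreshArea stays empty until
//   UnionRect() adds that frame's rect, so only the small animated region is
//   refreshed when frame 0 comes around again.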
Example #7
/* static */
nsresult
ImageEncoder::ExtractDataInternal(const nsAString& aType,
                                  const nsAString& aOptions,
                                  uint8_t* aImageBuffer,
                                  int32_t aFormat,
                                  const nsIntSize aSize,
                                  nsICanvasRenderingContextInternal* aContext,
                                  nsIInputStream** aStream,
                                  imgIEncoder* aEncoder)
{
  if (aSize.IsEmpty()) {
    return NS_ERROR_INVALID_ARG;
  }

  nsCOMPtr<nsIInputStream> imgStream;

  // get image bytes
  nsresult rv;
  if (aImageBuffer) {
    rv = ImageEncoder::GetInputStream(
      aSize.width,
      aSize.height,
      aImageBuffer,
      aFormat,
      aEncoder,
      nsPromiseFlatString(aOptions).get(),
      getter_AddRefs(imgStream));
  } else if (aContext) {
    NS_ConvertUTF16toUTF8 encoderType(aType);
    rv = aContext->GetInputStream(encoderType.get(),
                                  nsPromiseFlatString(aOptions).get(),
                                  getter_AddRefs(imgStream));
  } else {
    // no context, so we have to encode an empty image
    // note that if we didn't have a current context, the spec says we're
    // supposed to just return transparent black pixels of the canvas
    // dimensions.
    RefPtr<DataSourceSurface> emptyCanvas =
      Factory::CreateDataSourceSurfaceWithStride(IntSize(aSize.width, aSize.height),
                                                 SurfaceFormat::B8G8R8A8,
                                                 4 * aSize.width, true);
    if (NS_WARN_IF(!emptyCanvas)) {
      return NS_ERROR_INVALID_ARG;
    }

    DataSourceSurface::MappedSurface map;
    if (!emptyCanvas->Map(DataSourceSurface::MapType::WRITE, &map)) {
      return NS_ERROR_INVALID_ARG;
    }
    rv = aEncoder->InitFromData(map.mData,
                                aSize.width * aSize.height * 4,
                                aSize.width,
                                aSize.height,
                                aSize.width * 4,
                                imgIEncoder::INPUT_FORMAT_HOSTARGB,
                                aOptions);
    emptyCanvas->Unmap();
    if (NS_SUCCEEDED(rv)) {
      imgStream = do_QueryInterface(aEncoder);
    }
  }
  NS_ENSURE_SUCCESS(rv, rv);

  imgStream.forget(aStream);
  return rv;
}