Example #1
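// Wraps a decoded YCbCr buffer in a VideoData sample: sanity-checks the
// chroma planes and picture rect, obtains a planar YCbCr image from the
// ImageContainer (or reuses the supplied aImage), and hands the buffer to
// SetVideoDataToImage, copying the data only when no image was supplied.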
/* static */
already_AddRefed<VideoData>
VideoData::Create(const VideoInfo& aInfo,
                  ImageContainer* aContainer,
                  Image* aImage,
                  int64_t aOffset,
                  int64_t aTime,
                  int64_t aDuration,
                  const YCbCrBuffer& aBuffer,
                  bool aKeyframe,
                  int64_t aTimecode,
                  const IntRect& aPicture)
{
  if (!aImage && !aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    RefPtr<VideoData> v(new VideoData(aOffset,
                                      aTime,
                                      aDuration,
                                      aKeyframe,
                                      aTimecode,
                                      aInfo.mDisplay,
                                      0));
    return v.forget();
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nullptr;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    // In debug builds, make the error more noticeable.
    MOZ_ASSERT(false, "Empty picture rect");
    return nullptr;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) || !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || xLimit.value() > aBuffer.mPlanes[0].mStride ||
      !yLimit.isValid() || yLimit.value() > aBuffer.mPlanes[0].mHeight)
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  RefPtr<VideoData> v(new VideoData(aOffset,
                                    aTime,
                                    aDuration,
                                    aKeyframe,
                                    aTimecode,
                                    aInfo.mDisplay,
                                    0));
#ifdef MOZ_WIDGET_GONK
  const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
  const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
  const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
#endif

  if (!aImage) {
    // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
    // format.
#ifdef MOZ_WIDGET_GONK
    if (IsYV12Format(Y, Cb, Cr) && !IsInEmulator()) {
      v->mImage = aContainer->CreateImage(ImageFormat::GRALLOC_PLANAR_YCBCR);
    }
#endif
    if (!v->mImage) {
      v->mImage = aContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
    }
  } else {
    v->mImage = aImage;
  }

  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == ImageFormat::PLANAR_YCBCR ||
               v->mImage->GetFormat() == ImageFormat::GRALLOC_PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());

  bool shouldCopyData = (aImage == nullptr);
  if (!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
                                      shouldCopyData)) {
    return nullptr;
  }

#ifdef MOZ_WIDGET_GONK
  if (!videoImage->IsValid() && !aImage && IsYV12Format(Y, Cb, Cr)) {
    // Failed to allocate gralloc. Try fallback.
    v->mImage = aContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
    if (!v->mImage) {
      return nullptr;
    }
    videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());
    if (!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
                                        true /* aCopyData */)) {
      return nullptr;
    }
  }
#endif
  return v.forget();
}
Example #2
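// Draws the layer's current image with Direct3D 9. RGB-style images are
// rendered from a single texture, while planar YCbCr images are bound as
// separate Y/Cb/Cr textures for the YCbCr shader, with optional NVIDIA 3D
// Vision stereo control data and metadata.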
void
ImageLayerD3D9::RenderLayer()
{
  ImageContainer *container = GetContainer();
  if (!container || mD3DManager->CompositingDisabled()) {
    return;
  }

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  SetShaderTransformAndOpacity();

  gfx::IntSize size = image->GetSize();

  if (image->GetFormat() == ImageFormat::CAIRO_SURFACE ||
      image->GetFormat() == ImageFormat::REMOTE_IMAGE_BITMAP ||
      image->GetFormat() == ImageFormat::D3D9_RGB32_TEXTURE)
  {
    NS_ASSERTION(image->GetFormat() != ImageFormat::CAIRO_SURFACE ||
                 !static_cast<CairoImage*>(image)->mSourceSurface ||
                 static_cast<CairoImage*>(image)->mSourceSurface->GetFormat() != SurfaceFormat::A8,
                 "Image layer has alpha image");

    bool hasAlpha = false;
    nsRefPtr<IDirect3DTexture9> texture = GetTexture(image, hasAlpha);

    device()->SetVertexShaderConstantF(CBvLayerQuad,
                                       ShaderConstantRect(0,
                                                          0,
                                                          size.width,
                                                          size.height),
                                       1);

    if (hasAlpha) {
      mD3DManager->SetShaderMode(DeviceManagerD3D9::RGBALAYER, GetMaskLayer());
    } else {
      mD3DManager->SetShaderMode(DeviceManagerD3D9::RGBLAYER, GetMaskLayer());
    }

    if (mFilter == GraphicsFilter::FILTER_NEAREST) {
      device()->SetSamplerState(0, D3DSAMP_MAGFILTER, D3DTEXF_POINT);
      device()->SetSamplerState(0, D3DSAMP_MINFILTER, D3DTEXF_POINT);
    }
    device()->SetTexture(0, texture);

    image = nullptr;
    autoLock.Unlock();

    device()->DrawPrimitive(D3DPT_TRIANGLESTRIP, 0, 2);
    if (mFilter == GraphicsFilter::FILTER_NEAREST) {
      device()->SetSamplerState(0, D3DSAMP_MAGFILTER, D3DTEXF_LINEAR);
      device()->SetSamplerState(0, D3DSAMP_MINFILTER, D3DTEXF_LINEAR);
    }
  } else {
    PlanarYCbCrImage *yuvImage =
      static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->IsValid()) {
      return;
    }

    if (!yuvImage->GetBackendData(mozilla::layers::LayersBackend::LAYERS_D3D9)) {
      AllocateTexturesYCbCr(yuvImage, device(), mD3DManager);
    }

    PlanarYCbCrD3D9BackendData *data =
      static_cast<PlanarYCbCrD3D9BackendData*>(yuvImage->GetBackendData(mozilla::layers::LayersBackend::LAYERS_D3D9));

    if (!data) {
      return;
    }

    nsRefPtr<IDirect3DDevice9> dev;
    data->mYTexture->GetDevice(getter_AddRefs(dev));
    if (dev != device()) {
      return;
    }

    device()->SetVertexShaderConstantF(CBvLayerQuad,
                                       ShaderConstantRect(0,
                                                          0,
                                                          size.width,
                                                          size.height),
                                       1);

    device()->SetVertexShaderConstantF(CBvTextureCoords,
      ShaderConstantRect(
        (float)yuvImage->GetData()->mPicX / yuvImage->GetData()->mYSize.width,
        (float)yuvImage->GetData()->mPicY / yuvImage->GetData()->mYSize.height,
        (float)yuvImage->GetData()->mPicSize.width / yuvImage->GetData()->mYSize.width,
        (float)yuvImage->GetData()->mPicSize.height / yuvImage->GetData()->mYSize.height
      ),
      1);

    mD3DManager->SetShaderMode(DeviceManagerD3D9::YCBCRLAYER, GetMaskLayer());

    /*
     * Send 3d control data and metadata
     */
    if (mD3DManager->GetNv3DVUtils()) {
      Nv_Stereo_Mode mode;
      switch (yuvImage->GetData()->mStereoMode) {
      case StereoMode::LEFT_RIGHT:
        mode = NV_STEREO_MODE_LEFT_RIGHT;
        break;
      case StereoMode::RIGHT_LEFT:
        mode = NV_STEREO_MODE_RIGHT_LEFT;
        break;
      case StereoMode::BOTTOM_TOP:
        mode = NV_STEREO_MODE_BOTTOM_TOP;
        break;
      case StereoMode::TOP_BOTTOM:
        mode = NV_STEREO_MODE_TOP_BOTTOM;
        break;
      case StereoMode::MONO:
        mode = NV_STEREO_MODE_MONO;
        break;
      }

      // Send control data even in mono case so driver knows to leave stereo mode.
      mD3DManager->GetNv3DVUtils()->SendNv3DVControl(mode, true, FIREFOX_3DV_APP_HANDLE);

      if (yuvImage->GetData()->mStereoMode != StereoMode::MONO) {
        mD3DManager->GetNv3DVUtils()->SendNv3DVControl(mode, true, FIREFOX_3DV_APP_HANDLE);

        nsRefPtr<IDirect3DSurface9> renderTarget;
        device()->GetRenderTarget(0, getter_AddRefs(renderTarget));
        mD3DManager->GetNv3DVUtils()->SendNv3DVMetaData((unsigned int)yuvImage->GetSize().width,
                                                        (unsigned int)yuvImage->GetSize().height, (HANDLE)(data->mYTexture), (HANDLE)(renderTarget));
      }
    }

    // Linear scaling is the default here; honoring mFilter is difficult since
    // even with point filtering we'd presumably still want chroma upsampling
    // to be linear, and the current approach can't express that.
    device()->SetTexture(0, data->mYTexture);
    device()->SetTexture(1, data->mCbTexture);
    device()->SetTexture(2, data->mCrTexture);

    image = nullptr;
    data = nullptr;
    autoLock.Unlock();

    device()->DrawPrimitive(D3DPT_TRIANGLESTRIP, 0, 2);

    device()->SetVertexShaderConstantF(CBvTextureCoords,
      ShaderConstantRect(0, 0, 1.0f, 1.0f), 1);
  }

  GetContainer()->NotifyPaintedImage(image);
}
Example #3
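// Draws the layer's current image with OpenGL, selecting a shader program by
// image format: planar YCbCr (three textures), Cairo surfaces, MacIOSurface
// images behind XP_MACOSX, and Gonk gralloc buffers behind MOZ_WIDGET_GONK.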
void
ImageLayerOGL::RenderLayer(int,
                           const nsIntPoint& aOffset)
{
  nsRefPtr<ImageContainer> container = GetContainer();

  if (!container || mOGLManager->CompositingDisabled())
    return;

  mOGLManager->MakeCurrent();

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  NS_ASSERTION(image->GetFormat() != REMOTE_IMAGE_BITMAP,
    "Remote images aren't handled yet in OGL layers!");

  if (image->GetFormat() == PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage =
      static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->IsValid()) {
      return;
    }

    PlanarYCbCrOGLBackendData *data =
      static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTextures->GetGLContext() != gl()) {
      // If these textures were allocated by another layer manager,
      // clear them out and re-allocate below.
      data = nullptr;
      yuvImage->SetBackendData(LAYERS_OPENGL, nullptr);
    }

    if (!data) {
      AllocateTexturesYCbCr(yuvImage);
      data = static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));
    }

    if (!data || data->mTextures->GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();
    gl()->fActiveTexture(LOCAL_GL_TEXTURE2);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[2].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE1);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[1].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[0].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    
    ShaderProgramOGL *program = mOGLManager->GetProgram(YCbCrLayerProgramType,
                                                        GetMaskLayer());

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        yuvImage->GetSize().width,
                                        yuvImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetYCbCrTextureUnits(0, 1, 2);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                yuvImage->GetData()->GetPictureRect(),
                                                nsIntSize(yuvImage->GetData()->mYSize.width,
                                                          yuvImage->GetData()->mYSize.height));

    // We shouldn't need to do this, but do it anyway just in case
    // someone else forgets.
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
  } else if (image->GetFormat() == CAIRO_SURFACE) {
    CairoImage *cairoImage =
      static_cast<CairoImage*>(image);

    if (!cairoImage->mSurface) {
      return;
    }

    NS_ASSERTION(cairoImage->mSurface->GetContentType() != gfxASurface::CONTENT_ALPHA,
                 "Image layer has alpha image");

    CairoOGLBackendData *data =
      static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTexture.GetGLContext() != gl()) {
      // If this texture was allocated by another layer manager, clear
      // it out and re-allocate below.
      data = nullptr;
      cairoImage->SetBackendData(LAYERS_OPENGL, nullptr);
    }

    if (!data) {
      AllocateTexturesCairo(cairoImage);
      data = static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LAYERS_OPENGL));
    }

    if (!data || data->mTexture.GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();

    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTexture.GetTextureID());

    ShaderProgramOGL *program = 
      mOGLManager->GetProgram(data->mLayerProgram, GetMaskLayer());

    gl()->ApplyFilterToBoundTexture(mFilter);

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0, 
                                        cairoImage->GetSize().width, 
                                        cairoImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuad(program);
#ifdef XP_MACOSX
  } else if (image->GetFormat() == MAC_IO_SURFACE) {
     MacIOSurfaceImage *ioImage =
       static_cast<MacIOSurfaceImage*>(image);

     if (!mOGLManager->GetThebesLayerCallback()) {
       // If it's an empty transaction we still need to update
       // the plugin IO Surface and make sure we grab the
       // new image.
       ioImage->Update(GetContainer());
       image = nullptr;
       autoLock.Refresh();
       image = autoLock.GetImage();
       gl()->MakeCurrent();
       ioImage = static_cast<MacIOSurfaceImage*>(image);
     }

     if (!ioImage) {
       return;
     }

     gl()->fActiveTexture(LOCAL_GL_TEXTURE0);

     if (!ioImage->GetBackendData(LAYERS_OPENGL)) {
       AllocateTextureIOSurface(ioImage, gl());
     }

     MacIOSurfaceImageOGLBackendData *data =
      static_cast<MacIOSurfaceImageOGLBackendData*>(ioImage->GetBackendData(LAYERS_OPENGL));

     gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
     gl()->fBindTexture(LOCAL_GL_TEXTURE_RECTANGLE_ARB, data->mTexture.GetTextureID());

     ShaderProgramOGL *program =
       mOGLManager->GetProgram(gl::RGBARectLayerProgramType, GetMaskLayer());

     program->Activate();
     if (program->GetTexCoordMultiplierUniformLocation() != -1) {
       // 2DRect case, get the multiplier right for a sampler2DRect
       program->SetTexCoordMultiplier(ioImage->GetSize().width, ioImage->GetSize().height);
     } else {
       NS_ASSERTION(0, "no rects?");
     }

     program->SetLayerQuadRect(nsIntRect(0, 0,
                                         ioImage->GetSize().width,
                                         ioImage->GetSize().height));
     program->SetLayerTransform(GetEffectiveTransform());
     program->SetLayerOpacity(GetEffectiveOpacity());
     program->SetRenderOffset(aOffset);
     program->SetTextureUnit(0);
     program->LoadMask(GetMaskLayer());

     mOGLManager->BindAndDrawQuad(program);
     gl()->fBindTexture(LOCAL_GL_TEXTURE_RECTANGLE_ARB, 0);
#endif
#ifdef MOZ_WIDGET_GONK
  } else if (image->GetFormat() == GONK_IO_SURFACE) {

    GonkIOSurfaceImage *ioImage = static_cast<GonkIOSurfaceImage*>(image);
    if (!ioImage) {
      return;
    }

    gl()->MakeCurrent();
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);

    if (!ioImage->GetBackendData(LAYERS_OPENGL)) {
      AllocateTextureIOSurface(ioImage, gl());
    }
    GonkIOSurfaceImageOGLBackendData *data =
      static_cast<GonkIOSurfaceImageOGLBackendData*>(ioImage->GetBackendData(LAYERS_OPENGL));

    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->BindExternalBuffer(data->mTexture.GetTextureID(), ioImage->GetNativeBuffer());

    ShaderProgramOGL *program = mOGLManager->GetProgram(RGBAExternalLayerProgramType, GetMaskLayer());

    gl()->ApplyFilterToBoundTexture(mFilter);

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0, 
                                        ioImage->GetSize().width, 
                                        ioImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                GetVisibleRegion().GetBounds(),
                                                nsIntSize(ioImage->GetSize().width,
                                                          ioImage->GetSize().height));
#endif
  }
  GetContainer()->NotifyPaintedImage(image);
}
Example #4
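// Points mVPXImageWrapper at I420 planes for the next frame. Null or
// forced-black chunks use a cached black frame; planar 4:2:0 input is used
// as-is, while NV12/NV21, 4:4:4 and 4:2:2 layouts are converted into the
// mI420Frame scratch buffer with libyuv.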
nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
{
  nsRefPtr<Image> img;
  if (aChunk.mFrame.GetForceBlack() || aChunk.IsNull()) {
    if (!mMuteFrame) {
      mMuteFrame = VideoFrame::CreateBlackImage(gfxIntSize(mFrameWidth, mFrameHeight));
      MOZ_ASSERT(mMuteFrame);
    }
    img = mMuteFrame;
  } else {
    img = aChunk.mFrame.GetImage();
  }

  ImageFormat format = img->GetFormat();
  if (format != ImageFormat::PLANAR_YCBCR) {
    VP8LOG("Unsupported video format\n");
    return NS_ERROR_FAILURE;
  }

  // Cast away constness because some of the accessors are non-const.
  PlanarYCbCrImage* yuv =
    const_cast<PlanarYCbCrImage*>(static_cast<const PlanarYCbCrImage*>(img.get()));
  // Big-time assumption here that this is all contiguous data coming
  // from getUserMedia or other sources.
  MOZ_ASSERT(yuv);
  if (!yuv->IsValid()) {
    NS_WARNING("PlanarYCbCrImage is not valid");
    return NS_ERROR_FAILURE;
  }
  const PlanarYCbCrImage::Data *data = yuv->GetData();

  if (isYUV420(data) && !data->mCbSkip) { // 420 planar
    mVPXImageWrapper->planes[VPX_PLANE_Y] = data->mYChannel;
    mVPXImageWrapper->planes[VPX_PLANE_U] = data->mCbChannel;
    mVPXImageWrapper->planes[VPX_PLANE_V] = data->mCrChannel;
    mVPXImageWrapper->stride[VPX_PLANE_Y] = data->mYStride;
    mVPXImageWrapper->stride[VPX_PLANE_U] = data->mCbCrStride;
    mVPXImageWrapper->stride[VPX_PLANE_V] = data->mCbCrStride;
  } else {
    uint32_t yPlaneSize = mFrameWidth * mFrameHeight;
    uint32_t halfWidth = (mFrameWidth + 1) / 2;
    uint32_t halfHeight = (mFrameHeight + 1) / 2;
    uint32_t uvPlaneSize = halfWidth * halfHeight;
    if (mI420Frame.IsEmpty()) {
      mI420Frame.SetLength(yPlaneSize + uvPlaneSize * 2);
    }

    MOZ_ASSERT(mI420Frame.Length() >= (yPlaneSize + uvPlaneSize * 2));
    uint8_t *y = mI420Frame.Elements();
    uint8_t *cb = mI420Frame.Elements() + yPlaneSize;
    uint8_t *cr = mI420Frame.Elements() + yPlaneSize + uvPlaneSize;

    if (isYUV420(data) && data->mCbSkip) {
      // If mCbSkip is set, we assume it's nv12 or nv21.
      if (data->mCbChannel < data->mCrChannel) { // nv12
        libyuv::NV12ToI420(data->mYChannel, data->mYStride,
                           data->mCbChannel, data->mCbCrStride,
                           y, mFrameWidth,
                           cb, halfWidth,
                           cr, halfWidth,
                           mFrameWidth, mFrameHeight);
      } else { // nv21
        libyuv::NV21ToI420(data->mYChannel, data->mYStride,
                           data->mCrChannel, data->mCbCrStride,
                           y, mFrameWidth,
                           cb, halfWidth,
                           cr, halfWidth,
                           mFrameWidth, mFrameHeight);
      }
    } else if (isYUV444(data) && !data->mCbSkip) {
      libyuv::I444ToI420(data->mYChannel, data->mYStride,
                         data->mCbChannel, data->mCbCrStride,
                         data->mCrChannel, data->mCbCrStride,
                         y, mFrameWidth,
                         cb, halfWidth,
                         cr, halfWidth,
                         mFrameWidth, mFrameHeight);
    } else if (isYUV422(data) && !data->mCbSkip) {
      libyuv::I422ToI420(data->mYChannel, data->mYStride,
                         data->mCbChannel, data->mCbCrStride,
                         data->mCrChannel, data->mCbCrStride,
                         y, mFrameWidth,
                         cb, halfWidth,
                         cr, halfWidth,
                         mFrameWidth, mFrameHeight);
    } else {
      VP8LOG("Unsupported planar format\n");
      return NS_ERROR_NOT_IMPLEMENTED;
    }

    mVPXImageWrapper->planes[VPX_PLANE_Y] = y;
    mVPXImageWrapper->planes[VPX_PLANE_U] = cb;
    mVPXImageWrapper->planes[VPX_PLANE_V] = cr;
    mVPXImageWrapper->stride[VPX_PLANE_Y] = mFrameWidth;
    mVPXImageWrapper->stride[VPX_PLANE_U] = halfWidth;
    mVPXImageWrapper->stride[VPX_PLANE_V] = halfWidth;
  }

  return NS_OK;
}
Example #5
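// Draws the layer's current image with Direct3D 10. RGB-style images are
// rendered through a single shader resource view, while planar YCbCr images
// bind separate Y/Cb/Cr views and set texture coordinates from the picture
// rect for the YCbCr shader.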
void
ImageLayerD3D10::RenderLayer()
{
  ImageContainer *container = GetContainer();
  if (!container) {
    return;
  }

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  gfxIntSize size = image->GetSize();

  SetEffectTransformAndOpacity();

  ID3D10EffectTechnique *technique;
  nsRefPtr<IDXGIKeyedMutex> keyedMutex;

  if (image->GetFormat() == ImageFormat::CAIRO_SURFACE ||
      image->GetFormat() == ImageFormat::REMOTE_IMAGE_BITMAP ||
      image->GetFormat() == ImageFormat::REMOTE_IMAGE_DXGI_TEXTURE ||
      image->GetFormat() == ImageFormat::D3D9_RGB32_TEXTURE) {
    NS_ASSERTION(image->GetFormat() != ImageFormat::CAIRO_SURFACE ||
                 !static_cast<CairoImage*>(image)->mSurface ||
                 static_cast<CairoImage*>(image)->mSurface->GetContentType() != gfxASurface::CONTENT_ALPHA,
                 "Image layer has alpha image");
    bool hasAlpha = false;

    nsRefPtr<ID3D10ShaderResourceView> srView = GetImageSRView(image, hasAlpha, getter_AddRefs(keyedMutex));
    if (!srView) {
      return;
    }

    uint8_t shaderFlags = SHADER_PREMUL;
    shaderFlags |= LoadMaskTexture();
    shaderFlags |= hasAlpha
                  ? SHADER_RGBA : SHADER_RGB;
    shaderFlags |= mFilter == gfxPattern::FILTER_NEAREST
                  ? SHADER_POINT : SHADER_LINEAR;
    technique = SelectShader(shaderFlags);

    effect()->GetVariableByName("tRGB")->AsShaderResource()->SetResource(srView);

    effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)0,
        (float)0,
        (float)size.width,
        (float)size.height)
      );
  } else if (image->GetFormat() == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage =
      static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->IsValid()) {
      return;
    }

    if (!yuvImage->GetBackendData(mozilla::layers::LAYERS_D3D10)) {
      AllocateTexturesYCbCr(yuvImage);
    }

    PlanarYCbCrD3D10BackendData *data =
      static_cast<PlanarYCbCrD3D10BackendData*>(yuvImage->GetBackendData(mozilla::layers::LAYERS_D3D10));

    if (!data) {
      return;
    }

    nsRefPtr<ID3D10Device> dev;
    data->mYTexture->GetDevice(getter_AddRefs(dev));
    if (dev != device()) {
      return;
    }

    // TODO: At some point we should try to deal with mFilter here. You don't
    // really want to use point filtering in the NEAREST case, since that
    // would also use point filtering for chroma upsampling, whereas the user
    // most likely only wants point filtering for the final RGB image upsampling.

    technique = SelectShader(SHADER_YCBCR | LoadMaskTexture());

    effect()->GetVariableByName("tY")->AsShaderResource()->SetResource(data->mYView);
    effect()->GetVariableByName("tCb")->AsShaderResource()->SetResource(data->mCbView);
    effect()->GetVariableByName("tCr")->AsShaderResource()->SetResource(data->mCrView);

    effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)0,
        (float)0,
        (float)size.width,
        (float)size.height)
      );

    effect()->GetVariableByName("vTextureCoords")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)yuvImage->GetData()->mPicX / yuvImage->GetData()->mYSize.width,
        (float)yuvImage->GetData()->mPicY / yuvImage->GetData()->mYSize.height,
        (float)yuvImage->GetData()->mPicSize.width / yuvImage->GetData()->mYSize.width,
        (float)yuvImage->GetData()->mPicSize.height / yuvImage->GetData()->mYSize.height)
       );
  }

  bool resetTexCoords = image->GetFormat() == ImageFormat::PLANAR_YCBCR;
  image = nullptr;
  autoLock.Unlock();

  technique->GetPassByIndex(0)->Apply(0);
  device()->Draw(4, 0);

  if (keyedMutex) {
    keyedMutex->ReleaseSync(0);
  }

  if (resetTexCoords) {
    effect()->GetVariableByName("vTextureCoords")->AsVector()->
      SetFloatVector(ShaderConstantRectD3D10(0, 0, 1.0f, 1.0f));
  }

  GetContainer()->NotifyPaintedImage(image);
}
Example #6
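// Prepares the next frame for VP8 encoding. Rejects mid-stream resolution
// changes, uses planar 4:2:0 YCbCr data directly, converts other YCbCr
// layouts (NV12/NV21, 4:4:4, 4:2:2) with libyuv, and otherwise converts RGB
// source surfaces (BGRA/BGRX, RGB565) to I420.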
nsresult VP8TrackEncoder::PrepareRawFrame(VideoChunk &aChunk)
{
  RefPtr<Image> img;
  if (aChunk.mFrame.GetForceBlack() || aChunk.IsNull()) {
    if (!mMuteFrame) {
      mMuteFrame = VideoFrame::CreateBlackImage(gfx::IntSize(mFrameWidth, mFrameHeight));
      MOZ_ASSERT(mMuteFrame);
    }
    img = mMuteFrame;
  } else {
    img = aChunk.mFrame.GetImage();
  }

  if (img->GetSize() != IntSize(mFrameWidth, mFrameHeight)) {
    VP8LOG("Dynamic resolution changes (was %dx%d, now %dx%d) are unsupported\n",
           mFrameWidth, mFrameHeight, img->GetSize().width, img->GetSize().height);
    return NS_ERROR_FAILURE;
  }

  ImageFormat format = img->GetFormat();
  if (format == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage* yuv = static_cast<PlanarYCbCrImage *>(img.get());

    MOZ_RELEASE_ASSERT(yuv);
    if (!yuv->IsValid()) {
      NS_WARNING("PlanarYCbCrImage is not valid");
      return NS_ERROR_FAILURE;
    }
    const PlanarYCbCrImage::Data *data = yuv->GetData();

    if (isYUV420(data) && !data->mCbSkip) {
      // 420 planar, no need for conversions
      mVPXImageWrapper->planes[VPX_PLANE_Y] = data->mYChannel;
      mVPXImageWrapper->planes[VPX_PLANE_U] = data->mCbChannel;
      mVPXImageWrapper->planes[VPX_PLANE_V] = data->mCrChannel;
      mVPXImageWrapper->stride[VPX_PLANE_Y] = data->mYStride;
      mVPXImageWrapper->stride[VPX_PLANE_U] = data->mCbCrStride;
      mVPXImageWrapper->stride[VPX_PLANE_V] = data->mCbCrStride;

      return NS_OK;
    }
  }

  // Not 420 planar, have to convert
  uint32_t yPlaneSize = mFrameWidth * mFrameHeight;
  uint32_t halfWidth = (mFrameWidth + 1) / 2;
  uint32_t halfHeight = (mFrameHeight + 1) / 2;
  uint32_t uvPlaneSize = halfWidth * halfHeight;

  if (mI420Frame.IsEmpty()) {
    mI420Frame.SetLength(yPlaneSize + uvPlaneSize * 2);
  }

  uint8_t *y = mI420Frame.Elements();
  uint8_t *cb = mI420Frame.Elements() + yPlaneSize;
  uint8_t *cr = mI420Frame.Elements() + yPlaneSize + uvPlaneSize;

  if (format == ImageFormat::PLANAR_YCBCR) {
    PlanarYCbCrImage* yuv = static_cast<PlanarYCbCrImage *>(img.get());

    MOZ_RELEASE_ASSERT(yuv);
    if (!yuv->IsValid()) {
      NS_WARNING("PlanarYCbCrImage is not valid");
      return NS_ERROR_FAILURE;
    }
    const PlanarYCbCrImage::Data *data = yuv->GetData();

    int rv;
    std::string yuvFormat;
    if (isYUV420(data) && data->mCbSkip) {
      // If mCbSkip is set, we assume it's nv12 or nv21.
      if (data->mCbChannel < data->mCrChannel) { // nv12
        rv = libyuv::NV12ToI420(data->mYChannel, data->mYStride,
                                data->mCbChannel, data->mCbCrStride,
                                y, mFrameWidth,
                                cb, halfWidth,
                                cr, halfWidth,
                                mFrameWidth, mFrameHeight);
        yuvFormat = "NV12";
      } else { // nv21
        rv = libyuv::NV21ToI420(data->mYChannel, data->mYStride,
                                data->mCrChannel, data->mCbCrStride,
                                y, mFrameWidth,
                                cb, halfWidth,
                                cr, halfWidth,
                                mFrameWidth, mFrameHeight);
        yuvFormat = "NV21";
      }
    } else if (isYUV444(data) && !data->mCbSkip) {
      rv = libyuv::I444ToI420(data->mYChannel, data->mYStride,
                              data->mCbChannel, data->mCbCrStride,
                              data->mCrChannel, data->mCbCrStride,
                              y, mFrameWidth,
                              cb, halfWidth,
                              cr, halfWidth,
                              mFrameWidth, mFrameHeight);
      yuvFormat = "I444";
    } else if (isYUV422(data) && !data->mCbSkip) {
      rv = libyuv::I422ToI420(data->mYChannel, data->mYStride,
                              data->mCbChannel, data->mCbCrStride,
                              data->mCrChannel, data->mCbCrStride,
                              y, mFrameWidth,
                              cb, halfWidth,
                              cr, halfWidth,
                              mFrameWidth, mFrameHeight);
      yuvFormat = "I422";
    } else {
      VP8LOG("Unsupported planar format\n");
      NS_ASSERTION(false, "Unsupported planar format");
      return NS_ERROR_NOT_IMPLEMENTED;
    }

    if (rv != 0) {
      VP8LOG("Converting an %s frame to I420 failed\n", yuvFormat.c_str());
      return NS_ERROR_FAILURE;
    }

    VP8LOG("Converted an %s frame to I420\n");
  } else {
    // Not YCbCr at all. Try to get access to the raw data and convert.

    RefPtr<SourceSurface> surf = img->GetAsSourceSurface();
    if (!surf) {
      VP8LOG("Getting surface from %s image failed\n", Stringify(format).c_str());
      return NS_ERROR_FAILURE;
    }

    RefPtr<DataSourceSurface> data = surf->GetDataSurface();
    if (!data) {
      VP8LOG("Getting data surface from %s image with %s (%s) surface failed\n",
             Stringify(format).c_str(), Stringify(surf->GetType()).c_str(),
             Stringify(surf->GetFormat()).c_str());
      return NS_ERROR_FAILURE;
    }

    DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ);
    if (!map.IsMapped()) {
      VP8LOG("Reading DataSourceSurface from %s image with %s (%s) surface failed\n",
             Stringify(format).c_str(), Stringify(surf->GetType()).c_str(),
             Stringify(surf->GetFormat()).c_str());
      return NS_ERROR_FAILURE;
    }

    int rv;
    switch (surf->GetFormat()) {
      case SurfaceFormat::B8G8R8A8:
      case SurfaceFormat::B8G8R8X8:
        rv = libyuv::ARGBToI420(static_cast<uint8*>(map.GetData()),
                                map.GetStride(),
                                y, mFrameWidth,
                                cb, halfWidth,
                                cr, halfWidth,
                                mFrameWidth, mFrameHeight);
        break;
      case SurfaceFormat::R5G6B5_UINT16:
        rv = libyuv::RGB565ToI420(static_cast<uint8*>(map.GetData()),
                                  map.GetStride(),
                                  y, mFrameWidth,
                                  cb, halfWidth,
                                  cr, halfWidth,
                                  mFrameWidth, mFrameHeight);
        break;
      default:
        VP8LOG("Unsupported SourceSurface format %s\n",
               Stringify(surf->GetFormat()).c_str());
        NS_ASSERTION(false, "Unsupported SourceSurface format");
        return NS_ERROR_NOT_IMPLEMENTED;
    }

    if (rv != 0) {
      VP8LOG("%s to I420 conversion failed\n",
             Stringify(surf->GetFormat()).c_str());
      return NS_ERROR_FAILURE;
    }

    VP8LOG("Converted a %s frame to I420\n",
           Stringify(surf->GetFormat()).c_str());
  }

  mVPXImageWrapper->planes[VPX_PLANE_Y] = y;
  mVPXImageWrapper->planes[VPX_PLANE_U] = cb;
  mVPXImageWrapper->planes[VPX_PLANE_V] = cr;
  mVPXImageWrapper->stride[VPX_PLANE_Y] = mFrameWidth;
  mVPXImageWrapper->stride[VPX_PLANE_U] = halfWidth;
  mVPXImageWrapper->stride[VPX_PLANE_V] = halfWidth;

  return NS_OK;
}
Example #7
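// Draws the layer's current image with OpenGL. In addition to the planar
// YCbCr, Cairo, and Gonk gralloc paths, this version handles SHARED_TEXTURE
// images by attaching the shared handle to a GL texture before drawing.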
void
ImageLayerOGL::RenderLayer(int,
                           const nsIntPoint& aOffset)
{
  nsRefPtr<ImageContainer> container = GetContainer();

  if (!container || mOGLManager->CompositingDisabled())
    return;

  mOGLManager->MakeCurrent();

  AutoLockImage autoLock(container);

  Image *image = autoLock.GetImage();
  if (!image) {
    return;
  }

  NS_ASSERTION(image->GetFormat() != REMOTE_IMAGE_BITMAP,
    "Remote images aren't handled yet in OGL layers!");

  if (image->GetFormat() == PLANAR_YCBCR) {
    PlanarYCbCrImage *yuvImage =
      static_cast<PlanarYCbCrImage*>(image);

    if (!yuvImage->IsValid()) {
      return;
    }

    PlanarYCbCrOGLBackendData *data =
      static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTextures->GetGLContext() != gl()) {
      // If these textures were allocated by another layer manager,
      // clear them out and re-allocate below.
      data = nullptr;
      yuvImage->SetBackendData(LAYERS_OPENGL, nullptr);
    }

    if (!data) {
      AllocateTexturesYCbCr(yuvImage);
      data = static_cast<PlanarYCbCrOGLBackendData*>(yuvImage->GetBackendData(LAYERS_OPENGL));
    }

    if (!data || data->mTextures->GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();
    gl()->fActiveTexture(LOCAL_GL_TEXTURE2);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[2].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE1);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[1].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTextures[0].GetTextureID());
    gl()->ApplyFilterToBoundTexture(mFilter);
    
    ShaderProgramOGL *program = mOGLManager->GetProgram(YCbCrLayerProgramType,
                                                        GetMaskLayer());

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0,
                                        yuvImage->GetSize().width,
                                        yuvImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetYCbCrTextureUnits(0, 1, 2);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                yuvImage->GetData()->GetPictureRect(),
                                                nsIntSize(yuvImage->GetData()->mYSize.width,
                                                          yuvImage->GetData()->mYSize.height));

    // We shouldn't need to do this, but do it anyway just in case
    // someone else forgets.
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
  } else if (image->GetFormat() == CAIRO_SURFACE) {
    CairoImage *cairoImage =
      static_cast<CairoImage*>(image);

    if (!cairoImage->mSurface) {
      return;
    }

    NS_ASSERTION(cairoImage->mSurface->GetContentType() != gfxASurface::CONTENT_ALPHA,
                 "Image layer has alpha image");

    CairoOGLBackendData *data =
      static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LAYERS_OPENGL));

    if (data && data->mTexture.GetGLContext() != gl()) {
      // If this texture was allocated by another layer manager, clear
      // it out and re-allocate below.
      data = nullptr;
      cairoImage->SetBackendData(LAYERS_OPENGL, nullptr);
    }

    if (!data) {
      AllocateTexturesCairo(cairoImage);
      data = static_cast<CairoOGLBackendData*>(cairoImage->GetBackendData(LAYERS_OPENGL));
    }

    if (!data || data->mTexture.GetGLContext() != gl()) {
      // XXX - Can this ever happen? If so I need to fix this!
      return;
    }

    gl()->MakeCurrent();

    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, data->mTexture.GetTextureID());

    ShaderProgramOGL *program = 
      mOGLManager->GetProgram(data->mLayerProgram, GetMaskLayer());

    gl()->ApplyFilterToBoundTexture(mFilter);

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0, 
                                        cairoImage->GetSize().width, 
                                        cairoImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuad(program);
  } else if (image->GetFormat() == SHARED_TEXTURE) {
    SharedTextureImage* texImage =
      static_cast<SharedTextureImage*>(image);
    const SharedTextureImage::Data* data = texImage->GetData();
    GLContext::SharedHandleDetails handleDetails;
    if (!gl()->GetSharedHandleDetails(data->mShareType, data->mHandle, handleDetails)) {
      NS_ERROR("Failed to get shared handle details");
      return;
    }

    ShaderProgramOGL* program = mOGLManager->GetProgram(handleDetails.mProgramType, GetMaskLayer());

    program->Activate();
    if (handleDetails.mProgramType == gl::RGBARectLayerProgramType) {
      // 2DRect case, get the multiplier right for a sampler2DRect
      program->SetTexCoordMultiplier(data->mSize.width, data->mSize.height);
    }
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->SetTextureTransform(handleDetails.mTextureTransform);
    program->LoadMask(GetMaskLayer());

    if (!texImage->GetBackendData(LAYERS_OPENGL)) {
      AllocateTextureSharedTexture(texImage, gl(), handleDetails.mTarget);
    }

    ImageOGLBackendData *backendData =
      static_cast<ImageOGLBackendData*>(texImage->GetBackendData(LAYERS_OPENGL));
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->fBindTexture(handleDetails.mTarget, backendData->mTexture.GetTextureID());

    if (!gl()->AttachSharedHandle(data->mShareType, data->mHandle)) {
      NS_ERROR("Failed to bind shared texture handle");
      return;
    }

    gl()->ApplyFilterToBoundTexture(handleDetails.mTarget, mFilter);
    program->SetLayerQuadRect(nsIntRect(nsIntPoint(0, 0), data->mSize));
    mOGLManager->BindAndDrawQuad(program, data->mInverted);
    gl()->fBindTexture(handleDetails.mTarget, 0);
    gl()->DetachSharedHandle(data->mShareType, data->mHandle);
#ifdef MOZ_WIDGET_GONK
  } else if (image->GetFormat() == GONK_IO_SURFACE) {

    GonkIOSurfaceImage *ioImage = static_cast<GonkIOSurfaceImage*>(image);
    if (!ioImage) {
      return;
    }

    gl()->MakeCurrent();
    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);

    if (!ioImage->GetBackendData(LAYERS_OPENGL)) {
      AllocateTextureIOSurface(ioImage, gl());
    }
    GonkIOSurfaceImageOGLBackendData *data =
      static_cast<GonkIOSurfaceImageOGLBackendData*>(ioImage->GetBackendData(LAYERS_OPENGL));

    gl()->fActiveTexture(LOCAL_GL_TEXTURE0);
    gl()->BindExternalBuffer(data->mTexture.GetTextureID(), ioImage->GetNativeBuffer());

    ShaderProgramOGL *program = mOGLManager->GetProgram(RGBAExternalLayerProgramType, GetMaskLayer());

    gl()->ApplyFilterToBoundTexture(mFilter);

    program->Activate();
    program->SetLayerQuadRect(nsIntRect(0, 0, 
                                        ioImage->GetSize().width, 
                                        ioImage->GetSize().height));
    program->SetLayerTransform(GetEffectiveTransform());
    program->SetLayerOpacity(GetEffectiveOpacity());
    program->SetRenderOffset(aOffset);
    program->SetTextureUnit(0);
    program->LoadMask(GetMaskLayer());

    mOGLManager->BindAndDrawQuadWithTextureRect(program,
                                                GetVisibleRegion().GetBounds(),
                                                nsIntSize(ioImage->GetSize().width,
                                                          ioImage->GetSize().height));
#endif
  }
  GetContainer()->NotifyPaintedImage(image);
}