Example #1
0
void
nsVideoFrame::PaintVideo(nsIRenderingContext& aRenderingContext,
                         const nsRect& aDirtyRect, nsPoint aPt) 
{
  nsRect area = GetContentRect() - GetPosition() + aPt;
  nsHTMLVideoElement* element = static_cast<nsHTMLVideoElement*>(GetContent());
  nsIntSize videoSize = element->GetVideoSize(nsIntSize(0, 0));
  if (videoSize.width <= 0 || videoSize.height <= 0 || area.IsEmpty())
    return;

  gfxContext* ctx = static_cast<gfxContext*>(aRenderingContext.GetNativeGraphicData(nsIRenderingContext::NATIVE_THEBES_CONTEXT));
  nsPresContext* presContext = PresContext();
  gfxRect r = gfxRect(presContext->AppUnitsToGfxUnits(area.x), 
                      presContext->AppUnitsToGfxUnits(area.y), 
                      presContext->AppUnitsToGfxUnits(area.width), 
                      presContext->AppUnitsToGfxUnits(area.height));

  r = CorrectForAspectRatio(r, videoSize);
  element->Paint(ctx, nsLayoutUtils::GetGraphicsFilterForFrame(this), r);
}
Example #2
0
nsresult
EMEH264Decoder::GmpInit()
{
  MOZ_ASSERT(IsOnGMPThread());

  nsTArray<nsCString> tags;
  tags.AppendElement(NS_LITERAL_CSTRING("h264"));
  tags.AppendElement(NS_ConvertUTF16toUTF8(mProxy->KeySystem()));
  nsresult rv = mMPS->GetGMPVideoDecoder(&tags,
                                         mProxy->GetOrigin(),
                                         &mHost,
                                         &mGMP);
  NS_ENSURE_SUCCESS(rv, rv);
  MOZ_ASSERT(mHost && mGMP);

  GMPVideoCodec codec;
  memset(&codec, 0, sizeof(codec));

  codec.mGMPApiVersion = kGMPVersion33;

  codec.mCodecType = kGMPVideoCodecH264;
  codec.mWidth = mConfig.display_width;
  codec.mHeight = mConfig.display_height;

  nsTArray<uint8_t> codecSpecific;
  codecSpecific.AppendElement(0); // mPacketizationMode.
  codecSpecific.AppendElements(mConfig.extra_data.begin(),
                               mConfig.extra_data.length());

  rv = mGMP->InitDecode(codec,
                        codecSpecific,
                        this,
                        PR_GetNumberOfProcessors());
  NS_ENSURE_SUCCESS(rv, rv);

  mVideoInfo.mDisplay = nsIntSize(mConfig.display_width, mConfig.display_height);
  mVideoInfo.mHasVideo = true;
  mPictureRegion = nsIntRect(0, 0, mConfig.display_width, mConfig.display_height);

  return NS_OK;
}
Example #3
0
void
ContentHostTexture::UseTextureHost(const nsTArray<TimedTexture>& aTextures)
{
  ContentHostBase::UseTextureHost(aTextures);
  MOZ_ASSERT(aTextures.Length() == 1);
  const TimedTexture& t = aTextures[0];
  MOZ_ASSERT(t.mPictureRect.IsEqualInterior(
      nsIntRect(nsIntPoint(0, 0), nsIntSize(t.mTexture->GetSize()))),
      "Only default picture rect supported");

  if (t.mTexture != mTextureHost) {
    mReceivedNewHost = true;
  }

  mTextureHost = t.mTexture;
  mTextureHostOnWhite = nullptr;
  mTextureSourceOnWhite = nullptr;
  if (mTextureHost) {
    mTextureHost->PrepareTextureSource(mTextureSource);
  }
}
Example #4
0
 SVGDrawingParameters(gfxContext* aContext,
                      const nsIntSize& aSize,
                      const ImageRegion& aRegion,
                      Filter aFilter,
                      const Maybe<SVGImageContext>& aSVGContext,
                      float aAnimationTime,
                      uint32_t aFlags)
   : context(aContext)
   , size(aSize.width, aSize.height)
   , region(aRegion)
   , filter(aFilter)
   , svgContext(aSVGContext)
   , viewportSize(aSize)
   , animationTime(aAnimationTime)
   , flags(aFlags)
   , opacity(aSVGContext ? aSVGContext->GetGlobalOpacity() : 1.0)
 {
   if (aSVGContext) {
     CSSIntSize sz = aSVGContext->GetViewportSize();
     viewportSize = nsIntSize(sz.width, sz.height); // XXX losing unit
   }
 }
Example #5
0
static already_AddRefed<MediaDataDecoder>
CreateTestH264Decoder(layers::LayersBackend aBackend,
                      VideoInfo& aConfig)
{
  aConfig.mMimeType = "video/avc";
  aConfig.mId = 1;
  aConfig.mDuration = 40000;
  aConfig.mMediaTime = 0;
  aConfig.mDisplay = nsIntSize(64, 64);
  aConfig.mImage = nsIntRect(0, 0, 64, 64);
  aConfig.mExtraData = new MediaByteBuffer();
  aConfig.mExtraData->AppendElements(sTestH264ExtraData,
                                     MOZ_ARRAY_LENGTH(sTestH264ExtraData));

  PDMFactory::Init();

  RefPtr<PDMFactory> platform = new PDMFactory();
  RefPtr<MediaDataDecoder> decoder(
    platform->CreateDecoder(aConfig, nullptr, nullptr, aBackend, nullptr));

  return decoder.forget();
}
Example #6
0
HRESULT
WMFReader::CreateD3DVideoFrame(IMFSample* aSample,
                               int64_t aTimestampUsecs,
                               int64_t aDurationUsecs,
                               int64_t aOffsetBytes,
                               VideoData** aOutVideoData)
{
  NS_ENSURE_TRUE(aSample, E_POINTER);
  NS_ENSURE_TRUE(aOutVideoData, E_POINTER);
  NS_ENSURE_TRUE(mDXVA2Manager, E_ABORT);
  NS_ENSURE_TRUE(mUseHwAccel, E_ABORT);

  *aOutVideoData = nullptr;
  HRESULT hr;

  ImageFormat format = D3D9_RGB32_TEXTURE;
  nsRefPtr<Image> image;
  hr = mDXVA2Manager->CopyToImage(aSample,
                                  nsIntSize(mPictureRegion.width, mPictureRegion.height),
                                  mDecoder->GetImageContainer(),
                                  getter_AddRefs(image));
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
  NS_ENSURE_TRUE(image, E_FAIL);

  VideoData *v = VideoData::CreateFromImage(mInfo,
                                            mDecoder->GetImageContainer(),
                                            aOffsetBytes,
                                            aTimestampUsecs,
                                            aTimestampUsecs + aDurationUsecs,
                                            image.forget(),
                                            false,
                                            -1,
                                            mPictureRegion);

  NS_ENSURE_TRUE(v, E_FAIL);
  *aOutVideoData = v;

  return S_OK;
}
Example #7
0
void *
MediaPluginReader::ImageBufferCallback::operator()(size_t aWidth, size_t aHeight,
                                                     MPAPI::ColorFormat aColorFormat)
{
  if (!mImageContainer) {
    NS_WARNING("No image container to construct an image");
    return nullptr;
  }

  nsRefPtr<mozilla::layers::SharedRGBImage> rgbImage;
  switch (aColorFormat) {
    case MPAPI::RGB565:
      rgbImage = mozilla::layers::SharedRGBImage::Create(mImageContainer,
                                                         nsIntSize(aWidth, aHeight),
                                                         gfxASurface::ImageFormatRGB16_565);
      mImage = rgbImage;
      return rgbImage->GetBuffer();
    case MPAPI::YCbCr:
    default:
      NS_NOTREACHED("Color format not supported");
      return nullptr;
  }
}
Example #8
0
// Copy and return a decoded frame.
nsresult
AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
                             AppleVDADecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset,
      aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : ""
  );

  if (mQueuedSamples > mMaxRefFrames) {
    // We had stopped requesting more input because we had received too much at
    // the time. We can ask for more once again.
    mCallback->InputExhausted();
  }
  MOZ_ASSERT(mQueuedSamples);
  mQueuedSamples--;

  if (!aImage) {
    // Image was dropped by decoder.
    return NS_OK;
  }

  // Where our resulting image will end up.
  nsRefPtr<VideoData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);

  if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Image should be NV12, which has exactly 2 planes.");

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error();
      return NS_ERROR_FAILURE;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width+1) / 2;
    buffer.mPlanes[1].mHeight = (height+1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane.
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width+1) / 2;
    buffer.mPlanes[2].mHeight = (height+1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;

    // Copy the image data into our own format.
    data =
      VideoData::Create(info,
                        mImageContainer,
                        nullptr,
                        aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds(),
                        buffer,
                        aFrameRef.is_sync_point,
                        aFrameRef.decode_timestamp.ToMicroseconds(),
                        visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    nsRefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    nsRefPtr<layers::Image> image =
      mImageContainer->CreateImage(ImageFormat::MAC_IOSURFACE);
    layers::MacIOSurfaceImage* videoImage =
      static_cast<layers::MacIOSurfaceImage*>(image.get());
    videoImage->SetSurface(macSurface);

    data =
      VideoData::CreateFromImage(info,
                                 mImageContainer,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  while (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
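Note on the reorder queue used above: frames arrive in decode (DTS) order and must leave in presentation (PTS) order, and holding back mMaxRefFrames frames is what makes the smallest-PTS frame safe to emit. A minimal sketch of the idea, using std::priority_queue and a hypothetical Frame type rather than Gecko's actual MediaQueue/VideoData classes:

#include <cstdint>
#include <queue>
#include <vector>

// Hypothetical stand-in for VideoData; only the field the sketch needs.
struct Frame {
  int64_t pts; // composition (presentation) timestamp
};

struct LaterPts {
  bool operator()(const Frame& a, const Frame& b) const {
    return a.pts > b.pts; // min-heap: smallest pts on top
  }
};

class ReorderQueue {
public:
  void Push(const Frame& aFrame) { mQueue.push(aFrame); }

  // Once more frames are queued than the stream's maximum number of
  // reference frames, nothing still inside the decoder can precede the
  // smallest-pts frame, so it is safe to output.
  bool PopReady(size_t aMaxRefFrames, Frame* aOut) {
    if (mQueue.size() <= aMaxRefFrames) {
      return false;
    }
    *aOut = mQueue.top();
    mQueue.pop();
    return true;
  }

private:
  std::priority_queue<Frame, std::vector<Frame>, LaterPts> mQueue;
};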
Example #9
0
// |aTexCoordRect| is the rectangle from the texture that we want to
// draw using the given program.  The program already has the necessary
// offset and scale, so the geometry that needs to be drawn is a unit
// square from 0,0 to 1,1.
//
// The texture's actual size (|aTexture->GetSize()|) can be larger than
// the rectangle given by |aTexCoordRect|.
void
CompositorOGL::BindAndDrawQuadWithTextureRect(ShaderProgramOGL *aProg,
                                              const Rect& aTexCoordRect,
                                              TextureSource *aTexture)
{
  NS_ASSERTION(aProg->HasInitialized(), "Shader program not correctly initialized");
  GLuint vertAttribIndex =
    aProg->AttribLocation(ShaderProgramOGL::VertexCoordAttrib);
  GLuint texCoordAttribIndex =
    aProg->AttribLocation(ShaderProgramOGL::TexCoordAttrib);
  NS_ASSERTION(texCoordAttribIndex != GLuint(-1), "no texture coords?");

  // Given what we know about these textures and coordinates, we can
  // compute fmod(t, 1.0f) to get the same texture coordinate out.  If
  // the texCoordRect dimension is < 0 or > width/height, then we have
  // wraparound that we need to deal with by drawing multiple quads,
  // because we can't rely on full non-power-of-two texture support
  // (which is required for the REPEAT wrap mode).

  GLContext::RectTriangles rects;

  GLenum wrapMode = aTexture->AsSourceOGL()->GetWrapMode();

  IntSize realTexSize = aTexture->GetSize();
  if (!mGLContext->CanUploadNonPowerOfTwo()) {
    realTexSize = IntSize(NextPowerOfTwo(realTexSize.width),
                          NextPowerOfTwo(realTexSize.height));
  }

  // We need to convert back to actual texels here to get proper behaviour with
  // our GL helper functions. Should fix this sometime.
  // I want to vomit.
  IntRect texCoordRect = IntRect(NS_roundf(aTexCoordRect.x * aTexture->GetSize().width),
                                 NS_roundf(aTexCoordRect.y * aTexture->GetSize().height),
                                 NS_roundf(aTexCoordRect.width * aTexture->GetSize().width),
                                 NS_roundf(aTexCoordRect.height * aTexture->GetSize().height));

  // This is fairly disgusting - if the texture should be flipped it will have a
  // negative height, in which case we un-invert the texture coords and pass the
  // flipped 'flag' to the functions below. We can't just use the inverted coords
// because our GL functions use an explicit flag.
  bool flipped = false;
  if (texCoordRect.height < 0) {
    flipped = true;
    texCoordRect.y = texCoordRect.YMost();
    texCoordRect.height = -texCoordRect.height;
  }

  if (wrapMode == LOCAL_GL_REPEAT) {
    rects.addRect(/* dest rectangle */
                  0.0f, 0.0f, 1.0f, 1.0f,
                  /* tex coords */
                  texCoordRect.x / GLfloat(realTexSize.width),
                  texCoordRect.y / GLfloat(realTexSize.height),
                  texCoordRect.XMost() / GLfloat(realTexSize.width),
                  texCoordRect.YMost() / GLfloat(realTexSize.height),
                  flipped);
  } else {
    nsIntRect tcRect(texCoordRect.x, texCoordRect.y,
                     texCoordRect.width, texCoordRect.height);
    GLContext::DecomposeIntoNoRepeatTriangles(tcRect,
                                              nsIntSize(realTexSize.width, realTexSize.height),
                                              rects, flipped);
  }

  DrawWithVertexBuffer2(mGLContext, mVBOs,
                        LOCAL_GL_TRIANGLES, rects.elements(),
                        vertAttribIndex, rects.vertexPointer(),
                        texCoordAttribIndex, rects.texCoordPointer());
}
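Note on the texel normalization above: both GL compositors divide the texel rectangle by the texture size, padded up to a power of two when the context cannot sample non-power-of-two textures. A minimal standalone sketch of that step (the helper names here are illustrative, not Gecko's exact API):

#include <cstdint>

static uint32_t NextPowerOfTwo(uint32_t aValue) {
  uint32_t result = 1;
  while (result < aValue) {
    result <<= 1;
  }
  return result;
}

struct TexCoords {
  float x0, y0, x1, y1; // normalized [0,1] unless the rect wraps
};

// Convert an integer texel rect into normalized GL texture coordinates.
// If the context cannot sample non-power-of-two textures, the texture
// was padded up to the next power of two, so normalize against that.
static TexCoords NormalizeTexRect(int32_t aX, int32_t aY,
                                  int32_t aWidth, int32_t aHeight,
                                  int32_t aTexWidth, int32_t aTexHeight,
                                  bool aCanUseNonPowerOfTwo) {
  float tw = float(aTexWidth);
  float th = float(aTexHeight);
  if (!aCanUseNonPowerOfTwo) {
    tw = float(NextPowerOfTwo(aTexWidth));
    th = float(NextPowerOfTwo(aTexHeight));
  }
  return TexCoords { aX / tw, aY / th, (aX + aWidth) / tw, (aY + aHeight) / th };
}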
Example #10
0
void
ContainerLayerD3D10::RenderLayer()
{
  float renderTargetOffset[] = { 0, 0 };

  nsIntRect visibleRect = mVisibleRegion.GetBounds();
  float opacity = GetEffectiveOpacity();
  bool useIntermediate = UseIntermediateSurface();

  nsRefPtr<ID3D10RenderTargetView> previousRTView;
  nsRefPtr<ID3D10Texture2D> renderTexture;
  nsRefPtr<ID3D10RenderTargetView> rtView;
  float previousRenderTargetOffset[2];
  nsIntSize previousViewportSize;

  gfx3DMatrix oldViewMatrix;

  if (useIntermediate) {
    device()->OMGetRenderTargets(1, getter_AddRefs(previousRTView), NULL);
 
    D3D10_TEXTURE2D_DESC desc;
    memset(&desc, 0, sizeof(D3D10_TEXTURE2D_DESC));
    desc.ArraySize = 1;
    desc.MipLevels = 1;
    desc.Width = visibleRect.width;
    desc.Height = visibleRect.height;
    desc.BindFlags = D3D10_BIND_RENDER_TARGET | D3D10_BIND_SHADER_RESOURCE;
    desc.SampleDesc.Count = 1;
    desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
    HRESULT hr;
    hr = device()->CreateTexture2D(&desc, NULL, getter_AddRefs(renderTexture));

    if (FAILED(hr)) {
      LayerManagerD3D10::ReportFailure(NS_LITERAL_CSTRING("Failed to create new texture for ContainerLayerD3D10!"), 
                                       hr);
      return;
    }

    hr = device()->CreateRenderTargetView(renderTexture, NULL, getter_AddRefs(rtView));
    NS_ASSERTION(SUCCEEDED(hr), "Failed to create render target view for ContainerLayerD3D10!");

    effect()->GetVariableByName("vRenderTargetOffset")->
      GetRawValue(previousRenderTargetOffset, 0, 8);

    previousViewportSize = mD3DManager->GetViewport();

    if (mVisibleRegion.GetNumRects() != 1 || !(GetContentFlags() & CONTENT_OPAQUE)) {
      const gfx3DMatrix& transform3D = GetEffectiveTransform();
      gfxMatrix transform;
      // If we have an opaque ancestor layer, then we can be sure that
      // all the pixels we draw into are either opaque already or will be
      // covered by something opaque. Otherwise copying up the background is
      // not safe.
      if (mSupportsComponentAlphaChildren) {
        bool is2d = transform3D.Is2D(&transform);
        NS_ASSERTION(is2d, "Transform should be 2d when mSupportsComponentAlphaChildren.");

        // Copy background up from below. This applies any 2D transform that is
        // applied to use relative to our parent, and compensates for the offset
        // that was applied on our parent's rendering.
        D3D10_BOX srcBox;
        srcBox.left = std::max<int32_t>(visibleRect.x + int32_t(transform.x0) - int32_t(previousRenderTargetOffset[0]), 0);
        srcBox.top = std::max<int32_t>(visibleRect.y + int32_t(transform.y0) - int32_t(previousRenderTargetOffset[1]), 0);
        srcBox.right = std::min<int32_t>(srcBox.left + visibleRect.width, previousViewportSize.width);
        srcBox.bottom = std::min<int32_t>(srcBox.top + visibleRect.height, previousViewportSize.height);
        srcBox.back = 1;
        srcBox.front = 0;

        nsRefPtr<ID3D10Resource> srcResource;
        previousRTView->GetResource(getter_AddRefs(srcResource));

        device()->CopySubresourceRegion(renderTexture, 0,
                                        0, 0, 0,
                                        srcResource, 0,
                                        &srcBox);
      } else {
        float black[] = { 0, 0, 0, 0};
        device()->ClearRenderTargetView(rtView, black);
      }
    }

    ID3D10RenderTargetView *rtViewPtr = rtView;
    device()->OMSetRenderTargets(1, &rtViewPtr, NULL);

    renderTargetOffset[0] = (float)visibleRect.x;
    renderTargetOffset[1] = (float)visibleRect.y;
    effect()->GetVariableByName("vRenderTargetOffset")->
      SetRawValue(renderTargetOffset, 0, 8);

    mD3DManager->SetViewport(nsIntSize(visibleRect.Size()));
  }
    
  D3D10_RECT oldD3D10Scissor;
  UINT numRects = 1;
  device()->RSGetScissorRects(&numRects, &oldD3D10Scissor);
  // Convert the scissor to an nsIntRect. D3D10_RECTs are exclusive
  // on the bottom and right values.
  nsIntRect oldScissor(oldD3D10Scissor.left,
                       oldD3D10Scissor.top,
                       oldD3D10Scissor.right - oldD3D10Scissor.left,
                       oldD3D10Scissor.bottom - oldD3D10Scissor.top);

  nsAutoTArray<Layer*, 12> children;
  SortChildrenBy3DZOrder(children);

  /*
   * Render this container's contents.
   */
  for (uint32_t i = 0; i < children.Length(); i++) {
    LayerD3D10* layerToRender = static_cast<LayerD3D10*>(children.ElementAt(i)->ImplData());

    if (layerToRender->GetLayer()->GetEffectiveVisibleRegion().IsEmpty()) {
      continue;
    }
    
    nsIntRect scissorRect =
        layerToRender->GetLayer()->CalculateScissorRect(oldScissor, nullptr);
    if (scissorRect.IsEmpty()) {
      continue;
    }

    D3D10_RECT d3drect;
    d3drect.left = scissorRect.x;
    d3drect.top = scissorRect.y;
    d3drect.right = scissorRect.x + scissorRect.width;
    d3drect.bottom = scissorRect.y + scissorRect.height;
    device()->RSSetScissorRects(1, &d3drect);

    layerToRender->RenderLayer();
  }

  device()->RSSetScissorRects(1, &oldD3D10Scissor);

  if (useIntermediate) {
    mD3DManager->SetViewport(previousViewportSize);
    ID3D10RenderTargetView *rtView = previousRTView;
    device()->OMSetRenderTargets(1, &rtView, NULL);
    effect()->GetVariableByName("vRenderTargetOffset")->
      SetRawValue(previousRenderTargetOffset, 0, 8);

    SetEffectTransformAndOpacity();

    ID3D10EffectTechnique *technique;
    if (LoadMaskTexture()) {
      if (GetTransform().CanDraw2D()) {
        technique = SelectShader(SHADER_RGBA | SHADER_PREMUL | SHADER_MASK);
      } else {
        technique = SelectShader(SHADER_RGBA | SHADER_PREMUL | SHADER_MASK_3D);
      }
    } else {
        technique = SelectShader(SHADER_RGBA | SHADER_PREMUL | SHADER_NO_MASK);
    }

    effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
      ShaderConstantRectD3D10(
        (float)visibleRect.x,
        (float)visibleRect.y,
        (float)visibleRect.width,
        (float)visibleRect.height)
      );

    technique->GetPassByIndex(0)->Apply(0);

    ID3D10ShaderResourceView *view;
    device()->CreateShaderResourceView(renderTexture, NULL, &view);
    device()->PSSetShaderResources(0, 1, &view);    
    device()->Draw(4, 0);
    view->Release();
  }
}
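Note on the scissor handling above: D3D10_RECT is exclusive on its right/bottom edges while nsIntRect stores an origin plus extent, so the conversion is a simple subtraction. A tiny sketch of the round-trip, with illustrative struct types standing in for the real ones:

// D3D-style rect: left/top inclusive, right/bottom exclusive.
struct D3DRect {
  long left, top, right, bottom;
};

// Gecko-style rect: origin plus extent.
struct IntRect {
  int x, y, width, height;
};

static IntRect FromD3D(const D3DRect& aRect) {
  return IntRect { int(aRect.left), int(aRect.top),
                   int(aRect.right - aRect.left),
                   int(aRect.bottom - aRect.top) };
}

static D3DRect ToD3D(const IntRect& aRect) {
  return D3DRect { aRect.x, aRect.y,
                   aRect.x + aRect.width, aRect.y + aRect.height };
}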
Example #11
0
HRESULT
WMFVideoMFTManager::ConfigureVideoFrameGeometry()
{
  RefPtr<IMFMediaType> mediaType;
  HRESULT hr = mDecoder->GetOutputMediaType(mediaType);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // If we enabled/disabled DXVA in response to a resolution
  // change then we need to renegotiate our media types,
  // and resubmit our previous frame (since the MFT appears
  // to lose it otherwise).
  if (mUseHwAccel && !CanUseDXVA(mediaType)) {
    mDXVAEnabled = false;
    if (!Init()) {
      return E_FAIL;
    }

    mDecoder->Input(mLastInput);
    return S_OK;
  }

  // Verify that the video subtype is what we expect it to be.
  // When using hardware acceleration/DXVA2 the video format should
  // be NV12, which is DXVA2's preferred format. For software decoding
  // we use YV12, as that's easier for us to stick into our rendering
  // pipeline than NV12. NV12 has interleaved UV samples, whereas YV12
  // is a planar format.
  GUID videoFormat;
  hr = mediaType->GetGUID(MF_MT_SUBTYPE, &videoFormat);
  NS_ENSURE_TRUE(videoFormat == MFVideoFormat_NV12 || !mUseHwAccel, E_FAIL);
  NS_ENSURE_TRUE(videoFormat == MFVideoFormat_YV12 || mUseHwAccel, E_FAIL);

  nsIntRect pictureRegion;
  hr = GetPictureRegion(mediaType, pictureRegion);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  UINT32 width = pictureRegion.width;
  UINT32 height = pictureRegion.height;
  mImageSize = nsIntSize(width, height);
  // Calculate and validate the picture region and frame dimensions after
  // scaling by the pixel aspect ratio.
  pictureRegion = mVideoInfo.ScaledImageRect(width, height);
  if (!IsValidVideoRegion(mImageSize, pictureRegion, mVideoInfo.mDisplay)) {
    // Video track's frame sizes will overflow. Ignore the video track.
    return E_FAIL;
  }

  if (mDXVA2Manager) {
    hr = mDXVA2Manager->ConfigureForSize(width, height);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
  }

  // Success! Save state.
  GetDefaultStride(mediaType, width, &mVideoStride);

  LOG("WMFVideoMFTManager frame geometry frame=(%u,%u) stride=%u picture=(%d, %d, %d, %d) display=(%d,%d)",
      width, height,
      mVideoStride,
      pictureRegion.x, pictureRegion.y, pictureRegion.width, pictureRegion.height,
      mVideoInfo.mDisplay.width, mVideoInfo.mDisplay.height);

  return S_OK;
}
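Note on the NV12/YV12 distinction described in the comments above: NV12 keeps one Y plane plus one interleaved CbCr plane (hence the mOffset=1, mSkip=1 Cr settings in the Apple decoder examples), while YV12 keeps three separate planes. A rough sketch of the two layouts, assuming contiguous planes and simple strides (real buffers carry per-plane strides):

#include <cstddef>
#include <cstdint>

// NV12: one Y plane followed by one interleaved CbCr plane. Cb and Cr
// sit at byte offsets 0 and 1 within the same plane, stepping by 2 per
// chroma sample.
struct Nv12Planes {
  const uint8_t* y;
  const uint8_t* cb; // advance by 2 per chroma sample
  const uint8_t* cr;
};

static Nv12Planes MapNv12(const uint8_t* aBase, size_t aStride, size_t aHeight) {
  const uint8_t* chroma = aBase + aStride * aHeight;
  return Nv12Planes { aBase, chroma, chroma + 1 };
}

// YV12: three fully separate planes (Y, then Cr, then Cb), the chroma
// planes having half the rows and half the stride.
struct Yv12Planes {
  const uint8_t* y;
  const uint8_t* cr;
  const uint8_t* cb;
};

static Yv12Planes MapYv12(const uint8_t* aBase, size_t aStride, size_t aHeight) {
  const uint8_t* cr = aBase + aStride * aHeight;
  const uint8_t* cb = cr + (aStride / 2) * ((aHeight + 1) / 2);
  return Yv12Planes { aBase, cr, cb };
}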
Example #12
0
// Copy and return a decoded frame.
nsresult
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                            AppleVTDecoder::AppleFrameRef aFrameRef)
{
  if (mIsShutDown || mIsFlushing) {
    // We are in the process of flushing or shutting down; ignore frame.
    return NS_OK;
  }

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
      aFrameRef.byte_offset,
      aFrameRef.decode_timestamp.ToMicroseconds(),
      aFrameRef.composition_timestamp.ToMicroseconds(),
      aFrameRef.duration.ToMicroseconds(),
      aFrameRef.is_sync_point ? " keyframe" : ""
  );

  if (!aImage) {
    // Image was dropped by the decoder, or no frame has been returned yet.
    // We need more input to continue.
    mCallback->InputExhausted();
    return NS_OK;
  }

  bool useNullSample = false;
  if (mSeekTargetThreshold.isSome()) {
    if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
      useNullSample = true;
    } else {
      mSeekTargetThreshold.reset();
    }
  }

  // Where our resulting image will end up.
  RefPtr<MediaData> data;
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mPictureWidth,
                                      mPictureHeight);

  if (useNullSample) {
    data = new NullData(aFrameRef.byte_offset,
                        aFrameRef.composition_timestamp.ToMicroseconds(),
                        aFrameRef.duration.ToMicroseconds());
  } else if (mUseSoftwareImages) {
    size_t width = CVPixelBufferGetWidth(aImage);
    size_t height = CVPixelBufferGetHeight(aImage);
    DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
    MOZ_ASSERT(planes == 2, "Image should be NV12, which has exactly 2 planes.");

    VideoData::YCbCrBuffer buffer;

    // Lock the returned image data.
    CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
    if (rv != kCVReturnSuccess) {
      NS_ERROR("error locking pixel data");
      mCallback->Error(MediaDataDecoderError::DECODE_ERROR);
      return NS_ERROR_FAILURE;
    }
    // Y plane.
    buffer.mPlanes[0].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
    buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
    buffer.mPlanes[0].mWidth = width;
    buffer.mPlanes[0].mHeight = height;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;
    // Cb plane.
    buffer.mPlanes[1].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[1].mWidth = (width+1) / 2;
    buffer.mPlanes[1].mHeight = (height+1) / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 1;
    // Cr plane.
    buffer.mPlanes[2].mData =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
    buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
    buffer.mPlanes[2].mWidth = (width+1) / 2;
    buffer.mPlanes[2].mHeight = (height+1) / 2;
    buffer.mPlanes[2].mOffset = 1;
    buffer.mPlanes[2].mSkip = 1;

    // Copy the image data into our own format.
    data =
      VideoData::CreateAndCopyData(info,
                                   mImageContainer,
                                   aFrameRef.byte_offset,
                                   aFrameRef.composition_timestamp.ToMicroseconds(),
                                   aFrameRef.duration.ToMicroseconds(),
                                   buffer,
                                   aFrameRef.is_sync_point,
                                   aFrameRef.decode_timestamp.ToMicroseconds(),
                                   visible);
    // Unlock the returned image data.
    CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  } else {
#ifndef MOZ_WIDGET_UIKIT
    IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
    MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

    RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);

    RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);

    data =
      VideoData::CreateFromImage(info,
                                 aFrameRef.byte_offset,
                                 aFrameRef.composition_timestamp.ToMicroseconds(),
                                 aFrameRef.duration.ToMicroseconds(),
                                 image.forget(),
                                 aFrameRef.is_sync_point,
                                 aFrameRef.decode_timestamp.ToMicroseconds(),
                                 visible);
#else
    MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
#endif
  }

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error(MediaDataDecoderError::FATAL_ERROR);
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  MonitorAutoLock mon(mMonitor);
  mReorderQueue.Push(data);
  if (mReorderQueue.Length() > mMaxRefFrames) {
    mCallback->Output(mReorderQueue.Pop().get());
  }
  mCallback->InputExhausted();
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
Example #13
0
void
TiledContentHost::RenderLayerBuffer(TiledLayerBufferComposite& aLayerBuffer,
                                    const gfxRGBA* aBackgroundColor,
                                    EffectChain& aEffectChain,
                                    float aOpacity,
                                    const gfx::Filter& aFilter,
                                    const gfx::Rect& aClipRect,
                                    nsIntRegion aVisibleRegion,
                                    gfx::Matrix4x4 aTransform)
{
  if (!mCompositor) {
    NS_WARNING("Can't render tiled content host - no compositor");
    return;
  }
  float resolution = aLayerBuffer.GetResolution();
  gfx::Size layerScale(1, 1);

  // We assume that the current frame resolution is the one used in our high
  // precision layer buffer. Compensate for a changing frame resolution when
  // rendering the low precision buffer.
  if (aLayerBuffer.GetFrameResolution() != mTiledBuffer.GetFrameResolution()) {
    const CSSToParentLayerScale& layerResolution = aLayerBuffer.GetFrameResolution();
    const CSSToParentLayerScale& localResolution = mTiledBuffer.GetFrameResolution();
    layerScale.width = layerScale.height = layerResolution.scale / localResolution.scale;
    aVisibleRegion.ScaleRoundOut(layerScale.width, layerScale.height);
  }

  // If we're drawing the low precision buffer, make sure the high precision
  // buffer is masked out to avoid overdraw and rendering artifacts with
  // non-opaque layers.
  nsIntRegion maskRegion;
  if (resolution != mTiledBuffer.GetResolution()) {
    maskRegion = mTiledBuffer.GetValidRegion();
    // XXX This should be ScaleRoundIn, but there is no such function on
    //     nsIntRegion.
    maskRegion.ScaleRoundOut(layerScale.width, layerScale.height);
  }

  // Make sure the resolution and difference in frame resolution are accounted
  // for in the layer transform.
  aTransform.PreScale(1/(resolution * layerScale.width),
                      1/(resolution * layerScale.height), 1);

  uint32_t rowCount = 0;
  uint32_t tileX = 0;
  nsIntRect visibleRect = aVisibleRegion.GetBounds();
  gfx::IntSize scaledTileSize = aLayerBuffer.GetScaledTileSize();
  for (int32_t x = visibleRect.x; x < visibleRect.x + visibleRect.width;) {
    rowCount++;
    int32_t tileStartX = aLayerBuffer.GetTileStart(x, scaledTileSize.width);
    int32_t w = scaledTileSize.width - tileStartX;
    if (x + w > visibleRect.x + visibleRect.width) {
      w = visibleRect.x + visibleRect.width - x;
    }
    int tileY = 0;
    for (int32_t y = visibleRect.y; y < visibleRect.y + visibleRect.height;) {
      int32_t tileStartY = aLayerBuffer.GetTileStart(y, scaledTileSize.height);
      int32_t h = scaledTileSize.height - tileStartY;
      if (y + h > visibleRect.y + visibleRect.height) {
        h = visibleRect.y + visibleRect.height - y;
      }

      TileHost tileTexture = aLayerBuffer.
        GetTile(nsIntPoint(aLayerBuffer.RoundDownToTileEdge(x, scaledTileSize.width),
                           aLayerBuffer.RoundDownToTileEdge(y, scaledTileSize.height)));
      if (tileTexture != aLayerBuffer.GetPlaceholderTile()) {
        nsIntRegion tileDrawRegion;
        tileDrawRegion.And(nsIntRect(x, y, w, h), aLayerBuffer.GetValidRegion());
        tileDrawRegion.And(tileDrawRegion, aVisibleRegion);
        tileDrawRegion.Sub(tileDrawRegion, maskRegion);

        if (!tileDrawRegion.IsEmpty()) {
          tileDrawRegion.ScaleRoundOut(resolution, resolution);
          nsIntPoint tileOffset((x - tileStartX) * resolution,
                                (y - tileStartY) * resolution);
          gfx::IntSize tileSize = aLayerBuffer.GetTileSize();
          RenderTile(tileTexture, aBackgroundColor, aEffectChain, aOpacity, aTransform,
                     aFilter, aClipRect, tileDrawRegion, tileOffset,
                     nsIntSize(tileSize.width, tileSize.height));
        }
      }
      tileY++;
      y += h;
    }
    tileX++;
    x += w;
  }
  gfx::Rect rect(visibleRect.x, visibleRect.y,
                 visibleRect.width, visibleRect.height);
  GetCompositor()->DrawDiagnostics(DiagnosticFlags::CONTENT,
                                   rect, aClipRect, aTransform, mFlashCounter);
}
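Note on the tile loop above: each iteration advances from the current position to the next tile edge, clamped to the visible rect. A minimal sketch of that stepping logic, assuming tile origins aligned to multiples of the tile size:

#include <algorithm>
#include <cstdint>

// Offset of aPos within its tile, for tiles aligned to multiples of
// aTileSize (negative coordinates wrap into the preceding tile).
static int32_t TileStart(int32_t aPos, int32_t aTileSize) {
  int32_t mod = aPos % aTileSize;
  return mod >= 0 ? mod : mod + aTileSize;
}

// Width of the span from aPos to the next tile edge, clamped to aLimit.
static int32_t SpanToNextTileEdge(int32_t aPos, int32_t aTileSize,
                                  int32_t aLimit) {
  int32_t w = aTileSize - TileStart(aPos, aTileSize);
  return std::min(w, aLimit - aPos);
}

// Usage mirrors the loop above:
//   for (int32_t x = rect.x; x < rect.XMost();
//        x += SpanToNextTileEdge(x, tileSize, rect.XMost())) { ... }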
Example #14
0
// |aTexCoordRect| is the rectangle from the texture that we want to
// draw using the given program.  The program already has the necessary
// offset and scale, so the geometry that needs to be drawn is a unit
// square from 0,0 to 1,1.
//
// |aTexSize| is the actual size of the texture, as it can be larger
// than the rectangle given by |aTexCoordRect|.
void 
LayerManagerOGL::BindAndDrawQuadWithTextureRect(LayerProgram *aProg,
                                                const nsIntRect& aTexCoordRect,
                                                const nsIntSize& aTexSize,
                                                GLenum aWrapMode /* = LOCAL_GL_REPEAT */,
                                                bool aFlipped /* = false */)
{
  GLuint vertAttribIndex =
    aProg->AttribLocation(LayerProgram::VertexAttrib);
  GLuint texCoordAttribIndex =
    aProg->AttribLocation(LayerProgram::TexCoordAttrib);
  NS_ASSERTION(texCoordAttribIndex != GLuint(-1), "no texture coords?");

  // clear any bound VBO so that glVertexAttribPointer() goes back to
  // "pointer mode"
  mGLContext->fBindBuffer(LOCAL_GL_ARRAY_BUFFER, 0);

  // Given what we know about these textures and coordinates, we can
  // compute fmod(t, 1.0f) to get the same texture coordinate out.  If
  // the texCoordRect dimension is < 0 or > width/height, then we have
  // wraparound that we need to deal with by drawing multiple quads,
  // because we can't rely on full non-power-of-two texture support
  // (which is required for the REPEAT wrap mode).

  GLContext::RectTriangles rects;

  nsIntSize realTexSize = aTexSize;
  if (!mGLContext->CanUploadNonPowerOfTwo()) {
    realTexSize = nsIntSize(NextPowerOfTwo(aTexSize.width),
                            NextPowerOfTwo(aTexSize.height));
  }

  if (aWrapMode == LOCAL_GL_REPEAT) {
    rects.addRect(/* dest rectangle */
                  0.0f, 0.0f, 1.0f, 1.0f,
                  /* tex coords */
                  aTexCoordRect.x / GLfloat(realTexSize.width),
                  aTexCoordRect.y / GLfloat(realTexSize.height),
                  aTexCoordRect.XMost() / GLfloat(realTexSize.width),
                  aTexCoordRect.YMost() / GLfloat(realTexSize.height),
                  aFlipped);
  } else {
    GLContext::DecomposeIntoNoRepeatTriangles(aTexCoordRect, realTexSize,
                                              rects, aFlipped);
  }

  mGLContext->fVertexAttribPointer(vertAttribIndex, 2,
                                   LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0,
                                   rects.vertexPointer());

  mGLContext->fVertexAttribPointer(texCoordAttribIndex, 2,
                                   LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0,
                                   rects.texCoordPointer());

  {
    mGLContext->fEnableVertexAttribArray(texCoordAttribIndex);
    {
      mGLContext->fEnableVertexAttribArray(vertAttribIndex);

      mGLContext->fDrawArrays(LOCAL_GL_TRIANGLES, 0, rects.elements());

      mGLContext->fDisableVertexAttribArray(vertAttribIndex);
    }
    mGLContext->fDisableVertexAttribArray(texCoordAttribIndex);
  }
}
Example #15
0
FFmpegH264Decoder<LIBAV_VER>::DecodeResult
FFmpegH264Decoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample,
                                            uint8_t* aData, int aSize)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  AVPacket packet;
  av_init_packet(&packet);

  packet.data = aData;
  packet.size = aSize;
  packet.dts = aSample->mTimecode;
  packet.pts = aSample->mTime;
  packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
  packet.pos = aSample->mOffset;

  // LibAV provides no API to retrieve the decoded sample's duration.
  // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
  // Instead, we keep a map keyed on the dts and retrieve the duration
  // from it later. The map will typically hold around 16 entries.
  mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
    mCallback->Error();
    return DecodeResult::DECODE_ERROR;
  }

  // Required with old versions of FFmpeg/LibAV
  mFrame->reordered_opaque = AV_NOPTS_VALUE;

  int decoded;
  int bytesConsumed =
    avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);

  FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
             "(Input: pts(%lld) dts(%lld) Output: pts(%lld) "
             "opaque(%lld) pkt_pts(%lld) pkt_dts(%lld))",
             bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
             mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);

  if (bytesConsumed < 0) {
    NS_WARNING("FFmpeg video decoder error.");
    mCallback->Error();
    return DecodeResult::DECODE_ERROR;
  }

  // If we've decoded a frame then we need to output it
  if (decoded) {
    int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
    FFMPEG_LOG("Got one frame output with pts=%lld opaque=%lld",
               pts, mCodecContext->reordered_opaque);
    // Retrieve duration from dts.
    // We use the first entry found matching this dts (this is done to
    // handle damaged files with multiple frames sharing the same dts)

    int64_t duration;
    if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
      NS_WARNING("Unable to retrieve duration from map");
      duration = aSample->mDuration;
      // dts are probably incorrectly reported, so clear the map as we're
      // unlikely to find them in the future anyway. This also guards
      // against the map becoming extremely big.
      mDurationMap.Clear();
    }

    VideoInfo info;
    info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);

    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = mFrame->data[0];
    b.mPlanes[0].mStride = mFrame->linesize[0];
    b.mPlanes[0].mHeight = mFrame->height;
    b.mPlanes[0].mWidth = mFrame->width;
    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = mFrame->data[1];
    b.mPlanes[1].mStride = mFrame->linesize[1];
    b.mPlanes[1].mHeight = (mFrame->height + 1) >> 1;
    b.mPlanes[1].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = mFrame->data[2];
    b.mPlanes[2].mStride = mFrame->linesize[2];
    b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
    b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

    nsRefPtr<VideoData> v = VideoData::Create(info,
                                              mImageContainer,
                                              aSample->mOffset,
                                              pts,
                                              duration,
                                              b,
                                              !!mFrame->key_frame,
                                              -1,
                                              gfx::IntRect(0, 0, mCodecContext->width, mCodecContext->height));
    if (!v) {
      NS_WARNING("image allocation error.");
      mCallback->Error();
      return DecodeResult::DECODE_ERROR;
    }
    mCallback->Output(v);
    return DecodeResult::DECODE_FRAME;
  }
  return DecodeResult::DECODE_NO_FRAME;
}
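Note on the duration map above: libav offers no way to read a decoded frame's duration, so durations are stashed by dts on input and recovered when the frame is emitted. A minimal sketch of a map with the same Insert/Find/Clear shape, using std::deque as the backing store (the real Gecko type differs):

#include <cstdint>
#include <deque>
#include <utility>

// Associates input dts values with sample durations so the duration can
// be recovered when the decoder emits the frame (frames are reordered,
// so the frame that comes out is generally not the sample that went in).
class DurationMap {
public:
  void Insert(int64_t aDts, int64_t aDuration) {
    mEntries.push_back(std::make_pair(aDts, aDuration));
  }

  // Removes and returns the first entry matching aDts; first-match
  // semantics cope with damaged files where frames share a dts.
  bool Find(int64_t aDts, int64_t& aDuration) {
    for (auto it = mEntries.begin(); it != mEntries.end(); ++it) {
      if (it->first == aDts) {
        aDuration = it->second;
        mEntries.erase(it);
        return true;
      }
    }
    return false;
  }

  void Clear() { mEntries.clear(); }

private:
  std::deque<std::pair<int64_t, int64_t>> mEntries;
};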
Example #16
0
HRESULT
WMFVideoMFTManager::ConfigureVideoFrameGeometry()
{
  RefPtr<IMFMediaType> mediaType;
  HRESULT hr = mDecoder->GetOutputMediaType(mediaType);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // Verify that the video subtype is what we expect it to be.
  // When using hardware acceleration/DXVA2 the video format should
  // be NV12, which is DXVA2's preferred format. For software decoding
  // we use YV12, as that's easier for us to stick into our rendering
  // pipeline than NV12. NV12 has interleaved UV samples, whereas YV12
  // is a planar format.
  GUID videoFormat;
  hr = mediaType->GetGUID(MF_MT_SUBTYPE, &videoFormat);
  NS_ENSURE_TRUE(videoFormat == MFVideoFormat_NV12 || !mUseHwAccel, E_FAIL);
  NS_ENSURE_TRUE(videoFormat == MFVideoFormat_YV12 || mUseHwAccel, E_FAIL);

  nsIntRect pictureRegion;
  hr = GetPictureRegion(mediaType, pictureRegion);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  UINT32 width = 0, height = 0;
  hr = MFGetAttributeSize(mediaType, MF_MT_FRAME_SIZE, &width, &height);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  uint32_t aspectNum = 0, aspectDenom = 0;
  hr = MFGetAttributeRatio(mediaType,
                           MF_MT_PIXEL_ASPECT_RATIO,
                           &aspectNum,
                           &aspectDenom);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // Calculate and validate the picture region and frame dimensions after
  // scaling by the pixel aspect ratio.
  nsIntSize frameSize = nsIntSize(width, height);
  nsIntSize displaySize = nsIntSize(pictureRegion.width, pictureRegion.height);
  ScaleDisplayByAspectRatio(displaySize, float(aspectNum) / float(aspectDenom));
  if (!IsValidVideoRegion(frameSize, pictureRegion, displaySize)) {
    // Video track's frame sizes will overflow. Ignore the video track.
    return E_FAIL;
  }

  if (mDXVA2Manager) {
    hr = mDXVA2Manager->ConfigureForSize(width, height);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
  }

  // Success! Save state.
  mVideoInfo.mDisplay = displaySize;
  GetDefaultStride(mediaType, &mVideoStride);
  mVideoWidth = width;
  mVideoHeight = height;
  mPictureRegion = pictureRegion;

  LOG("WMFVideoMFTManager frame geometry frame=(%u,%u) stride=%u picture=(%d, %d, %d, %d) display=(%d,%d) PAR=%d:%d",
      width, height,
      mVideoStride,
      mPictureRegion.x, mPictureRegion.y, mPictureRegion.width, mPictureRegion.height,
      displaySize.width, displaySize.height,
      aspectNum, aspectDenom);

  return S_OK;
}
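Note on the pixel-aspect-ratio step above: the picture region is stretched by the PAR to obtain square-pixel display dimensions. A plausible sketch of what ScaleDisplayByAspectRatio does (an assumption about the helper's behavior, not its actual source):

#include <cmath>

struct IntSize {
  int width;
  int height;
};

// Stretch a display size by the pixel aspect ratio (aspectNum/aspectDenom).
// A PAR above 1 means pixels are wider than tall, so the width grows;
// a PAR below 1 grows the height instead. (Assumed behavior.)
static void ScaleDisplayByAspectRatio(IntSize& aSize, float aPixelAspectRatio) {
  if (aPixelAspectRatio > 1.0f) {
    aSize.width = int(std::floor(aSize.width * aPixelAspectRatio + 0.5f));
  } else if (aPixelAspectRatio > 0.0f && aPixelAspectRatio < 1.0f) {
    aSize.height = int(std::floor(aSize.height / aPixelAspectRatio + 0.5f));
  }
}

// e.g. a 720x480 picture with an 8:9 PAR becomes a 720x540 display size.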
Example #17
0
 nsIntSize GetSize() {
   if (mTexImage)
     return mTexImage->GetSize();
   return nsIntSize(0, 0);
 }
Example #18
0
nsresult
MP4Reader::ReadMetadata(MediaInfo* aInfo,
                        MetadataTags** aTags)
{
  if (!mDemuxerInitialized) {
    MonitorAutoLock mon(mDemuxerMonitor);
    bool ok = InvokeAndRetry(this, &MP4Reader::InitDemuxer, mStream, &mDemuxerMonitor);
    NS_ENSURE_TRUE(ok, NS_ERROR_FAILURE);
    mIndexReady = true;

    // To decode, we need valid video and a place to put it.
    mInfo.mVideo.mHasVideo = mVideo.mActive = mDemuxer->HasValidVideo() &&
                                              mDecoder->GetImageContainer();
    if (mVideo.mActive) {
      mVideo.mTrackDemuxer = new MP4VideoDemuxer(mDemuxer);
    }

    mInfo.mAudio.mHasAudio = mAudio.mActive = mDemuxer->HasValidAudio();
    if (mAudio.mActive) {
      mAudio.mTrackDemuxer = new MP4AudioDemuxer(mDemuxer);
    }
    mCrypto = mDemuxer->Crypto();

    {
      MonitorAutoUnlock unlock(mDemuxerMonitor);
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mInfo.mCrypto.mIsEncrypted = mIsEncrypted = mCrypto.valid;
    }

    // Remember that we've initialized the demuxer, so that if we're decoding
    // an encrypted stream and we need to wait for a CDM to be set, we don't
    // need to reinit the demuxer.
    mDemuxerInitialized = true;
  } else if (mPlatform && !IsWaitingMediaResources()) {
    *aInfo = mInfo;
    *aTags = nullptr;
    return NS_OK;
  }

  if (HasAudio()) {
    const AudioDecoderConfig& audio = mDemuxer->AudioConfig();
    mInfo.mAudio.mRate = audio.samples_per_second;
    mInfo.mAudio.mChannels = audio.channel_count;
    mAudio.mCallback = new DecoderCallback(this, kAudio);
  }

  if (HasVideo()) {
    const VideoDecoderConfig& video = mDemuxer->VideoConfig();
    mInfo.mVideo.mDisplay =
      nsIntSize(video.display_width, video.display_height);
    mVideo.mCallback = new DecoderCallback(this, kVideo);

    // Collect telemetry from h264 AVCC SPS.
    if (!mFoundSPSForTelemetry) {
      mFoundSPSForTelemetry = AccumulateSPSTelemetry(video.extra_data);
    }
  }

  if (mIsEncrypted) {
    nsTArray<uint8_t> initData;
    ExtractCryptoInitData(initData);
    if (initData.Length() == 0) {
      return NS_ERROR_FAILURE;
    }

    mInfo.mCrypto.mInitData = initData;
    mInfo.mCrypto.mType = NS_LITERAL_STRING("cenc");
  }

  // Get the duration, and report it to the decoder if we have it.
  Microseconds duration;
  {
    MonitorAutoLock lock(mDemuxerMonitor);
    duration = mDemuxer->Duration();
  }
  if (duration != -1) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(duration);
  }

  *aInfo = mInfo;
  *aTags = nullptr;

  if (!IsWaitingMediaResources() && !IsWaitingOnCDMResource()) {
    NS_ENSURE_TRUE(EnsureDecodersSetup(), NS_ERROR_FAILURE);
  }

  MonitorAutoLock mon(mDemuxerMonitor);
  UpdateIndex();

  return NS_OK;
}
Example #19
0
void
ClientTiledPaintedLayer::RenderLayer()
{
  LayerManager::DrawPaintedLayerCallback callback =
    ClientManager()->GetPaintedLayerCallback();
  void *data = ClientManager()->GetPaintedLayerCallbackData();
  if (!callback) {
    ClientManager()->SetTransactionIncomplete();
    return;
  }

  if (!mContentClient) {
    mContentClient = new TiledContentClient(this, ClientManager());

    mContentClient->Connect();
    ClientManager()->AsShadowForwarder()->Attach(mContentClient, this);
    MOZ_ASSERT(mContentClient->GetForwarder());
  }

  if (mContentClient->mTiledBuffer.HasFormatChanged()) {
    mValidRegion = nsIntRegion();
    mContentClient->mTiledBuffer.ResetPaintedAndValidState();
  }

  TILING_LOG("TILING %p: Initial visible region %s\n", this, Stringify(mVisibleRegion).c_str());
  TILING_LOG("TILING %p: Initial valid region %s\n", this, Stringify(mValidRegion).c_str());
  TILING_LOG("TILING %p: Initial low-precision valid region %s\n", this, Stringify(mLowPrecisionValidRegion).c_str());

  nsIntRegion neededRegion = mVisibleRegion;
#ifndef MOZ_IGNORE_PAINT_WILL_RESAMPLE
  // This is handled by PadDrawTargetOutFromRegion in TiledContentClient for mobile
  if (MayResample()) {
    // If we're resampling then bilinear filtering can read up to 1 pixel
    // outside of our texture coords. Make the visible region a single rect,
    // and pad it out by 1 pixel (restricted to tile boundaries) so that
    // we always have valid content or transparent pixels to sample from.
    nsIntRect bounds = neededRegion.GetBounds();
    nsIntRect wholeTiles = bounds;
    wholeTiles.Inflate(nsIntSize(
      gfxPlatform::GetPlatform()->GetTileWidth(),
      gfxPlatform::GetPlatform()->GetTileHeight()));
    nsIntRect padded = bounds;
    padded.Inflate(1);
    padded.IntersectRect(padded, wholeTiles);
    neededRegion = padded;
  }
#endif

  nsIntRegion invalidRegion;
  invalidRegion.Sub(neededRegion, mValidRegion);
  if (invalidRegion.IsEmpty()) {
    EndPaint();
    return;
  }

  if (!ClientManager()->IsRepeatTransaction()) {
    // Only paint the mask layer on the first transaction.
    if (GetMaskLayer()) {
      ToClientLayer(GetMaskLayer())->RenderLayer();
    }

    // In some cases we can take a fast path and just be done with it.
    if (UseFastPath()) {
      TILING_LOG("TILING %p: Taking fast-path\n", this);
      mValidRegion = neededRegion;
      mContentClient->mTiledBuffer.PaintThebes(mValidRegion, invalidRegion, callback, data);
      ClientManager()->Hold(this);
      mContentClient->UseTiledLayerBuffer(TiledContentClient::TILED_BUFFER);
      return;
    }

    // For more complex cases we need to calculate a bunch of metrics before we
    // can do the paint.
    BeginPaint();
    if (mPaintData.mPaintFinished) {
      return;
    }

    // Make sure that tiles that fall outside of the visible region or outside of the
    // critical displayport are discarded on the first update. Also make sure that we
    // only draw stuff inside the critical displayport on the first update.
    mValidRegion.And(mValidRegion, neededRegion);
    if (!mPaintData.mCriticalDisplayPort.IsEmpty()) {
      mValidRegion.And(mValidRegion, LayerIntRect::ToUntyped(mPaintData.mCriticalDisplayPort));
      invalidRegion.And(invalidRegion, LayerIntRect::ToUntyped(mPaintData.mCriticalDisplayPort));
    }

    TILING_LOG("TILING %p: First-transaction valid region %s\n", this, Stringify(mValidRegion).c_str());
    TILING_LOG("TILING %p: First-transaction invalid region %s\n", this, Stringify(invalidRegion).c_str());
  } else {
    if (!mPaintData.mCriticalDisplayPort.IsEmpty()) {
      invalidRegion.And(invalidRegion, LayerIntRect::ToUntyped(mPaintData.mCriticalDisplayPort));
    }
    TILING_LOG("TILING %p: Repeat-transaction invalid region %s\n", this, Stringify(invalidRegion).c_str());
  }

  nsIntRegion lowPrecisionInvalidRegion;
  if (gfxPrefs::UseLowPrecisionBuffer()) {
    // Calculate the invalid region for the low precision buffer. Make sure
    // to remove the valid high-precision area so we don't double-paint it.
    lowPrecisionInvalidRegion.Sub(neededRegion, mLowPrecisionValidRegion);
    lowPrecisionInvalidRegion.Sub(lowPrecisionInvalidRegion, mValidRegion);
  }
  TILING_LOG("TILING %p: Low-precision invalid region %s\n", this, Stringify(lowPrecisionInvalidRegion).c_str());

  bool updatedHighPrecision = RenderHighPrecision(invalidRegion,
                                                  neededRegion,
                                                  callback, data);
  if (updatedHighPrecision) {
    ClientManager()->Hold(this);
    mContentClient->UseTiledLayerBuffer(TiledContentClient::TILED_BUFFER);

    if (!mPaintData.mPaintFinished) {
      // There is still more high-res stuff to paint, so we're not
      // done yet. A subsequent transaction will take care of this.
      ClientManager()->SetRepeatTransaction();
      return;
    }
  }

  // If there is nothing to draw in low-precision, then we're done.
  if (lowPrecisionInvalidRegion.IsEmpty()) {
    EndPaint();
    return;
  }

  if (updatedHighPrecision) {
    // If there are low precision updates, but we just did some high-precision
    // updates, then mark the paint as unfinished and request a repeat transaction.
    // This is so that we don't perform low-precision updates in the same transaction
    // as high-precision updates.
    TILING_LOG("TILING %p: Scheduling repeat transaction for low-precision painting\n", this);
    ClientManager()->SetRepeatTransaction();
    mPaintData.mLowPrecisionPaintCount = 1;
    mPaintData.mPaintFinished = false;
    return;
  }

  bool updatedLowPrecision = RenderLowPrecision(lowPrecisionInvalidRegion,
                                                neededRegion,
                                                callback, data);
  if (updatedLowPrecision) {
    ClientManager()->Hold(this);
    mContentClient->UseTiledLayerBuffer(TiledContentClient::LOW_PRECISION_TILED_BUFFER);

    if (!mPaintData.mPaintFinished) {
      // There is still more low-res stuff to paint, so we're not
      // done yet. A subsequent transaction will take care of this.
      ClientManager()->SetRepeatTransaction();
      return;
    }
  }

  // If we get here, we've done all the high- and low-precision
  // paints we wanted to do, so we can finish the paint and chill.
  EndPaint();
}
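Note on the resampling guard near the top of RenderLayer: the needed region is padded by one pixel for bilinear sampling but kept inside a tile-sized envelope. A minimal sketch mirroring that inflate-then-intersect arithmetic, with illustrative rect types:

#include <algorithm>

struct Rect {
  int x, y, width, height;

  void Inflate(int aDx, int aDy) {
    x -= aDx; y -= aDy;
    width += 2 * aDx; height += 2 * aDy;
  }

  void IntersectWith(const Rect& aOther) {
    int x0 = std::max(x, aOther.x);
    int y0 = std::max(y, aOther.y);
    int x1 = std::min(x + width, aOther.x + aOther.width);
    int y1 = std::min(y + height, aOther.y + aOther.height);
    x = x0; y = y0;
    width = std::max(0, x1 - x0);
    height = std::max(0, y1 - y0);
  }
};

// Pad the needed bounds by one pixel for bilinear sampling, but keep the
// result inside an envelope one tile larger in every direction, mirroring
// the wholeTiles/padded arithmetic in RenderLayer above.
static Rect PadForResampling(Rect aBounds, int aTileWidth, int aTileHeight) {
  Rect wholeTiles = aBounds;
  wholeTiles.Inflate(aTileWidth, aTileHeight);
  Rect padded = aBounds;
  padded.Inflate(1, 1);
  padded.IntersectWith(wholeTiles);
  return padded;
}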
Example #20
0
// Copy and return a decoded frame.
nsresult
AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
                            nsAutoPtr<FrameRef> aFrameRef)
{
  size_t width = CVPixelBufferGetWidth(aImage);
  size_t height = CVPixelBufferGetHeight(aImage);
  LOG("  got decoded frame data... %ux%u %s", width, height,
      CVPixelBufferIsPlanar(aImage) ? "planar" : "chunked");
#ifdef DEBUG
  size_t planes = CVPixelBufferGetPlaneCount(aImage);
  for (size_t i = 0; i < planes; ++i) {
    size_t stride = CVPixelBufferGetBytesPerRowOfPlane(aImage, i);
    LOG("     plane %u %ux%u rowbytes %u",
        (unsigned)i,
        (unsigned)CVPixelBufferGetWidthOfPlane(aImage, i),
        (unsigned)CVPixelBufferGetHeightOfPlane(aImage, i),
        (unsigned)stride);
  }
  MOZ_ASSERT(planes == 2);
#endif // DEBUG

  VideoData::YCbCrBuffer buffer;

  // Lock the returned image data.
  CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
  if (rv != kCVReturnSuccess) {
    NS_ERROR("error locking pixel data");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }
  // Y plane.
  buffer.mPlanes[0].mData =
    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
  buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
  buffer.mPlanes[0].mWidth = width;
  buffer.mPlanes[0].mHeight = height;
  buffer.mPlanes[0].mOffset = 0;
  buffer.mPlanes[0].mSkip = 0;
  // Cb plane.
  buffer.mPlanes[1].mData =
    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
  buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
  buffer.mPlanes[1].mWidth = (width+1) / 2;
  buffer.mPlanes[1].mHeight = (height+1) / 2;
  buffer.mPlanes[1].mOffset = 0;
  buffer.mPlanes[1].mSkip = 1;
  // Cr plane.
  buffer.mPlanes[2].mData =
    static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
  buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
  buffer.mPlanes[2].mWidth = (width+1) / 2;
  buffer.mPlanes[2].mHeight = (height+1) / 2;
  buffer.mPlanes[2].mOffset = 1;
  buffer.mPlanes[2].mSkip = 1;

  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(width, height);
  info.mHasVideo = true;
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mConfig.display_width,
                                      mConfig.display_height);

  // Copy the image data into our own format.
  nsAutoPtr<VideoData> data;
  data =
    VideoData::Create(info,
                      mImageContainer,
                      nullptr,
                      aFrameRef->byte_offset,
                      aFrameRef->composition_timestamp,
                      aFrameRef->duration,
                      buffer,
                      aFrameRef->is_sync_point,
                      aFrameRef->decode_timestamp,
                      visible);
  // Unlock the returned image data.
  CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  mReorderQueue.Push(data.forget());
  // Assume a frame with a PTS <= current DTS is ready.
  while (mReorderQueue.Length() > 0) {
    VideoData* readyData = mReorderQueue.Pop();
    if (readyData->mTime <= aFrameRef->decode_timestamp) {
      LOG("returning queued frame with pts %lld", readyData->mTime);
      mCallback->Output(readyData);
    } else {
      LOG("requeued frame with pts %lld > %lld",
          readyData->mTime, aFrameRef->decode_timestamp);
      mReorderQueue.Push(readyData);
      break;
    }
  }
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
Example #21
0
void
GLBlitTextureImageHelper::BlitTextureImage(TextureImage *aSrc, const nsIntRect& aSrcRect,
                                           TextureImage *aDst, const nsIntRect& aDstRect)
{
    GLContext *gl = mCompositor->gl();

    if (!aSrc || !aDst || aSrcRect.IsEmpty() || aDstRect.IsEmpty())
        return;

    NS_ASSERTION(!aSrc->InUpdate(), "Source texture is in update!");
    NS_ASSERTION(!aDst->InUpdate(), "Destination texture is in update!");

    int savedFb = 0;
    gl->fGetIntegerv(LOCAL_GL_FRAMEBUFFER_BINDING, &savedFb);

    ScopedGLState scopedScissorTestState(gl, LOCAL_GL_SCISSOR_TEST, false);
    ScopedGLState scopedBlendState(gl, LOCAL_GL_BLEND, false);

    // 2.0 means scale up by two
    float blitScaleX = float(aDstRect.width) / float(aSrcRect.width);
    float blitScaleY = float(aDstRect.height) / float(aSrcRect.height);

    // We start iterating over all destination tiles
    aDst->BeginBigImageIteration();
    do {
        // calculate portion of the tile that is going to be painted to
        nsIntRect dstSubRect;
        nsIntRect dstTextureRect = ThebesIntRect(aDst->GetTileRect());
        dstSubRect.IntersectRect(aDstRect, dstTextureRect);

        // this tile is not part of the destination rectangle aDstRect
        if (dstSubRect.IsEmpty())
            continue;

        // (*) transform the rect of this tile into the rectangle defined by aSrcRect...
        nsIntRect dstInSrcRect(dstSubRect);
        dstInSrcRect.MoveBy(-aDstRect.TopLeft());
        // ...which might be of different size, hence scale accordingly
        dstInSrcRect.ScaleRoundOut(1.0f / blitScaleX, 1.0f / blitScaleY);
        dstInSrcRect.MoveBy(aSrcRect.TopLeft());

        SetBlitFramebufferForDestTexture(aDst->GetTextureID());
        UseBlitProgram();

        aSrc->BeginBigImageIteration();
        // now iterate over all tiles in the source Image...
        do {
            // calculate portion of the source tile that is in the source rect
            nsIntRect srcSubRect;
            nsIntRect srcTextureRect = ThebesIntRect(aSrc->GetTileRect());
            srcSubRect.IntersectRect(aSrcRect, srcTextureRect);

            // this tile is not part of the source rect
            if (srcSubRect.IsEmpty()) {
                continue;
            }
            // calculate intersection of source rect with destination rect
            srcSubRect.IntersectRect(srcSubRect, dstInSrcRect);
            // this tile does not overlap the current destination tile
            if (srcSubRect.IsEmpty()) {
                continue;
            }
            // We now have the intersection of
            //     the current source tile
            // and the desired source rectangle
            // and the destination tile
            // and the desired destination rectangle,
            // all expressed in source space.
            // We need to transform this back into destination space, inverting the transform from (*)
            nsIntRect srcSubInDstRect(srcSubRect);
            srcSubInDstRect.MoveBy(-aSrcRect.TopLeft());
            srcSubInDstRect.ScaleRoundOut(blitScaleX, blitScaleY);
            srcSubInDstRect.MoveBy(aDstRect.TopLeft());

            // we transform these rectangles to be relative to the current src and dst tiles, respectively
            nsIntSize srcSize = srcTextureRect.Size();
            nsIntSize dstSize = dstTextureRect.Size();
            srcSubRect.MoveBy(-srcTextureRect.x, -srcTextureRect.y);
            srcSubInDstRect.MoveBy(-dstTextureRect.x, -dstTextureRect.y);

            float dx0 = 2.0f * float(srcSubInDstRect.x) / float(dstSize.width) - 1.0f;
            float dy0 = 2.0f * float(srcSubInDstRect.y) / float(dstSize.height) - 1.0f;
            float dx1 = 2.0f * float(srcSubInDstRect.x + srcSubInDstRect.width) / float(dstSize.width) - 1.0f;
            float dy1 = 2.0f * float(srcSubInDstRect.y + srcSubInDstRect.height) / float(dstSize.height) - 1.0f;
            ScopedViewportRect autoViewportRect(gl, 0, 0, dstSize.width, dstSize.height);

            RectTriangles rects;

            nsIntSize realTexSize = srcSize;
            if (!CanUploadNonPowerOfTwo(gl)) {
                realTexSize = nsIntSize(gfx::NextPowerOfTwo(srcSize.width),
                                        gfx::NextPowerOfTwo(srcSize.height));
            }

            if (aSrc->GetWrapMode() == LOCAL_GL_REPEAT) {
                rects.addRect(/* dest rectangle */
                        dx0, dy0, dx1, dy1,
                        /* tex coords */
                        srcSubRect.x / float(realTexSize.width),
                        srcSubRect.y / float(realTexSize.height),
                        srcSubRect.XMost() / float(realTexSize.width),
                        srcSubRect.YMost() / float(realTexSize.height));
            } else {
                DecomposeIntoNoRepeatTriangles(srcSubRect, realTexSize, rects);

                // now put the coords into the d[xy]0 .. d[xy]1 coordinate space
                // from the 0..1 that it comes out of decompose
                InfallibleTArray<RectTriangles::coord>& coords = rects.vertCoords();

                for (unsigned int i = 0; i < coords.Length(); ++i) {
                    coords[i].x = (coords[i].x * (dx1 - dx0)) + dx0;
                    coords[i].y = (coords[i].y * (dy1 - dy0)) + dy0;
                }
            }

            ScopedBindTextureUnit autoTexUnit(gl, LOCAL_GL_TEXTURE0);
            ScopedBindTexture autoTex(gl, aSrc->GetTextureID());
            ScopedVertexAttribPointer autoAttrib0(gl, 0, 2, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, 0, rects.vertCoords().Elements());
            ScopedVertexAttribPointer autoAttrib1(gl, 1, 2, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, 0, rects.texCoords().Elements());

            gl->fDrawArrays(LOCAL_GL_TRIANGLES, 0, rects.elements());

        } while (aSrc->NextTile());
    } while (aDst->NextTile());

    // unbind the previous texture from the framebuffer
    SetBlitFramebufferForDestTexture(0);

    gl->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, savedFb);
}
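The dx0/dy0/dx1/dy1 arithmetic in the example above maps pixel coordinates inside the destination tile into GL normalized device coordinates, which span [-1, 1] across the viewport on both axes. The mapping in isolation (the struct and helper names are mine):

// Map a pixel-space rectangle inside a viewport of the given size into
// GL normalized device coordinates ([-1, 1] on both axes).
struct Ndc { float x0, y0, x1, y1; };

Ndc PixelRectToNdc(int x, int y, int w, int h, int viewportW, int viewportH) {
  Ndc r;
  r.x0 = 2.0f * float(x)     / float(viewportW) - 1.0f;
  r.y0 = 2.0f * float(y)     / float(viewportH) - 1.0f;
  r.x1 = 2.0f * float(x + w) / float(viewportW) - 1.0f;
  r.y1 = 2.0f * float(y + h) / float(viewportH) - 1.0f;
  return r;
}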
Example #22
// Copy and return a decoded frame.
nsresult
AppleVDADecoder::OutputFrame(CVPixelBufferRef aImage,
                             nsAutoPtr<AppleVDADecoder::AppleFrameRef> aFrameRef)
{
  IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
  MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");

  LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
    aFrameRef->byte_offset,
    aFrameRef->decode_timestamp,
    aFrameRef->composition_timestamp,
    aFrameRef->duration,
    aFrameRef->is_sync_point ? " keyframe" : ""
  );

  nsRefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
  // Bounds.
  VideoInfo info;
  info.mDisplay = nsIntSize(mConfig.display_width, mConfig.display_height);
  info.mHasVideo = true;
  gfx::IntRect visible = gfx::IntRect(0,
                                      0,
                                      mConfig.display_width,
                                      mConfig.display_height);

  nsRefPtr<layers::Image> image =
    mImageContainer->CreateImage(ImageFormat::MAC_IOSURFACE);
  layers::MacIOSurfaceImage* videoImage =
    static_cast<layers::MacIOSurfaceImage*>(image.get());
  videoImage->SetSurface(macSurface);

  nsAutoPtr<VideoData> data;
  data = VideoData::CreateFromImage(info,
                                    mImageContainer,
                                    aFrameRef->byte_offset,
                                    aFrameRef->composition_timestamp,
                                    aFrameRef->duration, image.forget(),
                                    aFrameRef->is_sync_point,
                                    aFrameRef->decode_timestamp,
                                    visible);

  if (!data) {
    NS_ERROR("Couldn't create VideoData for frame");
    mCallback->Error();
    return NS_ERROR_FAILURE;
  }

  // Frames come out in DTS order but we need to output them
  // in composition order.
  mReorderQueue.Push(data.forget());
  // Assume a frame with a PTS <= current DTS is ready.
  while (mReorderQueue.Length() > 0) {
    VideoData* readyData = mReorderQueue.Pop();
    if (readyData->mTime <= aFrameRef->decode_timestamp) {
      LOG("returning queued frame with pts %lld", readyData->mTime);
      mCallback->Output(readyData);
    } else {
      LOG("requeued frame with pts %lld > %lld",
          readyData->mTime, aFrameRef->decode_timestamp);
      mReorderQueue.Push(readyData);
      break;
    }
  }
  LOG("%llu decoded frames queued",
      static_cast<unsigned long long>(mReorderQueue.Length()));

  return NS_OK;
}
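Both Apple decoder examples end with the same reordering invariant: frames arrive in decode (DTS) order, and a queued frame whose PTS is at or below the newest DTS can no longer be preceded by anything still in flight, so it is safe to emit. A self-contained sketch of that invariant using std::priority_queue instead of Gecko's ReorderQueue (the Frame type and its field names are hypothetical):

#include <cstdint>
#include <queue>
#include <vector>

struct Frame {
  int64_t pts; // presentation timestamp (microseconds)
  int64_t dts; // decode timestamp (microseconds)
};

struct ByPts {
  bool operator()(const Frame& a, const Frame& b) const {
    return a.pts > b.pts; // min-heap ordered by pts
  }
};

// Push the just-decoded frame, then emit every queued frame whose pts
// can no longer be beaten by a later arrival (pts <= the newest dts).
template <typename Output>
void ReorderAndEmit(std::priority_queue<Frame, std::vector<Frame>, ByPts>& q,
                    const Frame& decoded, Output emit) {
  q.push(decoded);
  while (!q.empty() && q.top().pts <= decoded.dts) {
    emit(q.top());
    q.pop();
  }
}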
Example #23
NS_IMETHODIMP nsViewManager::DispatchEvent(nsGUIEvent *aEvent,
                                           nsIView* aView, nsEventStatus *aStatus)
{
  NS_ASSERTION(!aView || static_cast<nsView*>(aView)->GetViewManager() == this,
               "wrong view manager");

  SAMPLE_LABEL("event", "nsViewManager::DispatchEvent");

  *aStatus = nsEventStatus_eIgnore;

  switch(aEvent->message)
    {
    case NS_SIZE:
      {
        if (aView)
          {
            // client area dimensions are set on the view
            nscoord width = ((nsSizeEvent*)aEvent)->windowSize->width;
            nscoord height = ((nsSizeEvent*)aEvent)->windowSize->height;

            // The root view may not be set if this is the resize associated with
            // window creation

            if (aView == mRootView)
              {
                PRInt32 p2a = AppUnitsPerDevPixel();
                SetWindowDimensions(NSIntPixelsToAppUnits(width, p2a),
                                    NSIntPixelsToAppUnits(height, p2a));
                *aStatus = nsEventStatus_eConsumeNoDefault;
              }
            else if (IsViewForPopup(aView))
              {
                nsXULPopupManager* pm = nsXULPopupManager::GetInstance();
                if (pm)
                  {
                    pm->PopupResized(aView->GetFrame(), nsIntSize(width, height));
                    *aStatus = nsEventStatus_eConsumeNoDefault;
                  }
              }
          }
      }

      break;

    case NS_MOVE:
      {
        // A popup's parent view is the root view for the parent window, so when
        // a popup moves, the popup's frame and view position must be updated
        // to match.
        if (aView && IsViewForPopup(aView))
          {
            nsXULPopupManager* pm = nsXULPopupManager::GetInstance();
            if (pm)
              {
                pm->PopupMoved(aView->GetFrame(), aEvent->refPoint);
                *aStatus = nsEventStatus_eConsumeNoDefault;
              }
          }
        break;
      }

    case NS_DONESIZEMOVE:
      {
        if (mPresShell) {
          nsPresContext* presContext = mPresShell->GetPresContext();
          if (presContext) {
            nsEventStateManager::ClearGlobalActiveContent(nsnull);
          }

        }

        nsIPresShell::ClearMouseCapture(nsnull);
      }
      break;
  
    case NS_XUL_CLOSE:
      {
        // if this is a popup, make a request to hide it. Note that a popuphidden
        // event listener may cancel the event and the popup will not be hidden.
        nsIWidget* widget = aView->GetWidget();
        if (widget) {
          nsWindowType type;
          widget->GetWindowType(type);
          if (type == eWindowType_popup) {
            nsXULPopupManager* pm = nsXULPopupManager::GetInstance();
            if (pm) {
              pm->HidePopup(aView->GetFrame());
              *aStatus = nsEventStatus_eConsumeNoDefault;
            }
          }
        }
      }
      break;

    case NS_WILL_PAINT:
      {
        if (!aView || !mContext)
          break;

        *aStatus = nsEventStatus_eConsumeNoDefault;

        nsPaintEvent *event = static_cast<nsPaintEvent*>(aEvent);

        NS_ASSERTION(static_cast<nsView*>(aView) ==
                       nsView::GetViewFor(event->widget),
                     "view/widget mismatch");

        // If an ancestor widget was hidden and then shown, we could
        // have a delayed resize to handle.
        for (nsViewManager *vm = this; vm;
             vm = vm->mRootView->GetParent()
                    ? vm->mRootView->GetParent()->GetViewManager()
                    : nsnull) {
          if (vm->mDelayedResize != nsSize(NSCOORD_NONE, NSCOORD_NONE) &&
              vm->mRootView->IsEffectivelyVisible() &&
              mPresShell && mPresShell->IsVisible()) {
            vm->FlushDelayedResize(true);
            vm->InvalidateView(vm->mRootView);
          }
        }

        // Flush things like reflows and plugin widget geometry updates by
        // calling WillPaint on observer presShells.
        nsRefPtr<nsViewManager> rootVM = RootViewManager();
        if (mPresShell) {
          rootVM->CallWillPaintOnObservers(event->willSendDidPaint);
        }
        // Flush view widget geometry updates and invalidations.
        rootVM->ProcessPendingUpdates();
      }
      break;

    case NS_PAINT:
      {
        if (!aView || !mContext)
          break;

        *aStatus = nsEventStatus_eConsumeNoDefault;
        nsPaintEvent *event = static_cast<nsPaintEvent*>(aEvent);
        nsView* view = static_cast<nsView*>(aView);
        NS_ASSERTION(view == nsView::GetViewFor(event->widget),
                     "view/widget mismatch");
        NS_ASSERTION(IsPaintingAllowed(),
                     "shouldn't be receiving paint events while painting is "
                     "disallowed!");

        if (!event->didSendWillPaint) {
          // Send NS_WILL_PAINT event ourselves.
          nsPaintEvent willPaintEvent(true, NS_WILL_PAINT, event->widget);
          willPaintEvent.willSendDidPaint = event->willSendDidPaint;
          DispatchEvent(&willPaintEvent, view, aStatus);

          // Get the view pointer again since NS_WILL_PAINT might have
          // destroyed it during CallWillPaintOnObservers (bug 378273).
          view = nsView::GetViewFor(event->widget);
        }

        if (!view || event->region.IsEmpty())
          break;

        // Paint.
        Refresh(view, event->region, event->willSendDidPaint);

        break;
      }

    case NS_DID_PAINT: {
      nsRefPtr<nsViewManager> rootVM = RootViewManager();
      rootVM->CallDidPaintOnObserver();
      break;
    }

    case NS_CREATE:
    case NS_DESTROY:
    case NS_SETZLEVEL:
      /* Don't pass these events through. Passing them through
         causes performance problems on pages with lots of views/frames 
         @see bug 112861 */
      *aStatus = nsEventStatus_eConsumeNoDefault;
      break;

    case NS_DISPLAYCHANGED:

      //Destroy the cached backbuffer to force a new backbuffer
      //be constructed with the appropriate display depth.
      //@see bugzilla bug 6061
      *aStatus = nsEventStatus_eConsumeDoDefault;
      break;

    case NS_SYSCOLORCHANGED:
      {
        if (mPresShell) {
          // Hold a refcount to the presshell. The continued existence of the observer will
          // delay deletion of this view hierarchy should the event want to cause its
          // destruction in, say, some JavaScript event handler.
          nsCOMPtr<nsIPresShell> presShell = mPresShell;
          presShell->HandleEvent(aView->GetFrame(), aEvent, false, aStatus);
        }
      }
      break; 

    default:
      {
        if ((NS_IS_MOUSE_EVENT(aEvent) &&
             // Ignore mouse events that we synthesize.
             static_cast<nsMouseEvent*>(aEvent)->reason ==
               nsMouseEvent::eReal &&
             // Ignore mouse exit and enter (we'll get moves if the user
             // is really moving the mouse) since we get them when we
             // create and destroy widgets.
             aEvent->message != NS_MOUSE_EXIT &&
             aEvent->message != NS_MOUSE_ENTER) ||
            NS_IS_KEY_EVENT(aEvent) ||
            NS_IS_IME_EVENT(aEvent) ||
            aEvent->message == NS_PLUGIN_INPUT_EVENT) {
          gLastUserEventTime = PR_IntervalToMicroseconds(PR_IntervalNow());
        }

        if (aEvent->message == NS_DEACTIVATE) {
          // if a window is deactivated, clear the mouse capture regardless
          // of what is capturing
          nsIPresShell::ClearMouseCapture(nsnull);
        }

        // Find the view whose coordinates system we're in.
        nsIView* view = aView;
        bool dispatchUsingCoordinates = NS_IsEventUsingCoordinates(aEvent);
        if (dispatchUsingCoordinates) {
          // Will dispatch using coordinates. Pretty bogus but it's consistent
          // with what presshell does.
          view = GetDisplayRootFor(view);
        }
  
        // If the view has no frame, look for a view that does.
        nsIFrame* frame = view->GetFrame();
        if (!frame &&
            (dispatchUsingCoordinates || NS_IS_KEY_EVENT(aEvent) ||
             NS_IS_IME_RELATED_EVENT(aEvent) ||
             NS_IS_NON_RETARGETED_PLUGIN_EVENT(aEvent) ||
             aEvent->message == NS_PLUGIN_ACTIVATE ||
             aEvent->message == NS_PLUGIN_FOCUS)) {
          while (view && !view->GetFrame()) {
            view = view->GetParent();
          }

          if (view) {
            frame = view->GetFrame();
          }
        }

        if (nsnull != frame) {
          // Hold a refcount to the presshell. The continued existence of the
          // presshell will delay deletion of this view hierarchy should the event
          // want to cause its destruction in, say, some JavaScript event handler.
          nsCOMPtr<nsIPresShell> shell = view->GetViewManager()->GetPresShell();
          if (shell) {
            shell->HandleEvent(frame, aEvent, false, aStatus);
          }
        }
    
        break;
      }
    }

  return NS_OK;
}
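In the NS_SIZE branch above, the widget's device-pixel size is converted into app units, Gecko's fixed-point layout unit, before SetWindowDimensions is called. A toy version of that conversion (NSIntPixelsToAppUnits is the real Gecko helper; the ratio below is illustrative, since AppUnitsPerDevPixel varies per device):

#include <cstdint>

typedef int32_t nscoord;

// Device pixels -> app units is a straight multiply by the per-device
// ratio (a sketch of what the NS_SIZE branch performs).
inline nscoord IntPixelsToAppUnits(int32_t aPixels, int32_t aAppUnitsPerDevPixel) {
  return aPixels * aAppUnitsPerDevPixel;
}

// Example: at 60 app units per device pixel, an 800x600 widget becomes
// 48000x36000 app units.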
Example #24
FFmpegH264Decoder<LIBAV_VER>::DecodeResult
FFmpegH264Decoder<LIBAV_VER>::DoDecodeFrame(MediaRawData* aSample,
                                            uint8_t* aData, int aSize)
{
  AVPacket packet;
  av_init_packet(&packet);

  packet.data = aData;
  packet.size = aSize;
  packet.dts = aSample->mTimecode;
  packet.pts = aSample->mTime;
  packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
  packet.pos = aSample->mOffset;

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
    mCallback->Error();
    return DecodeResult::DECODE_ERROR;
  }

  // Required with old version of FFmpeg/LibAV
  mFrame->reordered_opaque = AV_NOPTS_VALUE;

  int decoded;
  int bytesConsumed =
    avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);

  FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
             "(Input: pts(%lld) dts(%lld) Output: pts(%lld) "
             "opaque(%lld) pkt_pts(%lld) pkt_dts(%lld))",
             bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
             mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);

  if (bytesConsumed < 0) {
    NS_WARNING("FFmpeg video decoder error.");
    mCallback->Error();
    return DecodeResult::DECODE_ERROR;
  }

  // If we've decoded a frame then we need to output it
  if (decoded) {
    int64_t pts = GetPts(packet);
    FFMPEG_LOG("Got one frame output with pts=%lld opaque=%lld",
               pts, mCodecContext->reordered_opaque);

    VideoInfo info;
    info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);

    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = mFrame->data[0];
    b.mPlanes[0].mStride = mFrame->linesize[0];
    b.mPlanes[0].mHeight = mFrame->height;
    b.mPlanes[0].mWidth = mFrame->width;
    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = mFrame->data[1];
    b.mPlanes[1].mStride = mFrame->linesize[1];
    b.mPlanes[1].mHeight = (mFrame->height + 1) >> 1;
    b.mPlanes[1].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = mFrame->data[2];
    b.mPlanes[2].mStride = mFrame->linesize[2];
    b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
    b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

    nsRefPtr<VideoData> v = VideoData::Create(info,
                                              mImageContainer,
                                              aSample->mOffset,
                                              pts,
                                              aSample->mDuration,
                                              b,
                                              aSample->mKeyframe,
                                              -1,
                                              gfx::IntRect(0, 0, mCodecContext->width, mCodecContext->height));
    if (!v) {
      NS_WARNING("image allocation error.");
      mCallback->Error();
      return DecodeResult::DECODE_ERROR;
    }
    mCallback->Output(v);
    return DecodeResult::DECODE_FRAME;
  }
  return DecodeResult::DECODE_NO_FRAME;
}
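The (mFrame->height + 1) >> 1 rounding above is the usual 4:2:0 rule: each chroma plane is half the luma plane in each dimension, rounded up so odd-sized frames stay fully covered. A standalone check of that rule:

#include <cstdio>

// For 4:2:0 (I420), one chroma sample covers a 2x2 block of luma
// pixels, so each chroma dimension is ceil(luma / 2) == (luma + 1) >> 1.
int main() {
  const int lumas[] = { 1920, 1919, 3, 2, 1 };
  for (int w : lumas) {
    printf("luma %4d -> chroma %4d\n", w, (w + 1) >> 1);
  }
  return 0;
}
// Prints: 1920 -> 960, 1919 -> 960, 3 -> 2, 2 -> 1, 1 -> 1.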
Example #25
void
TiledContentHost::RenderLayerBuffer(TiledLayerBufferComposite& aLayerBuffer,
                                    const nsIntRegion& aValidRegion,
                                    EffectChain& aEffectChain,
                                    float aOpacity,
                                    const gfx::Point& aOffset,
                                    const gfx::Filter& aFilter,
                                    const gfx::Rect& aClipRect,
                                    const nsIntRegion& aMaskRegion,
                                    nsIntRect aVisibleRect,
                                    gfx::Matrix4x4 aTransform)
{
  float resolution = aLayerBuffer.GetResolution();
  gfxSize layerScale(1, 1);
  // We assume that the current frame resolution is the one used in our primary
  // layer buffer. Compensate for a changing frame resolution.
  if (aLayerBuffer.GetFrameResolution() != mVideoMemoryTiledBuffer.GetFrameResolution()) {
    const gfxSize& layerResolution = aLayerBuffer.GetFrameResolution();
    const gfxSize& localResolution = mVideoMemoryTiledBuffer.GetFrameResolution();
    layerScale.width = layerResolution.width / localResolution.width;
    layerScale.height = layerResolution.height / localResolution.height;
    aVisibleRect.ScaleRoundOut(layerScale.width, layerScale.height);
  }
  aTransform.Scale(1/(resolution * layerScale.width),
                   1/(resolution * layerScale.height), 1);

  uint32_t rowCount = 0;
  uint32_t tileX = 0;
  for (int32_t x = aVisibleRect.x; x < aVisibleRect.x + aVisibleRect.width;) {
    rowCount++;
    int32_t tileStartX = aLayerBuffer.GetTileStart(x);
    int32_t w = aLayerBuffer.GetScaledTileLength() - tileStartX;
    if (x + w > aVisibleRect.x + aVisibleRect.width) {
      w = aVisibleRect.x + aVisibleRect.width - x;
    }
    int tileY = 0;
    for (int32_t y = aVisibleRect.y; y < aVisibleRect.y + aVisibleRect.height;) {
      int32_t tileStartY = aLayerBuffer.GetTileStart(y);
      int32_t h = aLayerBuffer.GetScaledTileLength() - tileStartY;
      if (y + h > aVisibleRect.y + aVisibleRect.height) {
        h = aVisibleRect.y + aVisibleRect.height - y;
      }

      TiledTexture tileTexture = aLayerBuffer.
        GetTile(nsIntPoint(aLayerBuffer.RoundDownToTileEdge(x),
                           aLayerBuffer.RoundDownToTileEdge(y)));
      if (tileTexture != aLayerBuffer.GetPlaceholderTile()) {
        nsIntRegion tileDrawRegion;
        tileDrawRegion.And(aValidRegion,
                           nsIntRect(x * layerScale.width,
                                     y * layerScale.height,
                                     w * layerScale.width,
                                     h * layerScale.height));
        tileDrawRegion.Sub(tileDrawRegion, aMaskRegion);

        if (!tileDrawRegion.IsEmpty()) {
          tileDrawRegion.ScaleRoundOut(resolution / layerScale.width,
                                       resolution / layerScale.height);

          nsIntPoint tileOffset((x - tileStartX) * resolution,
                                (y - tileStartY) * resolution);
          uint32_t tileSize = aLayerBuffer.GetTileLength();
          RenderTile(tileTexture, aEffectChain, aOpacity, aTransform, aOffset, aFilter, aClipRect, tileDrawRegion,
                     tileOffset, nsIntSize(tileSize, tileSize));
        }
      }
      tileY++;
      y += h;
    }
    tileX++;
    x += w;
  }
}
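The nested loops above advance x and y by however much of the current tile remains inside the visible rect, so edge tiles that are only partially covered are still visited exactly once. The walking pattern on one axis, in isolation (fixed tile size and nonnegative coordinates assumed):

#include <cstdio>

const int kTileLength = 256;

// Visit every tile span that intersects [x0, x0 + w), stepping by the
// remaining width of each tile rather than a fixed stride.
void WalkTileSpans(int x0, int w) {
  for (int x = x0; x < x0 + w;) {
    int startInTile = x % kTileLength;      // offset of x inside its tile
    int span = kTileLength - startInTile;   // what this tile can still cover
    if (x + span > x0 + w) {
      span = x0 + w - x;                    // clamp to the visible rect
    }
    printf("tile %d covers [%d, %d)\n", x - startInTile, x, x + span);
    x += span;
  }
}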
Example #26
void
ThebesLayerD3D10::FillTexturesBlackWhite(const nsIntRegion& aRegion, const nsIntPoint& aOffset)
{
  if (mTexture && mTextureOnWhite) {
    // It would be more optimal to draw the actual geometry, but more code
    // and probably not worth the win here as this will often be a single
    // rect.
    nsRefPtr<ID3D10RenderTargetView> oldRT;
    device()->OMGetRenderTargets(1, getter_AddRefs(oldRT), NULL);

    nsRefPtr<ID3D10RenderTargetView> viewBlack;
    nsRefPtr<ID3D10RenderTargetView> viewWhite;
    device()->CreateRenderTargetView(mTexture, NULL, getter_AddRefs(viewBlack));
    device()->CreateRenderTargetView(mTextureOnWhite, NULL, getter_AddRefs(viewWhite));

    D3D10_RECT oldScissor;
    UINT numRects = 1;
    device()->RSGetScissorRects(&numRects, &oldScissor);

    D3D10_TEXTURE2D_DESC desc;
    mTexture->GetDesc(&desc);

    D3D10_RECT scissor = { 0, 0, desc.Width, desc.Height };
    device()->RSSetScissorRects(1, &scissor);

    mD3DManager->SetupInputAssembler();
    nsIntSize oldVP = mD3DManager->GetViewport();

    mD3DManager->SetViewport(nsIntSize(desc.Width, desc.Height));

    ID3D10RenderTargetView *views[2] = { viewBlack, viewWhite };
    device()->OMSetRenderTargets(2, views, NULL);

    gfx3DMatrix transform;
    transform.Translate(gfxPoint3D(-aOffset.x, -aOffset.y, 0));
    void* raw = &transform._11;
    effect()->GetVariableByName("mLayerTransform")->SetRawValue(raw, 0, 64);

    ID3D10EffectTechnique *technique =
      effect()->GetTechniqueByName("PrepareAlphaExtractionTextures");

    nsIntRegionRectIterator iter(aRegion);

    const nsIntRect *iterRect;
    while ((iterRect = iter.Next())) {
      effect()->GetVariableByName("vLayerQuad")->AsVector()->SetFloatVector(
        ShaderConstantRectD3D10(
          (float)iterRect->x,
          (float)iterRect->y,
          (float)iterRect->width,
          (float)iterRect->height)
        );

      technique->GetPassByIndex(0)->Apply(0);
      device()->Draw(4, 0);
    }

    views[0] = oldRT;
    device()->OMSetRenderTargets(1, views, NULL);
    mD3DManager->SetViewport(oldVP);
    device()->RSSetScissorRects(1, &oldScissor);
  }
}
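The black/white texture pair filled above is the component-alpha setup: the same content is composited over black and over white, and because result = src + (1 - alpha) * background, the difference between the two renders recovers per-channel alpha. A scalar sketch of the recovery step that the "PrepareAlphaExtractionTextures" technique feeds into (the helper name is mine):

#include <cstdint>

// With premultiplied source c and alpha a, compositing gives
//   onBlack = c   and   onWhite = c + (1 - a) * 255,
// so a * 255 = 255 - (onWhite - onBlack), per channel.
inline uint8_t RecoverAlpha(uint8_t onBlack, uint8_t onWhite) {
  return uint8_t(255 - (int(onWhite) - int(onBlack)));
}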