Code Example #1
GonkVideoDecoderManager::GonkVideoDecoderManager(
  MediaTaskQueue* aTaskQueue,
  mozilla::layers::ImageContainer* aImageContainer,
  const mp4_demuxer::VideoDecoderConfig& aConfig)
  : GonkDecoderManager(aTaskQueue)
  , mImageContainer(aImageContainer)
  , mReaderCallback(nullptr)
  , mColorConverterBufferSize(0)
  , mNativeWindow(nullptr)
  , mPendingVideoBuffersLock("GonkVideoDecoderManager::mPendingVideoBuffersLock")
{
  NS_ASSERTION(!NS_IsMainThread(), "Should not be on main thread.");
  MOZ_ASSERT(mImageContainer);
  MOZ_COUNT_CTOR(GonkVideoDecoderManager);
  mVideoWidth  = aConfig.display_width;
  mVideoHeight = aConfig.display_height;
  mDisplayWidth = aConfig.display_width;
  mDisplayHeight = aConfig.display_height;
  mInfo.mVideo.mHasVideo = true;
  nsIntSize displaySize(mDisplayWidth, mDisplayHeight);
  mInfo.mVideo.mDisplay = displaySize;

  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  mPicture = pictureRect;
  mInitialFrame = frameSize;
  mHandler = new MessageHandler(this);
  mVideoListener = new VideoResourceListener(this);
}
Code Example #2
nsresult MediaOmxReader::ReadMetadata(VideoInfo* aInfo,
                                      MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  *aTags = nullptr;

  if (!mOmxDecoder.get()) {
    mOmxDecoder = new OmxDecoder(mDecoder->GetResource(), mDecoder);
    if (!mOmxDecoder->Init()) {
      return NS_ERROR_FAILURE;
    }
  }

  // Set the total duration (the max of the audio and video track).
  int64_t durationUs;
  mOmxDecoder->GetDuration(&durationUs);
  if (durationUs) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(durationUs);
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t width, height;
    mOmxDecoder->GetVideoParameters(&width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(width, height);
    nsIntSize frameSize(width, height);
    if (!VideoInfo::ValidateVideoRegion(frameSize, pictureRect, displaySize)) {
      return NS_ERROR_FAILURE;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = mInfo.mHasVideo = true;
    mInfo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                 nullptr,
                                 mozilla::TimeStamp::Now());
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = mInfo.mHasAudio = true;
    mInfo.mAudioChannels = numChannels;
    mInfo.mAudioRate = sampleRate;
  }

  *aInfo = mInfo;

  return NS_OK;
}
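
Note: almost every example in this collection guards video-frame creation with VideoInfo::ValidateVideoRegion or IsValidVideoRegion. The real helper lives in Gecko's VideoUtils; the standalone sketch below only illustrates the kind of checks such a validator performs (non-empty, bounded dimensions computed in 64-bit so products cannot overflow, and a picture rect fully contained in the decoded frame). The types, limits, and function name here are hypothetical, not Gecko's.

#include <cstdint>

struct IntSize { int32_t width, height; };
struct IntRect { int32_t x, y, width, height; };

// Hypothetical limits; Gecko defines its own maximums in VideoUtils.h.
static const int64_t kMaxDimension = 4000;
static const int64_t kMaxArea = 4000 * 3000;

// Sketch: every size must be non-empty and bounded, and the picture
// rect must lie entirely inside the decoded frame.
static bool IsValidVideoRegionSketch(const IntSize& aFrame,
                                     const IntRect& aPicture,
                                     const IntSize& aDisplay)
{
  auto validSize = [](int64_t w, int64_t h) {
    // 64-bit arithmetic so w * h cannot overflow.
    return w > 0 && h > 0 && w <= kMaxDimension && h <= kMaxDimension &&
           w * h <= kMaxArea;
  };
  return validSize(aFrame.width, aFrame.height) &&
         validSize(aDisplay.width, aDisplay.height) &&
         validSize(aPicture.width, aPicture.height) &&
         aPicture.x >= 0 && aPicture.y >= 0 &&
         int64_t(aPicture.x) + aPicture.width <= aFrame.width &&
         int64_t(aPicture.y) + aPicture.height <= aFrame.height;
}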
Code Example #3
GonkVideoDecoderManager::GonkVideoDecoderManager(
  mozilla::layers::ImageContainer* aImageContainer,
  const VideoInfo& aConfig)
  : mImageContainer(aImageContainer)
  , mReaderCallback(nullptr)
  , mLastDecodedTime(0)
  , mColorConverterBufferSize(0)
  , mNativeWindow(nullptr)
  , mPendingVideoBuffersLock("GonkVideoDecoderManager::mPendingVideoBuffersLock")
  , mMonitor("GonkVideoDecoderManager")
{
  MOZ_COUNT_CTOR(GonkVideoDecoderManager);
  mMimeType = aConfig.mMimeType;
  mVideoWidth  = aConfig.mDisplay.width;
  mVideoHeight = aConfig.mDisplay.height;
  mDisplayWidth = aConfig.mDisplay.width;
  mDisplayHeight = aConfig.mDisplay.height;
  mInfo.mVideo = aConfig;

  mCodecSpecificData = aConfig.mCodecSpecificConfig;
  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  mPicture = pictureRect;
  mInitialFrame = frameSize;
  mHandler = new MessageHandler(this);
  mVideoListener = new VideoResourceListener(this);
}
Code Example #4
RefPtr<MediaDataDecoder::InitPromise>
GonkVideoDecoderManager::Init()
{
  nsIntSize displaySize(mDisplayWidth, mDisplayHeight);
  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);

  uint32_t maxWidth, maxHeight;
  char propValue[PROPERTY_VALUE_MAX];
  property_get("ro.moz.omx.hw.max_width", propValue, "-1");
  maxWidth = -1 == atoi(propValue) ? MAX_VIDEO_WIDTH : atoi(propValue);
  property_get("ro.moz.omx.hw.max_height", propValue, "-1");
  maxHeight = -1 == atoi(propValue) ? MAX_VIDEO_HEIGHT : atoi(propValue);

  if (mVideoWidth * mVideoHeight > maxWidth * maxHeight) {
    GVDM_LOG("Video resolution exceeds hw codec capability");
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  // Validate the container-reported frame and pictureRect sizes. This ensures
  // that our video frame creation code doesn't overflow.
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GVDM_LOG("It is not a valid region");
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  mReaderTaskQueue = AbstractThread::GetCurrent()->AsTaskQueue();
  MOZ_ASSERT(mReaderTaskQueue);

  if (mDecodeLooper.get() != nullptr) {
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  if (!InitLoopers(MediaData::VIDEO_DATA)) {
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
  android::sp<GonkVideoDecoderManager> self = this;
  mVideoCodecRequest.Begin(mVideoListener->Init()
    ->Then(mReaderTaskQueue, __func__,
      [self] (bool) -> void {
        self->mVideoCodecRequest.Complete();
        self->codecReserved();
      }, [self] (bool) -> void {
        self->mVideoCodecRequest.Complete();
        self->codecCanceled();
      }));
  mDecoder = MediaCodecProxy::CreateByType(mDecodeLooper, mMimeType.get(), false, mVideoListener);
  mDecoder->AsyncAskMediaCodec();

  uint32_t capability = MediaCodecProxy::kEmptyCapability;
  if (mDecoder->getCapability(&capability) == OK && (capability &
      MediaCodecProxy::kCanExposeGraphicBuffer)) {
    mNativeWindow = new GonkNativeWindow();
  }

  return p;
}
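
The resolution cap in Init() above is read from Android system properties with a build-time fallback. A minimal standalone version of that pattern, using libcutils' property_get (the property names and the "-1" sentinel come from the example; the helper name is made up for illustration), might look like this:

#include <cstdlib>
#include <cstdint>
#include <cutils/properties.h>  // property_get, PROPERTY_VALUE_MAX (Android-only)

// Read an integer system property; fall back to aDefault when the
// property is unset (property_get then yields the "-1" sentinel).
static uint32_t GetUint32Property(const char* aKey, uint32_t aDefault)
{
  char propValue[PROPERTY_VALUE_MAX];
  property_get(aKey, propValue, "-1");
  int parsed = atoi(propValue);
  return parsed == -1 ? aDefault : static_cast<uint32_t>(parsed);
}

// Usage mirroring Init():
//   uint32_t maxWidth  = GetUint32Property("ro.moz.omx.hw.max_width",  MAX_VIDEO_WIDTH);
//   uint32_t maxHeight = GetUint32Property("ro.moz.omx.hw.max_height", MAX_VIDEO_HEIGHT);

Unlike the inline version in Init(), this parses the property string only once.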
Code Example #5
File: GStreamerReader.cpp  Project: msliu/gecko-dev
void GStreamerReader::VideoPreroll()
{
  /* The first video buffer has reached the video sink. Get width and height */
  LOG(PR_LOG_DEBUG, "Video preroll");
  GstPad* sinkpad = gst_element_get_static_pad(GST_ELEMENT(mVideoAppSink), "sink");
  int PARNumerator, PARDenominator;
#if GST_VERSION_MAJOR >= 1
  GstCaps* caps = gst_pad_get_current_caps(sinkpad);
  memset (&mVideoInfo, 0, sizeof (mVideoInfo));
  gst_video_info_from_caps(&mVideoInfo, caps);
  mFormat = mVideoInfo.finfo->format;
  mPicture.width = mVideoInfo.width;
  mPicture.height = mVideoInfo.height;
  PARNumerator = GST_VIDEO_INFO_PAR_N(&mVideoInfo);
  PARDenominator = GST_VIDEO_INFO_PAR_D(&mVideoInfo);
#else
  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
  gst_video_format_parse_caps(caps, &mFormat, &mPicture.width, &mPicture.height);
  if (!gst_video_parse_caps_pixel_aspect_ratio(caps, &PARNumerator, &PARDenominator)) {
    PARNumerator = 1;
    PARDenominator = 1;
  }
#endif
  NS_ASSERTION(mPicture.width && mPicture.height, "invalid video resolution");

  // Calculate display size according to pixel aspect ratio.
  nsIntRect pictureRect(0, 0, mPicture.width, mPicture.height);
  nsIntSize frameSize = nsIntSize(mPicture.width, mPicture.height);
  nsIntSize displaySize = nsIntSize(mPicture.width, mPicture.height);
  ScaleDisplayByAspectRatio(displaySize, float(PARNumerator) / float(PARDenominator));

  // If video frame size is overflow, stop playing.
  if (IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GstStructure* structure = gst_caps_get_structure(caps, 0);
    gst_structure_get_fraction(structure, "framerate", &fpsNum, &fpsDen);
    mInfo.mVideo.mDisplay = ThebesIntSize(displaySize.ToIntSize());
    mInfo.mVideo.mHasVideo = true;
  } else {
    LOG(PR_LOG_DEBUG, "invalid video region");
    Eos();
  }
  gst_caps_unref(caps);
  gst_object_unref(sinkpad);
}
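
VideoPreroll() converts the pixel aspect ratio into a display size via ScaleDisplayByAspectRatio. A sketch of the usual scaling rule (PAR > 1 means each pixel is wider than tall, so the display stretches horizontally; Gecko's own helper in VideoUtils may round differently):

#include <cmath>

struct IntSize { int width, height; };

// Sketch: scale a frame size by the pixel aspect ratio
// (PAR = PARNumerator / PARDenominator) to get the display size.
static void ScaleDisplayByAspectRatioSketch(IntSize& aDisplay, float aPAR)
{
  if (aPAR <= 0.0f) {
    return;  // invalid ratio; leave the display size unchanged
  }
  if (aPAR > 1.0f) {
    // Wide pixels: stretch the display horizontally.
    aDisplay.width = int(std::floor(float(aDisplay.width) * aPAR));
  } else {
    // Tall pixels: stretch the display vertically.
    aDisplay.height = int(std::floor(float(aDisplay.height) / aPAR));
  }
}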
Code Example #6
nsRefPtr<MediaDataDecoder::InitPromise>
GonkVideoDecoderManager::Init(MediaDataDecoderCallback* aCallback)
{
  nsIntSize displaySize(mDisplayWidth, mDisplayHeight);
  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);
  // Validate the container-reported frame and pictureRect sizes. This ensures
  // that our video frame creation code doesn't overflow.
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GVDM_LOG("It is not a valid region");
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }

  mReaderCallback = aCallback;

  mReaderTaskQueue = AbstractThread::GetCurrent()->AsTaskQueue();
  MOZ_ASSERT(mReaderTaskQueue);

  if (mLooper.get() != nullptr) {
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }
  // Create ALooper
  mLooper = new ALooper;
  mManagerLooper = new ALooper;
  mManagerLooper->setName("GonkVideoDecoderManager");
  // Register AMessage handler to ALooper.
  mManagerLooper->registerHandler(mHandler);
  // Start ALooper thread.
  if (mLooper->start() != OK || mManagerLooper->start() != OK) {
    return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__);
  }
  nsRefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
  mDecoder = MediaCodecProxy::CreateByType(mLooper, mMimeType.get(), false, mVideoListener);
  mDecoder->AsyncAskMediaCodec();

  uint32_t capability = MediaCodecProxy::kEmptyCapability;
  if (mDecoder->getCapability(&capability) == OK && (capability &
      MediaCodecProxy::kCanExposeGraphicBuffer)) {
    mNativeWindow = new GonkNativeWindow();
  }

  return p;
}
Code Example #7
GonkVideoDecoderManager::GonkVideoDecoderManager(
  mozilla::layers::ImageContainer* aImageContainer,
  const VideoInfo& aConfig)
  : mImageContainer(aImageContainer)
  , mColorConverterBufferSize(0)
  , mPendingReleaseItemsLock("GonkVideoDecoderManager::mPendingReleaseItemsLock")
  , mNeedsCopyBuffer(false)
{
  MOZ_COUNT_CTOR(GonkVideoDecoderManager);
  mMimeType = aConfig.mMimeType;
  mVideoWidth  = aConfig.mDisplay.width;
  mVideoHeight = aConfig.mDisplay.height;
  mDisplayWidth = aConfig.mDisplay.width;
  mDisplayHeight = aConfig.mDisplay.height;
  mInfo.mVideo = aConfig;

  mCodecSpecificData = aConfig.mCodecSpecificConfig;
  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  mPicture = pictureRect;
  mInitialFrame = frameSize;
}
Code Example #8
android::sp<MediaCodecProxy>
GonkVideoDecoderManager::Init(MediaDataDecoderCallback* aCallback)
{
  nsIntSize displaySize(mDisplayWidth, mDisplayHeight);
  nsIntRect pictureRect(0, 0, mVideoWidth, mVideoHeight);
  // Validate the container-reported frame and pictureRect sizes. This ensures
  // that our video frame creation code doesn't overflow.
  nsIntSize frameSize(mVideoWidth, mVideoHeight);
  if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GVDM_LOG("It is not a valid region");
    return nullptr;
  }

  mReaderCallback = aCallback;

  if (mLooper.get() != nullptr) {
    return nullptr;
  }
  // Create ALooper
  mLooper = new ALooper;
  mManagerLooper = new ALooper;
  mManagerLooper->setName("GonkVideoDecoderManager");
  // Register AMessage handler to ALooper.
  mManagerLooper->registerHandler(mHandler);
  // Start ALooper thread.
  if (mLooper->start() != OK || mManagerLooper->start() != OK) {
    return nullptr;
  }
  mDecoder = MediaCodecProxy::CreateByType(mLooper, mMimeType.get(), false, mVideoListener);
  mDecoder->AskMediaCodecAndWait();
  uint32_t capability = MediaCodecProxy::kEmptyCapability;
  if (mDecoder->getCapability(&capability) == OK && (capability &
      MediaCodecProxy::kCanExposeGraphicBuffer)) {
    mNativeWindow = new GonkNativeWindow();
  }

  return mDecoder;
}
Code Example #9
void MediaOmxReader::HandleResourceAllocated()
{
  EnsureActive();

  // After resources are available, set the metadata.
  if (!mOmxDecoder->EnsureMetadata()) {
    mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__);
    return;
  }

  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3 && mMP3FrameParser.IsMP3()) {
    // Check if the MP3 frame parser found a duration.
    mLastParserDuration = mMP3FrameParser.GetDuration();
  }

  if (mLastParserDuration >= 0) {
    // Prefer the parser duration if we have it.
    mInfo.mMetadataDuration = Some(TimeUnit::FromMicroseconds(mLastParserDuration));
  } else {
    // MP3 parser failed to find a duration.
    // Set the total duration (the max of the audio and video track).
    int64_t durationUs;
    mOmxDecoder->GetDuration(&durationUs);
    if (durationUs) {
      mInfo.mMetadataDuration = Some(TimeUnit::FromMicroseconds(durationUs));
    }
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t displayWidth, displayHeight, width, height;
    mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight,
                                    &width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(displayWidth, displayHeight);
    nsIntSize frameSize(width, height);
    if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
      mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__);
      return;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = true;
    mInfo.mVideo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->ClearCurrentFrame(gfxIntSize(displaySize.width, displaySize.height));
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = true;
    mInfo.mAudio.mChannels = numChannels;
    mInfo.mAudio.mRate = sampleRate;
  }

  nsRefPtr<MetadataHolder> metadata = new MetadataHolder();
  metadata->mInfo = mInfo;
  metadata->mTags = nullptr;

#ifdef MOZ_AUDIO_OFFLOAD
  CheckAudioOffload();
#endif

  mMetadataPromise.Resolve(metadata, __func__);
}
Code Example #10
void
ImageHost::Composite(EffectChain& aEffectChain,
                     float aOpacity,
                     const gfx::Matrix4x4& aTransform,
                     const gfx::Filter& aFilter,
                     const gfx::Rect& aClipRect,
                     const nsIntRegion* aVisibleRegion)
{
  if (!GetCompositor()) {
    // This should only happen when a tab is dragged to another window and
    // async-video is still sending frames but we haven't attached the new
    // compositor yet.
    return;
  }
  if (!mFrontBuffer) {
    return;
  }

  // Make sure the front buffer has a compositor
  mFrontBuffer->SetCompositor(GetCompositor());

  AutoLockCompositableHost autoLock(this);
  if (autoLock.Failed()) {
    NS_WARNING("failed to lock front buffer");
    return;
  }

  if (!mFrontBuffer->BindTextureSource(mTextureSource)) {
    return;
  }

  if (!mTextureSource) {
    // BindTextureSource above should have returned false!
    MOZ_ASSERT(false);
    return;
  }

  bool isAlphaPremultiplied = !(mFrontBuffer->GetFlags() & TextureFlags::NON_PREMULTIPLIED);
  RefPtr<TexturedEffect> effect = CreateTexturedEffect(mFrontBuffer->GetFormat(),
                                                       mTextureSource.get(),
                                                       aFilter,
                                                       isAlphaPremultiplied);
  if (!effect) {
    return;
  }

  aEffectChain.mPrimaryEffect = effect;
  IntSize textureSize = mTextureSource->GetSize();
  gfx::Rect gfxPictureRect
    = mHasPictureRect ? gfx::Rect(0, 0, mPictureRect.width, mPictureRect.height)
                      : gfx::Rect(0, 0, textureSize.width, textureSize.height);

  gfx::Rect pictureRect(0, 0,
                        mPictureRect.width,
                        mPictureRect.height);
  BigImageIterator* it = mTextureSource->AsBigImageIterator();
  if (it) {

    // This iteration does not work if we have multiple texture sources here
    // (e.g. 3 YCbCr textures). There's nothing preventing the different
    // planes from having different resolutions or tile sizes. For example, a
    // YCbCr frame could have Cb and Cr planes that are half the resolution of
    // the Y plane, in such a way that the Y plane overflows the maximum
    // texture size and the Cb and Cr planes do not. Then the Y plane would be
    // split into multiple tiles and the Cb and Cr planes would just be one
    // tile each.
    // To handle the general case correctly, we'd have to create a grid of
    // intersected tiles over all planes, and then draw each grid tile using
    // the corresponding source tiles from all planes, with appropriate
    // per-plane per-tile texture coords.
    // DrawQuad currently assumes that all planes use the same texture coords.
    MOZ_ASSERT(it->GetTileCount() == 1 || !mTextureSource->GetNextSibling(),
               "Can't handle multi-plane BigImages");

    it->BeginBigImageIteration();
    do {
      nsIntRect tileRect = it->GetTileRect();
      gfx::Rect rect(tileRect.x, tileRect.y, tileRect.width, tileRect.height);
      if (mHasPictureRect) {
        rect = rect.Intersect(pictureRect);
        effect->mTextureCoords = Rect(Float(rect.x - tileRect.x) / tileRect.width,
                                      Float(rect.y - tileRect.y) / tileRect.height,
                                      Float(rect.width) / tileRect.width,
                                      Float(rect.height) / tileRect.height);
      } else {
        effect->mTextureCoords = Rect(0, 0, 1, 1);
      }
      if (mFrontBuffer->GetFlags() & TextureFlags::NEEDS_Y_FLIP) {
        effect->mTextureCoords.y = effect->mTextureCoords.YMost();
        effect->mTextureCoords.height = -effect->mTextureCoords.height;
      }
      GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                                aOpacity, aTransform);
      GetCompositor()->DrawDiagnostics(DiagnosticFlags::IMAGE | DiagnosticFlags::BIGIMAGE,
                                       rect, aClipRect, aTransform, mFlashCounter);
    } while (it->NextTile());
    it->EndBigImageIteration();
    // layer border
    GetCompositor()->DrawDiagnostics(DiagnosticFlags::IMAGE,
                                     gfxPictureRect, aClipRect,
                                     aTransform, mFlashCounter);
  } else {
    IntSize textureSize = mTextureSource->GetSize();
    gfx::Rect rect;
    if (mHasPictureRect) {
      effect->mTextureCoords = Rect(Float(mPictureRect.x) / textureSize.width,
                                    Float(mPictureRect.y) / textureSize.height,
                                    Float(mPictureRect.width) / textureSize.width,
                                    Float(mPictureRect.height) / textureSize.height);
      rect = pictureRect;
    } else {
      effect->mTextureCoords = Rect(0, 0, 1, 1);
      rect = gfx::Rect(0, 0, textureSize.width, textureSize.height);
    }

    if (mFrontBuffer->GetFlags() & TextureFlags::NEEDS_Y_FLIP) {
      effect->mTextureCoords.y = effect->mTextureCoords.YMost();
      effect->mTextureCoords.height = -effect->mTextureCoords.height;
    }

    GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                              aOpacity, aTransform);
    GetCompositor()->DrawDiagnostics(DiagnosticFlags::IMAGE,
                                     rect, aClipRect,
                                     aTransform, mFlashCounter);
  }
}
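
The non-tiled branch above maps the picture rect into normalized texture coordinates and flips Y for bottom-left-origin textures. That mapping, pulled out as a standalone sketch (the Rect type here is a stand-in for gfx::Rect, and the helper name is invented for illustration):

struct Rect {
  float x, y, width, height;
  float YMost() const { return y + height; }
};

// Sketch: express a picture rect in [0,1] texture space for a texture
// of the given size, flipping vertically when the texture's origin is
// the bottom-left corner (TextureFlags::NEEDS_Y_FLIP above).
static Rect PictureRectToTexCoords(const Rect& aPicture,
                                   float aTexWidth, float aTexHeight,
                                   bool aNeedsYFlip)
{
  Rect coords = { aPicture.x / aTexWidth,
                  aPicture.y / aTexHeight,
                  aPicture.width / aTexWidth,
                  aPicture.height / aTexHeight };
  if (aNeedsYFlip) {
    // Start sampling at the bottom edge and walk upwards.
    coords.y = coords.YMost();
    coords.height = -coords.height;
  }
  return coords;
}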
Code Example #11
File: ImageHost.cpp  Project: ConradIrwin/gecko-dev
void
ImageHost::Composite(EffectChain& aEffectChain,
                     float aOpacity,
                     const gfx::Matrix4x4& aTransform,
                     const gfx::Filter& aFilter,
                     const gfx::Rect& aClipRect,
                     const nsIntRegion* aVisibleRegion,
                     TiledLayerProperties* aLayerProperties)
{
  if (!GetCompositor()) {
    // This should only happen when a tab is dragged to another window and
    // async-video is still sending frames but we haven't attached the new
    // compositor yet.
    return;
  }
  if (!mFrontBuffer) {
    return;
  }

  // Make sure the front buffer has a compositor
  mFrontBuffer->SetCompositor(GetCompositor());

  AutoLockTextureHost autoLock(mFrontBuffer);
  if (autoLock.Failed()) {
    NS_WARNING("failed to lock front buffer");
    return;
  }
  RefPtr<NewTextureSource> source = mFrontBuffer->GetTextureSources();
  if (!source) {
    return;
  }
  RefPtr<TexturedEffect> effect = CreateTexturedEffect(mFrontBuffer->GetFormat(),
                                                       source,
                                                       aFilter);
  if (!effect) {
    return;
  }

  aEffectChain.mPrimaryEffect = effect;
  IntSize textureSize = source->GetSize();
  gfx::Rect gfxPictureRect
    = mHasPictureRect ? gfx::Rect(0, 0, mPictureRect.width, mPictureRect.height)
                      : gfx::Rect(0, 0, textureSize.width, textureSize.height);

  gfx::Rect pictureRect(0, 0,
                        mPictureRect.width,
                        mPictureRect.height);
  //XXX: We might have multiple texture sources here (e.g. 3 YCbCr textures), and we're
  // only iterating over the tiles of the first one. Are we assuming that the tiling
  // will be identical? Can we ensure that somehow?
  TileIterator* it = source->AsTileIterator();
  if (it) {
    it->BeginTileIteration();
    do {
      nsIntRect tileRect = it->GetTileRect();
      gfx::Rect rect(tileRect.x, tileRect.y, tileRect.width, tileRect.height);
      if (mHasPictureRect) {
        rect = rect.Intersect(pictureRect);
        effect->mTextureCoords = Rect(Float(rect.x - tileRect.x) / tileRect.width,
                                      Float(rect.y - tileRect.y) / tileRect.height,
                                      Float(rect.width) / tileRect.width,
                                      Float(rect.height) / tileRect.height);
      } else {
        effect->mTextureCoords = Rect(0, 0, 1, 1);
      }
      GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                                aOpacity, aTransform);
      GetCompositor()->DrawDiagnostics(DIAGNOSTIC_IMAGE|DIAGNOSTIC_BIGIMAGE,
                                       rect, aClipRect, aTransform);
    } while (it->NextTile());
    it->EndTileIteration();
    // layer border
    GetCompositor()->DrawDiagnostics(DIAGNOSTIC_IMAGE,
                                     gfxPictureRect, aClipRect,
                                     aTransform);
  } else {
    IntSize textureSize = source->GetSize();
    gfx::Rect rect;
    if (mHasPictureRect) {
      effect->mTextureCoords = Rect(Float(mPictureRect.x) / textureSize.width,
                                    Float(mPictureRect.y) / textureSize.height,
                                    Float(mPictureRect.width) / textureSize.width,
                                    Float(mPictureRect.height) / textureSize.height);
      rect = pictureRect;
    } else {
      effect->mTextureCoords = Rect(0, 0, 1, 1);
      rect = gfx::Rect(0, 0, textureSize.width, textureSize.height);
    }

    if (mFrontBuffer->GetFlags() & TEXTURE_NEEDS_Y_FLIP) {
      effect->mTextureCoords.y = effect->mTextureCoords.YMost();
      effect->mTextureCoords.height = -effect->mTextureCoords.height;
    }

    GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                              aOpacity, aTransform);
    GetCompositor()->DrawDiagnostics(DIAGNOSTIC_IMAGE,
                                     rect, aClipRect,
                                     aTransform);
  }
}
Code Example #12
nsresult MediaOmxReader::ReadMetadata(MediaInfo* aInfo,
                                      MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  EnsureActive();

  *aTags = nullptr;

  // Initialize the internal OMX Decoder.
  nsresult rv = InitOmxDecoder();
  if (NS_FAILED(rv)) {
    return rv;
  }

  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3) {
    // When reading a file from the sdcard on the B2G platform,
    // mDecoder->GetResource()->GetLength() returns -1 in the constructor,
    // so we defer setting the total duration until this function.
    mMP3FrameParser.SetLength(mDecoder->GetResource()->GetLength());
    ProcessCachedData(0, true);
  }

  if (!mOmxDecoder->AllocateMediaResources()) {
    return NS_ERROR_FAILURE;
  }
  // Bug 1050667: both MediaDecoderStateMachine and MediaOmxReader rely on
  // IsWaitingMediaResources(). The waiting state can be changed by the
  // binder thread, so we cache it to keep the two in a consistent state.
  UpdateIsWaitingMediaResources();
  if (IsWaitingMediaResources()) {
    return NS_OK;
  }
  // After resources are available, set the metadata.
  if (!mOmxDecoder->EnsureMetadata()) {
    return NS_ERROR_FAILURE;
  }

  if (isMP3 && mMP3FrameParser.IsMP3()) {
    int64_t duration = mMP3FrameParser.GetDuration();
    // The MP3FrameParser may have reported a duration; it returns -1 when
    // no frame has been parsed.
    if (duration >= 0) {
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mUseParserDuration = true;
      mLastParserDuration = duration;
      mDecoder->SetMediaDuration(mLastParserDuration);
    }
  } else {
    // Set the total duration (the max of the audio and video track).
    int64_t durationUs;
    mOmxDecoder->GetDuration(&durationUs);
    if (durationUs) {
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mDecoder->SetMediaDuration(durationUs);
    }
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t displayWidth, displayHeight, width, height;
    mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight,
                                    &width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(displayWidth, displayHeight);
    nsIntSize frameSize(width, height);
    if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
      return NS_ERROR_FAILURE;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = mInfo.mVideo.mHasVideo = true;
    mInfo.mVideo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                 nullptr,
                                 mozilla::TimeStamp::Now());
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = mInfo.mAudio.mHasAudio = true;
    mInfo.mAudio.mChannels = numChannels;
    mInfo.mAudio.mRate = sampleRate;
  }

  *aInfo = mInfo;

#ifdef MOZ_AUDIO_OFFLOAD
  CheckAudioOffload();
#endif

  return NS_OK;
}
Code Example #13
File: MediaOmxReader.cpp  Project: aknow/gecko-dev
nsresult MediaOmxReader::ReadMetadata(MediaInfo* aInfo,
                                      MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  EnsureActive();

  *aTags = nullptr;

  // Initialize the internal OMX Decoder.
  nsresult rv = InitOmxDecoder();
  if (NS_FAILED(rv)) {
    return rv;
  }

  if (!mOmxDecoder->TryLoad()) {
    return NS_ERROR_FAILURE;
  }

#ifdef MOZ_AUDIO_OFFLOAD
  CheckAudioOffload();
#endif

  if (IsWaitingMediaResources()) {
    return NS_OK;
  }

  // Set the total duration (the max of the audio and video track).
  int64_t durationUs;
  mOmxDecoder->GetDuration(&durationUs);
  if (durationUs) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(durationUs);
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t displayWidth, displayHeight, width, height;
    mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight,
                                    &width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(displayWidth, displayHeight);
    nsIntSize frameSize(width, height);
    if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
      return NS_ERROR_FAILURE;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = mInfo.mVideo.mHasVideo = true;
    mInfo.mVideo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                 nullptr,
                                 mozilla::TimeStamp::Now());
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = mInfo.mAudio.mHasAudio = true;
    mInfo.mAudio.mChannels = numChannels;
    mInfo.mAudio.mRate = sampleRate;
  }

  *aInfo = mInfo;

  return NS_OK;
}
Code Example #14
File: ImageHost.cpp  Project: ollie314/gecko-dev
void
ImageHost::Composite(LayerComposite* aLayer,
                     EffectChain& aEffectChain,
                     float aOpacity,
                     const gfx::Matrix4x4& aTransform,
                     const gfx::SamplingFilter aSamplingFilter,
                     const gfx::IntRect& aClipRect,
                     const nsIntRegion* aVisibleRegion)
{
  if (!GetCompositor()) {
    // This should only happen when a tab is dragged to another window and
    // async-video is still sending frames but we haven't attached the new
    // compositor yet.
    return;
  }

  int imageIndex = ChooseImageIndex();
  if (imageIndex < 0) {
    return;
  }

  if (uint32_t(imageIndex) + 1 < mImages.Length()) {
    GetCompositor()->CompositeUntil(mImages[imageIndex + 1].mTimeStamp + TimeDuration::FromMilliseconds(BIAS_TIME_MS));
  }

  TimedImage* img = &mImages[imageIndex];
  img->mTextureHost->SetCompositor(GetCompositor());
  SetCurrentTextureHost(img->mTextureHost);

  {
    AutoLockCompositableHost autoLock(this);
    if (autoLock.Failed()) {
      NS_WARNING("failed to lock front buffer");
      return;
    }

    if (!mCurrentTextureHost->BindTextureSource(mCurrentTextureSource)) {
      return;
    }

    if (!mCurrentTextureSource) {
      // BindTextureSource above should have returned false!
      MOZ_ASSERT(false);
      return;
    }

    bool isAlphaPremultiplied =
        !(mCurrentTextureHost->GetFlags() & TextureFlags::NON_PREMULTIPLIED);
    RefPtr<TexturedEffect> effect =
        CreateTexturedEffect(mCurrentTextureHost,
            mCurrentTextureSource.get(), aSamplingFilter, isAlphaPremultiplied,
            GetRenderState());
    if (!effect) {
      return;
    }

    if (!GetCompositor()->SupportsEffect(effect->mType)) {
      return;
    }

    DiagnosticFlags diagnosticFlags = DiagnosticFlags::IMAGE;
    if (effect->mType == EffectTypes::NV12) {
      diagnosticFlags |= DiagnosticFlags::NV12;
    } else if (effect->mType == EffectTypes::YCBCR) {
      diagnosticFlags |= DiagnosticFlags::YCBCR;
    }

    if (mLastFrameID != img->mFrameID || mLastProducerID != img->mProducerID) {
      if (mImageContainer) {
        static_cast<LayerManagerComposite*>(aLayer->GetLayerManager())->
            AppendImageCompositeNotification(ImageCompositeNotification(
                mImageContainer, nullptr,
                img->mTimeStamp, GetCompositor()->GetCompositionTime(),
                img->mFrameID, img->mProducerID));
      }
      mLastFrameID = img->mFrameID;
      mLastProducerID = img->mProducerID;
    }
    aEffectChain.mPrimaryEffect = effect;
    gfx::Rect pictureRect(0, 0, img->mPictureRect.width, img->mPictureRect.height);
    BigImageIterator* it = mCurrentTextureSource->AsBigImageIterator();
    if (it) {

      // This iteration does not work if we have multiple texture sources here
      // (e.g. 3 YCbCr textures). There's nothing preventing the different
      // planes from having different resolutions or tile sizes. For example, a
      // YCbCr frame could have Cb and Cr planes that are half the resolution of
      // the Y plane, in such a way that the Y plane overflows the maximum
      // texture size and the Cb and Cr planes do not. Then the Y plane would be
      // split into multiple tiles and the Cb and Cr planes would just be one
      // tile each.
      // To handle the general case correctly, we'd have to create a grid of
      // intersected tiles over all planes, and then draw each grid tile using
      // the corresponding source tiles from all planes, with appropriate
      // per-plane per-tile texture coords.
      // DrawQuad currently assumes that all planes use the same texture coords.
      MOZ_ASSERT(it->GetTileCount() == 1 || !mCurrentTextureSource->GetNextSibling(),
                 "Can't handle multi-plane BigImages");

      it->BeginBigImageIteration();
      do {
        IntRect tileRect = it->GetTileRect();
        gfx::Rect rect(tileRect.x, tileRect.y, tileRect.width, tileRect.height);
        rect = rect.Intersect(pictureRect);
        effect->mTextureCoords = Rect(Float(rect.x - tileRect.x) / tileRect.width,
                                      Float(rect.y - tileRect.y) / tileRect.height,
                                      Float(rect.width) / tileRect.width,
                                      Float(rect.height) / tileRect.height);
        if (img->mTextureHost->GetFlags() & TextureFlags::ORIGIN_BOTTOM_LEFT) {
          effect->mTextureCoords.y = effect->mTextureCoords.YMost();
          effect->mTextureCoords.height = -effect->mTextureCoords.height;
        }
        GetCompositor()->DrawQuad(rect, aClipRect, aEffectChain,
                                  aOpacity, aTransform);
        GetCompositor()->DrawDiagnostics(diagnosticFlags | DiagnosticFlags::BIGIMAGE,
                                         rect, aClipRect, aTransform, mFlashCounter);
      } while (it->NextTile());
      it->EndBigImageIteration();
      // layer border
      GetCompositor()->DrawDiagnostics(diagnosticFlags, pictureRect,
                                       aClipRect, aTransform, mFlashCounter);
    } else {
      IntSize textureSize = mCurrentTextureSource->GetSize();
      effect->mTextureCoords = Rect(Float(img->mPictureRect.x) / textureSize.width,
                                    Float(img->mPictureRect.y) / textureSize.height,
                                    Float(img->mPictureRect.width) / textureSize.width,
                                    Float(img->mPictureRect.height) / textureSize.height);

      if (img->mTextureHost->GetFlags() & TextureFlags::ORIGIN_BOTTOM_LEFT) {
        effect->mTextureCoords.y = effect->mTextureCoords.YMost();
        effect->mTextureCoords.height = -effect->mTextureCoords.height;
      }

      GetCompositor()->DrawQuad(pictureRect, aClipRect, aEffectChain,
                                aOpacity, aTransform);
      GetCompositor()->DrawDiagnostics(diagnosticFlags,
                                       pictureRect, aClipRect,
                                       aTransform, mFlashCounter);
    }
  }

  // Update mBias last. This can change which frame ChooseImage(Index) would
  // return, and we don't want to do that until we've finished compositing
  // since callers of ChooseImage(Index) assume the same image will be chosen
  // during a given composition. This must happen after autoLock's
  // destructor!
  mBias = UpdateBias(
      GetCompositor()->GetCompositionTime(), mImages[imageIndex].mTimeStamp,
      uint32_t(imageIndex + 1) < mImages.Length() ?
          mImages[imageIndex + 1].mTimeStamp : TimeStamp(),
      mBias);
}
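
In the BigImage path of this and the two earlier ImageHost examples, each tile's draw rect is the intersection of the tile with the picture rect, re-expressed in that tile's local [0,1] texture space. The same arithmetic as those loop bodies, isolated as a sketch (the types and function name are stand-ins, not Gecko's gfx classes):

#include <algorithm>

struct FloatRect { float x, y, width, height; };

// Sketch: clip a tile against the picture rect; on success, return the
// clipped draw rect and the matching tile-relative texture coordinates.
static bool TileTexCoords(const FloatRect& aTile, const FloatRect& aPicture,
                          FloatRect& aDrawRect, FloatRect& aTexCoords)
{
  float x0 = std::max(aTile.x, aPicture.x);
  float y0 = std::max(aTile.y, aPicture.y);
  float x1 = std::min(aTile.x + aTile.width, aPicture.x + aPicture.width);
  float y1 = std::min(aTile.y + aTile.height, aPicture.y + aPicture.height);
  if (x1 <= x0 || y1 <= y0) {
    return false;  // tile lies entirely outside the picture rect
  }
  aDrawRect = { x0, y0, x1 - x0, y1 - y0 };
  aTexCoords = { (x0 - aTile.x) / aTile.width,
                 (y0 - aTile.y) / aTile.height,
                 (x1 - x0) / aTile.width,
                 (y1 - y0) / aTile.height };
  return true;
}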