Example #1
void
RtspMediaCodecReader::HandleResourceAllocated()
{
  EnsureActive();
  MediaCodecReader::HandleResourceAllocated();
  mRtspResource->EnablePlayoutDelay();
}
Example #2
void
RtspMediaCodecReader::RequestVideoData(bool aSkipToNextKeyframe,
                                       int64_t aTimeThreshold)
{
  EnsureActive();
  MediaCodecReader::RequestVideoData(aSkipToNextKeyframe, aTimeThreshold);
}
Example #3
nsRefPtr<MediaDecoderReader::VideoDataPromise>
RtspMediaCodecReader::RequestVideoData(bool aSkipToNextKeyframe,
                                       int64_t aTimeThreshold)
{
  EnsureActive();
  return MediaCodecReader::RequestVideoData(aSkipToNextKeyframe, aTimeThreshold);
}
Example #4
bool MediaOmxReader::DecodeAudioData()
{
  MOZ_ASSERT(OnTaskQueue());
  EnsureActive();

  MOZ_ASSERT(mStreamSource);
  // This is the approximate byte position in the stream.
  int64_t pos = mStreamSource->Tell();

  // Read next frame
  MPAPI::AudioFrame source;
  if (!mOmxDecoder->ReadAudio(&source, mAudioSeekTimeUs)) {
    return false;
  }
  mAudioSeekTimeUs = -1;

  // Ignore empty buffers, which the stagefright media read will sporadically return.
  if (source.mSize == 0) {
    return true;
  }

  uint32_t frames = source.mSize / (source.mAudioChannels *
                                    sizeof(AudioDataValue));

  typedef AudioCompactor::NativeCopy OmxCopy;
  return mAudioCompactor.Push(pos,
                              source.mTimeUs,
                              source.mAudioSampleRate,
                              frames,
                              source.mAudioChannels,
                              OmxCopy(static_cast<uint8_t *>(source.mData),
                                      source.mSize,
                                      source.mAudioChannels));
}
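The frame count above follows from the PCM layout: one frame holds one sample per channel, so frames = bytes / (channels * bytes-per-sample). A self-contained sketch of that arithmetic, assuming 16-bit samples (the actual size of AudioDataValue depends on the build configuration); the buffer size and channel count below are hypothetical:

#include <cstdint>
#include <cstdio>

int main() {
  typedef int16_t AudioDataValue;          // assumed 16-bit sample type
  const uint32_t bufferBytes = 4096;       // hypothetical source.mSize
  const uint32_t channels = 2;             // hypothetical source.mAudioChannels

  // One frame carries one sample for every channel.
  uint32_t frames = bufferBytes / (channels * sizeof(AudioDataValue));
  printf("%u frames\n", frames);           // 4096 / (2 * 2) = 1024
  return 0;
}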
Example #5
nsRefPtr<MediaDecoderReader::SeekPromise>
MediaOmxReader::Seek(int64_t aTarget, int64_t aEndTime)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  EnsureActive();

  VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
  if (container && container->GetImageContainer()) {
    container->GetImageContainer()->ClearAllImagesExceptFront();
  }

  if (mHasAudio && mHasVideo) {
    // The OMXDecoder seeks/demuxes audio and video streams separately. So if
    // we seek both audio and video to aTarget, the audio stream can typically
    // seek closer to the seek target, since typically every audio block is
    // a sync point, whereas for video there are only keyframes once every few
    // seconds. So if we have both audio and video, we must seek the video
    // stream to the preceding keyframe first, get the stream time, and then
    // seek the audio stream to match the video stream's time. Otherwise, the
    // audio and video streams won't be in sync after the seek.
    mVideoSeekTimeUs = aTarget;
    const VideoData* v = DecodeToFirstVideoData();
    mAudioSeekTimeUs = v ? v->mTime : aTarget;
  } else {
    mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget;
  }

  return SeekPromise::CreateAndResolve(mAudioSeekTimeUs, __func__);
}
Example #6
nsRefPtr<MediaDecoderReader::SeekPromise>
MediaOmxReader::Seek(int64_t aTarget, int64_t aEndTime)
{
  MOZ_ASSERT(OnTaskQueue());
  EnsureActive();
  nsRefPtr<SeekPromise> p = mSeekPromise.Ensure(__func__);

  if (mHasAudio && mHasVideo) {
    // The OMXDecoder seeks/demuxes audio and video streams separately. So if
    // we seek both audio and video to aTarget, the audio stream can typically
    // seek closer to the seek target, since typically every audio block is
    // a sync point, whereas for video there are only keyframes once every few
    // seconds. So if we have both audio and video, we must seek the video
    // stream to the preceding keyframe first, get the stream time, and then
    // seek the audio stream to match the video stream's time. Otherwise, the
    // audio and video streams won't be in sync after the seek.
    mVideoSeekTimeUs = aTarget;

    nsRefPtr<MediaOmxReader> self = this;
    mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (VideoData* v) {
      self->mSeekRequest.Complete();
      self->mAudioSeekTimeUs = v->mTime;
      self->mSeekPromise.Resolve(self->mAudioSeekTimeUs, __func__);
    }, [self, aTarget] () {
      self->mSeekRequest.Complete();
      self->mAudioSeekTimeUs = aTarget;
      self->mSeekPromise.Resolve(aTarget, __func__);
    }));
  } else {
    mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget;
    mSeekPromise.Resolve(aTarget, __func__);
  }

  return p;
}
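Compared with the synchronous Seek in the previous example, this version resolves a promise from lambdas that capture a self reference to keep the reader alive. A stripped-down sketch of that resolve/reject shape, using a toy promise type as a stand-in for the real MozPromise machinery (all names below are illustrative, not the Gecko API):

#include <cstdint>
#include <cstdio>
#include <functional>

struct ToyPromise {
  std::function<void(int64_t)> onResolve;
  std::function<void()> onReject;
  void Then(std::function<void(int64_t)> aResolve,
            std::function<void()> aReject) {
    onResolve = aResolve;
    onReject = aReject;
  }
  void Resolve(int64_t aValue) { if (onResolve) onResolve(aValue); }
  void Reject() { if (onReject) onReject(); }
};

int main() {
  int64_t target = 5000000;   // hypothetical seek target in microseconds
  ToyPromise firstVideo;

  // Mirrors the Seek above: on success adopt the decoded frame's time,
  // on failure fall back to the original target.
  firstVideo.Then(
    [](int64_t aTime) { printf("audio seeks to %lld\n", (long long)aTime); },
    [target]() { printf("audio falls back to %lld\n", (long long)target); });

  firstVideo.Resolve(4983000);  // pretend the first video frame decoded here
  return 0;
}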
Example #7
void
PathBuilderD2D::LineTo(const Point &aPoint)
{
  EnsureActive(aPoint);
  mSink->AddLine(D2DPoint(aPoint));

  mCurrentPoint = aPoint;
}
Example #8
void
PathBuilderD2D::Arc(const Point &aOrigin, Float aRadius, Float aStartAngle,
                 Float aEndAngle, bool aAntiClockwise)
{
  if (aAntiClockwise && aStartAngle < aEndAngle) {
    // D2D does things a little differently, and draws the arc by specifying a
    // beginning and an end point. This means the circle will be the wrong way
    // around if the start angle is smaller than the end angle. It might seem
    // tempting to invert aAntiClockwise, but that would change the sweeping
    // direction of the arc, so instead we exchange start/begin.
    Float oldStart = aStartAngle;
    aStartAngle = aEndAngle;
    aEndAngle = oldStart;
  }

  // XXX - Workaround for now, D2D does not appear to do the desired thing when
  // the angle sweeps a complete circle.
  if (aEndAngle - aStartAngle >= 2 * M_PI) {
    aEndAngle = Float(aStartAngle + M_PI * 1.9999);
  } else if (aStartAngle - aEndAngle >= 2 * M_PI) {
    aStartAngle = Float(aEndAngle + M_PI * 1.9999);
  }

  Point startPoint;
  startPoint.x = aOrigin.x + aRadius * cos(aStartAngle);
  startPoint.y = aOrigin.y + aRadius * sin(aStartAngle);

  if (!mFigureActive) {
    EnsureActive(startPoint);
  } else {
    mSink->AddLine(D2DPoint(startPoint));
  }

  Point endPoint;
  endPoint.x = aOrigin.x + aRadius * cos(aEndAngle);
  endPoint.y = aOrigin.y + aRadius * sin(aEndAngle);

  D2D1_ARC_SIZE arcSize = D2D1_ARC_SIZE_SMALL;

  if (aAntiClockwise) {
    if (aStartAngle - aEndAngle > M_PI) {
      arcSize = D2D1_ARC_SIZE_LARGE;
    }
  } else {
    if (aEndAngle - aStartAngle > M_PI) {
      arcSize = D2D1_ARC_SIZE_LARGE;
    }
  }

  mSink->AddArc(D2D1::ArcSegment(D2DPoint(endPoint),
                                 D2D1::SizeF(aRadius, aRadius),
                                 0.0f,
                                 aAntiClockwise ? D2D1_SWEEP_DIRECTION_COUNTER_CLOCKWISE :
                                                  D2D1_SWEEP_DIRECTION_CLOCKWISE,
                                 arcSize));

  mCurrentPoint = endPoint;
}
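The arcSize selection above boils down to one rule: when the swept angle exceeds pi, D2D must be told to take the large arc. A small worked illustration of that decision, with illustrative angle values:

#include <cmath>
#include <cstdio>

int main() {
  float startAngle = 0.0f;
  float endAngle = 4.0f;       // a 4-radian clockwise sweep (illustrative)
  bool antiClockwise = false;

  // Same comparison as the code above, collapsed into one expression.
  float sweep = antiClockwise ? startAngle - endAngle : endAngle - startAngle;
  const char* arcSize = sweep > M_PI ? "D2D1_ARC_SIZE_LARGE"
                                     : "D2D1_ARC_SIZE_SMALL";
  printf("%s\n", arcSize);     // 4 > pi, so the large arc is chosen
  return 0;
}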
Example #9
void
PathBuilderD2D::QuadraticBezierTo(const Point &aCP1,
                                  const Point &aCP2)
{
  EnsureActive(aCP1);
  mSink->AddQuadraticBezier(D2D1::QuadraticBezierSegment(D2DPoint(aCP1),
                                                         D2DPoint(aCP2)));

  mCurrentPoint = aCP2;
}
Example #10
void
PathBuilderD2D::MoveTo(const Point &aPoint)
{
  if (mFigureActive) {
    mSink->EndFigure(D2D1_FIGURE_END_OPEN);
    mFigureActive = false;
  }
  EnsureActive(aPoint);
  mCurrentPoint = aPoint;
}
Example #11
void
PathBuilderD2D::Close()
{
  if (mFigureActive) {
    mSink->EndFigure(D2D1_FIGURE_END_CLOSED);

    mFigureActive = false;

    EnsureActive(mBeginPoint);
  }
}
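Examples #7 through #11 all funnel through EnsureActive(aPoint), which begins a new figure on demand and is the reason LineTo or Close can be called without an explicit MoveTo. A toy sketch of that state machine, with logging standing in for the real ID2D1GeometrySink calls (the struct and its behavior are a simplification, not the Moz2D implementation):

#include <cstdio>

struct ToyPathBuilder {
  bool mFigureActive = false;
  float mBeginX = 0.0f, mBeginY = 0.0f;

  void EnsureActive(float x, float y) {
    if (!mFigureActive) {
      printf("BeginFigure(%g, %g)\n", x, y);   // stands in for mSink->BeginFigure
      mBeginX = x;
      mBeginY = y;
      mFigureActive = true;
    }
  }
  void LineTo(float x, float y) {
    EnsureActive(x, y);                        // implicitly opens a figure
    printf("AddLine(%g, %g)\n", x, y);
  }
  void Close() {
    if (mFigureActive) {
      printf("EndFigure(CLOSED)\n");
      mFigureActive = false;
      EnsureActive(mBeginX, mBeginY);          // reopen at the begin point
    }
  }
};

int main() {
  ToyPathBuilder b;
  b.LineTo(10.0f, 0.0f);   // no MoveTo needed; the figure starts here
  b.LineTo(10.0f, 10.0f);
  b.Close();
  return 0;
}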
Example #12
void
PathBuilderD2D::BezierTo(const Point &aCP1,
                         const Point &aCP2,
                         const Point &aCP3)
{
  EnsureActive(aCP1);
  mSink->AddBezier(D2D1::BezierSegment(D2DPoint(aCP1),
                                       D2DPoint(aCP2),
                                       D2DPoint(aCP3)));

  mCurrentPoint = aCP3;
}
Example #13
nsRefPtr<MediaDecoderReader::MetadataPromise>
RtspMediaCodecReader::AsyncReadMetadata()
{
  mRtspResource->DisablePlayoutDelay();
  EnsureActive();

  nsRefPtr<MediaDecoderReader::MetadataPromise> p =
    MediaCodecReader::AsyncReadMetadata();

  // Send a PAUSE to the RTSP server because the underlying media resource is
  // not ready.
  SetIdle();

  return p;
}
Example #14
nsresult
RtspMediaCodecReader::ReadMetadata(MediaInfo* aInfo,
                                   MetadataTags** aTags)
{
  mRtspResource->DisablePlayoutDelay();
  EnsureActive();
  nsresult rv = MediaCodecReader::ReadMetadata(aInfo, aTags);
  SetIdle();

  if (rv == NS_OK && !IsWaitingMediaResources()) {
    mRtspResource->EnablePlayoutDelay();
  }

  return rv;
}
Example #15
void MessageQueue::Post(MessageHandler *phandler, uint32 id,
    MessageData *pdata, bool time_sensitive) {
  if (fStop_)
    return;

  // Keep thread safe
  // Add the message to the end of the queue
  // Signal for the multiplexer to return

  CritScope cs(&crit_);
  EnsureActive();
  Message msg;
  msg.phandler = phandler;
  msg.message_id = id;
  msg.pdata = pdata;
  if (time_sensitive) {
    msg.ts_sensitive = Time() + kMaxMsgLatency;
  }
  msgq_.push_back(msg);
  ss_->WakeUp();
}
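A hedged usage sketch for the Post call above: a MessageHandler receives the queued Message via OnMessage() on the queue's thread. The types below are simplified stand-ins for the real ones, and the dispatch is done directly to show the data flow:

#include <cstdint>
#include <cstdio>

typedef uint32_t uint32;

struct Message;
struct MessageData {};                    // simplified stand-in

struct MessageHandler {
  virtual ~MessageHandler() {}
  virtual void OnMessage(Message* msg) = 0;
};

struct Message {
  MessageHandler* phandler = nullptr;
  uint32 message_id = 0;
  MessageData* pdata = nullptr;
};

struct PingHandler : MessageHandler {
  void OnMessage(Message* msg) override {
    printf("handled message %u\n", msg->message_id);
  }
};

int main() {
  // The real queue stores the Message and a multiplexer thread dispatches it;
  // here we build one and invoke the handler directly for illustration.
  PingHandler handler;
  Message msg;
  msg.phandler = &handler;
  msg.message_id = 1;
  msg.phandler->OnMessage(&msg);
  return 0;
}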
Example #16
void MessageQueue::DoDelayPost(int cmsDelay, uint32 tstamp,
    MessageHandler *phandler, uint32 id, MessageData* pdata) {
  if (fStop_)
    return;

  // Keep thread safe
  // Add to the priority queue. Gets sorted soonest first.
  // Signal for the multiplexer to return.

  CritScope cs(&crit_);
  EnsureActive();
  Message msg;
  msg.phandler = phandler;
  msg.message_id = id;
  msg.pdata = pdata;
  DelayedMessage dmsg(cmsDelay, tstamp, dmsgq_next_num_, msg);
  dmsgq_.push(dmsg);
  // If this message queue processes 1 message every millisecond for 50 days,
  // we will wrap this number.  Even then, only messages with identical times
  // will be misordered, and then only briefly.  This is probably ok.
  VERIFY(0 != ++dmsgq_next_num_);
  ss_->WakeUp();
}
Example #17
nsRefPtr<MediaDecoderReader::MetadataPromise>
MediaOmxReader::AsyncReadMetadata()
{
  MOZ_ASSERT(OnTaskQueue());
  EnsureActive();

  // Initialize the internal OMX Decoder.
  nsresult rv = InitOmxDecoder();
  if (NS_FAILED(rv)) {
    return MediaDecoderReader::MetadataPromise::CreateAndReject(
             ReadMetadataFailureReason::METADATA_ERROR, __func__);
  }

  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3) {
    // When reading a file from the sdcard on the b2g platform,
    // mDecoder->GetResource()->GetLength() returns -1 in the constructor.
    // Defer setting the total duration to this function.
    mMP3FrameParser.SetLength(mDecoder->GetResource()->GetLength());
    ProcessCachedData(0);
  }

  nsRefPtr<MediaDecoderReader::MetadataPromise> p = mMetadataPromise.Ensure(__func__);

  nsRefPtr<MediaOmxReader> self = this;
  mMediaResourceRequest.Begin(mOmxDecoder->AllocateMediaResources()
    ->Then(OwnerThread(), __func__,
      [self] (bool) -> void {
        self->mMediaResourceRequest.Complete();
        self->HandleResourceAllocated();
      }, [self] (bool) -> void {
        self->mMediaResourceRequest.Complete();
        self->mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__);
      }));

  return p;
}
Example #18
bool MediaOmxReader::DecodeVideoFrame(bool &aKeyframeSkip,
                                      int64_t aTimeThreshold)
{
  MOZ_ASSERT(OnTaskQueue());
  EnsureActive();

  // Record number of frames decoded and parsed. Automatically update the
  // stats counters using the AutoNotifyDecoded stack-based class.
  AbstractMediaDecoder::AutoNotifyDecoded a(mDecoder);

  bool doSeek = mVideoSeekTimeUs != -1;
  if (doSeek) {
    aTimeThreshold = mVideoSeekTimeUs;
  }

  TimeStamp start = TimeStamp::Now();

  // Read next frame. Don't let this loop run for too long.
  while ((TimeStamp::Now() - start) < TimeDuration::FromSeconds(MAX_VIDEO_DECODE_SECONDS)) {
    MPAPI::VideoFrame frame;
    frame.mGraphicBuffer = nullptr;
    frame.mShouldSkip = false;
    if (!mOmxDecoder->ReadVideo(&frame, aTimeThreshold, aKeyframeSkip, doSeek)) {
      return false;
    }
    doSeek = false;
    mVideoSeekTimeUs = -1;

    // Ignore empty buffers, which the stagefright media read will sporadically return.
    if (frame.mSize == 0 && !frame.mGraphicBuffer) {
      continue;
    }

    a.mParsed++;
    if (frame.mShouldSkip && mSkipCount < MAX_DROPPED_FRAMES) {
      mSkipCount++;
      continue;
    }

    mSkipCount = 0;

    aKeyframeSkip = false;

    IntRect picture = mPicture;
    if (frame.Y.mWidth != mInitialFrame.width ||
        frame.Y.mHeight != mInitialFrame.height) {

      // Frame size is different from what the container reports. This is legal,
      // and we will preserve the ratio of the crop rectangle as it
      // was reported relative to the picture size reported by the container.
      picture.x = (mPicture.x * frame.Y.mWidth) / mInitialFrame.width;
      picture.y = (mPicture.y * frame.Y.mHeight) / mInitialFrame.height;
      picture.width = (frame.Y.mWidth * mPicture.width) / mInitialFrame.width;
      picture.height = (frame.Y.mHeight * mPicture.height) / mInitialFrame.height;
    }

    MOZ_ASSERT(mStreamSource);
    // This is the approximate byte position in the stream.
    int64_t pos = mStreamSource->Tell();

    nsRefPtr<VideoData> v;
    if (!frame.mGraphicBuffer) {

      VideoData::YCbCrBuffer b;
      b.mPlanes[0].mData = static_cast<uint8_t *>(frame.Y.mData);
      b.mPlanes[0].mStride = frame.Y.mStride;
      b.mPlanes[0].mHeight = frame.Y.mHeight;
      b.mPlanes[0].mWidth = frame.Y.mWidth;
      b.mPlanes[0].mOffset = frame.Y.mOffset;
      b.mPlanes[0].mSkip = frame.Y.mSkip;

      b.mPlanes[1].mData = static_cast<uint8_t *>(frame.Cb.mData);
      b.mPlanes[1].mStride = frame.Cb.mStride;
      b.mPlanes[1].mHeight = frame.Cb.mHeight;
      b.mPlanes[1].mWidth = frame.Cb.mWidth;
      b.mPlanes[1].mOffset = frame.Cb.mOffset;
      b.mPlanes[1].mSkip = frame.Cb.mSkip;

      b.mPlanes[2].mData = static_cast<uint8_t *>(frame.Cr.mData);
      b.mPlanes[2].mStride = frame.Cr.mStride;
      b.mPlanes[2].mHeight = frame.Cr.mHeight;
      b.mPlanes[2].mWidth = frame.Cr.mWidth;
      b.mPlanes[2].mOffset = frame.Cr.mOffset;
      b.mPlanes[2].mSkip = frame.Cr.mSkip;

      v = VideoData::Create(mInfo.mVideo,
                            mDecoder->GetImageContainer(),
                            pos,
                            frame.mTimeUs,
                            1, // We don't know the duration.
                            b,
                            frame.mKeyFrame,
                            -1,
                            picture);
    } else {
      v = VideoData::Create(mInfo.mVideo,
                            mDecoder->GetImageContainer(),
                            pos,
                            frame.mTimeUs,
                            1, // We don't know the duration.
                            frame.mGraphicBuffer,
                            frame.mKeyFrame,
                            -1,
                            picture);
    }

    if (!v) {
      NS_WARNING("Unable to create VideoData");
      return false;
    }

    a.mDecoded++;
    NS_ASSERTION(a.mDecoded <= a.mParsed, "Expect to decode fewer frames than parsed in OMX decoder...");

    mVideoQueue.Push(v);

    break;
  }

  return true;
}
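When the decoded frame size differs from the container-reported size, the code above rescales the crop rectangle proportionally. A quick worked example of that arithmetic with illustrative numbers:

#include <cstdio>

int main() {
  int initW = 640, initH = 360;        // container-reported frame size
  int frameW = 320, frameH = 180;      // actual decoded Y-plane size
  int picX = 8, picY = 4, picW = 624, picH = 352;  // hypothetical crop rect

  // Same proportional scaling as DecodeVideoFrame above.
  int x = (picX * frameW) / initW;     // 8   * 320 / 640 = 4
  int y = (picY * frameH) / initH;     // 4   * 180 / 360 = 2
  int w = (frameW * picW) / initW;     // 320 * 624 / 640 = 312
  int h = (frameH * picH) / initH;     // 180 * 352 / 360 = 176
  printf("scaled picture: %d,%d %dx%d\n", x, y, w, h);
  return 0;
}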
Example #19
void MediaOmxReader::HandleResourceAllocated()
{
  EnsureActive();

  // After resources are available, set the metadata.
  if (!mOmxDecoder->EnsureMetadata()) {
    mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__);
    return;
  }

  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3 && mMP3FrameParser.IsMP3()) {
    // Check if the MP3 frame parser found a duration.
    mLastParserDuration = mMP3FrameParser.GetDuration();
  }

  if (mLastParserDuration >= 0) {
    // Prefer the parser duration if we have it.
    mInfo.mMetadataDuration = Some(TimeUnit::FromMicroseconds(mLastParserDuration));
  } else {
    // MP3 parser failed to find a duration.
    // Set the total duration (the max of the audio and video track).
    int64_t durationUs;
    mOmxDecoder->GetDuration(&durationUs);
    if (durationUs) {
      mInfo.mMetadataDuration = Some(TimeUnit::FromMicroseconds(durationUs));
    }
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t displayWidth, displayHeight, width, height;
    mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight,
                                    &width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(displayWidth, displayHeight);
    nsIntSize frameSize(width, height);
    if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
      mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__);
      return;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = true;
    mInfo.mVideo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->ClearCurrentFrame(gfxIntSize(displaySize.width, displaySize.height));
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = true;
    mInfo.mAudio.mChannels = numChannels;
    mInfo.mAudio.mRate = sampleRate;
  }

  nsRefPtr<MetadataHolder> metadata = new MetadataHolder();
  metadata->mInfo = mInfo;
  metadata->mTags = nullptr;

#ifdef MOZ_AUDIO_OFFLOAD
  CheckAudioOffload();
#endif

  mMetadataPromise.Resolve(metadata, __func__);
}
Example #20
void
RtspMediaCodecReader::RequestAudioData()
{
  EnsureActive();
  MediaCodecReader::RequestAudioData();
}
Example #21
nsRefPtr<MediaDecoderReader::AudioDataPromise>
RtspMediaCodecReader::RequestAudioData()
{
  EnsureActive();
  return MediaCodecReader::RequestAudioData();
}
Example #22
void
PathBuilderD2D::Arc(const Point &aOrigin, Float aRadius, Float aStartAngle,
                 Float aEndAngle, bool aAntiClockwise)
{
  if (aAntiClockwise && aStartAngle < aEndAngle) {
    // D2D does things a little differently, and draws the arc by specifying a
    // beginning and an end point. This means the circle will be the wrong way
    // around if the start angle is smaller than the end angle. It might seem
    // tempting to invert aAntiClockwise, but that would change the sweeping
    // direction of the arc, so instead we exchange start/begin.
    Float oldStart = aStartAngle;
    aStartAngle = aEndAngle;
    aEndAngle = oldStart;
  }

  // XXX - Workaround for now, D2D does not appear to do the desired thing when
  // the angle sweeps a complete circle.
  bool fullCircle = false;
  if (aEndAngle - aStartAngle >= 2 * M_PI) {
    fullCircle = true;
    aEndAngle = Float(aStartAngle + M_PI * 1.9999);
  } else if (aStartAngle - aEndAngle >= 2 * M_PI) {
    fullCircle = true;
    aStartAngle = Float(aEndAngle + M_PI * 1.9999);
  }

  Point startPoint;
  startPoint.x = aOrigin.x + aRadius * cos(aStartAngle);
  startPoint.y = aOrigin.y + aRadius * sin(aStartAngle);

  if (!mFigureActive) {
    EnsureActive(startPoint);
  } else {
    mSink->AddLine(D2DPoint(startPoint));
  }

  Point endPoint;
  endPoint.x = aOrigin.x + aRadius * cosf(aEndAngle);
  endPoint.y = aOrigin.y + aRadius * sinf(aEndAngle);

  D2D1_ARC_SIZE arcSize = D2D1_ARC_SIZE_SMALL;
  D2D1_SWEEP_DIRECTION direction =
    aAntiClockwise ? D2D1_SWEEP_DIRECTION_COUNTER_CLOCKWISE :
                     D2D1_SWEEP_DIRECTION_CLOCKWISE;

  // If the startPoint and endPoint of our circle are too close, there are D2D
  // issues with drawing the circle as a single arc.
  const Float kEpsilon = 1e-5f;
  if (!fullCircle ||
      (std::abs(startPoint.x - endPoint.x) +
       std::abs(startPoint.y - endPoint.y) > kEpsilon)) {

    if (aAntiClockwise) {
      if (aStartAngle - aEndAngle > M_PI) {
        arcSize = D2D1_ARC_SIZE_LARGE;
      }
    } else {
      if (aEndAngle - aStartAngle > M_PI) {
        arcSize = D2D1_ARC_SIZE_LARGE;
      }
    }

    mSink->AddArc(D2D1::ArcSegment(D2DPoint(endPoint),
                                   D2D1::SizeF(aRadius, aRadius),
                                   0.0f,
                                   direction,
                                   arcSize));
  } else {
    // our first workaround attempt didn't work, so instead draw the circle as
    // two half-circles
    Float midAngle = aEndAngle > aStartAngle ?
      Float(aStartAngle + M_PI) : Float(aEndAngle + M_PI);
    Point midPoint;
    midPoint.x = aOrigin.x + aRadius * cosf(midAngle);
    midPoint.y = aOrigin.y + aRadius * sinf(midAngle);

    mSink->AddArc(D2D1::ArcSegment(D2DPoint(midPoint),
                                   D2D1::SizeF(aRadius, aRadius),
                                   0.0f,
                                   direction,
                                   arcSize));

    // if the adjusted endPoint computed above is used here and endPoint !=
    // startPoint then this half of the circle won't render...
    mSink->AddArc(D2D1::ArcSegment(D2DPoint(startPoint),
                                   D2D1::SizeF(aRadius, aRadius),
                                   0.0f,
                                   direction,
                                   arcSize));
  }

  mCurrentPoint = endPoint;
}
Example #23
nsresult MediaOmxReader::ReadMetadata(MediaInfo* aInfo,
                                      MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  EnsureActive();

  *aTags = nullptr;

  // Initialize the internal OMX Decoder.
  nsresult rv = InitOmxDecoder();
  if (NS_FAILED(rv)) {
    return rv;
  }

  if (!mOmxDecoder->TryLoad()) {
    return NS_ERROR_FAILURE;
  }

#ifdef MOZ_AUDIO_OFFLOAD
  CheckAudioOffload();
#endif

  if (IsWaitingMediaResources()) {
    return NS_OK;
  }

  // Set the total duration (the max of the audio and video track).
  int64_t durationUs;
  mOmxDecoder->GetDuration(&durationUs);
  if (durationUs) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(durationUs);
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t displayWidth, displayHeight, width, height;
    mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight,
                                    &width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(displayWidth, displayHeight);
    nsIntSize frameSize(width, height);
    if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
      return NS_ERROR_FAILURE;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = mInfo.mVideo.mHasVideo = true;
    mInfo.mVideo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                 nullptr,
                                 mozilla::TimeStamp::Now());
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = mInfo.mAudio.mHasAudio = true;
    mInfo.mAudio.mChannels = numChannels;
    mInfo.mAudio.mRate = sampleRate;
  }

  *aInfo = mInfo;

  return NS_OK;
}
Example #24
nsresult MediaOmxReader::ReadMetadata(MediaInfo* aInfo,
                                      MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  EnsureActive();

  *aTags = nullptr;

  // Initialize the internal OMX Decoder.
  nsresult rv = InitOmxDecoder();
  if (NS_FAILED(rv)) {
    return rv;
  }

  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3) {
    // When reading a file from the sdcard on the b2g platform,
    // mDecoder->GetResource()->GetLength() returns -1 in the constructor.
    // Defer setting the total duration to this function.
    mMP3FrameParser.SetLength(mDecoder->GetResource()->GetLength());
    ProcessCachedData(0, true);
  }

  if (!mOmxDecoder->AllocateMediaResources()) {
    return NS_ERROR_FAILURE;
  }
  // Bug 1050667: both MediaDecoderStateMachine and MediaOmxReader rely on the
  // IsWaitingMediaResources() function. The waiting state can be changed by
  // the binder thread, so we store the waiting state in a cached value to
  // keep the two in a consistent state.
  UpdateIsWaitingMediaResources();
  if (IsWaitingMediaResources()) {
    return NS_OK;
  }
  // After resources are available, set the metadata.
  if (!mOmxDecoder->EnsureMetadata()) {
    return NS_ERROR_FAILURE;
  }

  if (isMP3 && mMP3FrameParser.IsMP3()) {
    int64_t duration = mMP3FrameParser.GetDuration();
    // The MP3FrameParser may have reported a duration; GetDuration() returns
    // -1 if no frame has been parsed.
    if (duration >= 0) {
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mUseParserDuration = true;
      mLastParserDuration = duration;
      mDecoder->SetMediaDuration(mLastParserDuration);
    }
  } else {
    // Set the total duration (the max of the audio and video track).
    int64_t durationUs;
    mOmxDecoder->GetDuration(&durationUs);
    if (durationUs) {
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mDecoder->SetMediaDuration(durationUs);
    }
  }

  if (mOmxDecoder->HasVideo()) {
    int32_t displayWidth, displayHeight, width, height;
    mOmxDecoder->GetVideoParameters(&displayWidth, &displayHeight,
                                    &width, &height);
    nsIntRect pictureRect(0, 0, width, height);

    // Validate the container-reported frame and pictureRect sizes. This ensures
    // that our video frame creation code doesn't overflow.
    nsIntSize displaySize(displayWidth, displayHeight);
    nsIntSize frameSize(width, height);
    if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
      return NS_ERROR_FAILURE;
    }

    // Video track's frame sizes will not overflow. Activate the video track.
    mHasVideo = mInfo.mVideo.mHasVideo = true;
    mInfo.mVideo.mDisplay = displaySize;
    mPicture = pictureRect;
    mInitialFrame = frameSize;
    VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
    if (container) {
      container->SetCurrentFrame(gfxIntSize(displaySize.width, displaySize.height),
                                 nullptr,
                                 mozilla::TimeStamp::Now());
    }
  }

  if (mOmxDecoder->HasAudio()) {
    int32_t numChannels, sampleRate;
    mOmxDecoder->GetAudioParameters(&numChannels, &sampleRate);
    mHasAudio = mInfo.mAudio.mHasAudio = true;
    mInfo.mAudio.mChannels = numChannels;
    mInfo.mAudio.mRate = sampleRate;
  }

  *aInfo = mInfo;

#ifdef MOZ_AUDIO_OFFLOAD
  CheckAudioOffload();
#endif

  return NS_OK;
}