Example #1
nsresult
WMFReader::Seek(int64_t aTargetUs,
                int64_t aStartTime,
                int64_t aEndTime,
                int64_t aCurrentTime)
{
  DECODER_LOG("WMFReader::Seek() %lld", aTargetUs);

  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
#ifdef DEBUG
  bool canSeek = false;
  GetSourceReaderCanSeek(mSourceReader, canSeek);
  NS_ASSERTION(canSeek, "WMFReader::Seek() should only be called if we can seek!");
#endif

  nsresult rv = ResetDecode();
  NS_ENSURE_SUCCESS(rv, rv);

  // Mark that we must recapture the audio frame count from the next sample.
  // WMF doesn't set a discontinuity marker when we seek to time 0, so we
  // must remember to recapture the audio frame offset and reset the frame
  // sum on the next audio packet we decode.
  mMustRecaptureAudioPosition = true;

  AutoPropVar var;
  HRESULT hr = InitPropVariantFromInt64(UsecsToHNs(aTargetUs), &var);
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  hr = mSourceReader->SetCurrentPosition(GUID_NULL, var);
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  return DecodeToTarget(aTargetUs);
}
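Note on Example #1: IMFSourceReader::SetCurrentPosition takes its target as a VT_I8 PROPVARIANT expressed in 100-nanosecond units, which is why the microsecond target is converted with UsecsToHNs. A minimal sketch of that helper, assuming it is the plain factor-of-ten conversion:

// Hedged sketch (assumed implementation): WMF positions are expressed in
// 100-nanosecond units ("hns"), so 1 microsecond == 10 hns.
static inline int64_t UsecsToHNs(int64_t aUsecs) {
  return aUsecs * 10;
}
static inline int64_t HNsToUsecs(int64_t aHNs) {
  return aHNs / 10;
}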
Example #2
MediaDecoderReader::~MediaDecoderReader()
{
  MOZ_ASSERT(mShutdown);
  MOZ_ASSERT(!mDecoder);
  ResetDecode();
  MOZ_COUNT_DTOR(MediaDecoderReader);
}
Example #3
nsresult
DirectShowReader::SeekInternal(int64_t aTargetUs)
{
  HRESULT hr;
  MOZ_ASSERT(OnTaskQueue());

  LOG("DirectShowReader::Seek() target=%lld", aTargetUs);

  hr = mControl->Pause();
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  nsresult rv = ResetDecode();
  NS_ENSURE_SUCCESS(rv, rv);

  LONGLONG seekPosition = UsecsToRefTime(aTargetUs);
  hr = mMediaSeeking->SetPositions(&seekPosition,
                                   AM_SEEKING_AbsolutePositioning,
                                   nullptr,
                                   AM_SEEKING_NoPositioning);
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  hr = mControl->Run();
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  return NS_OK;
}
Example #4
nsresult
WMFReader::Seek(int64_t aTargetUs,
                int64_t aStartTime,
                int64_t aEndTime,
                int64_t aCurrentTime)
{
  LOG("WMFReader::Seek() %lld", aTargetUs);

  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  if (!mCanSeek) {
    return NS_ERROR_FAILURE;
  }

  nsresult rv = ResetDecode();
  NS_ENSURE_SUCCESS(rv, rv);

  AutoPropVar var;
  HRESULT hr = InitPropVariantFromInt64(UsecsToHNs(aTargetUs), &var);
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  hr = mSourceReader->SetCurrentPosition(GUID_NULL, var);
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  return DecodeToTarget(aTargetUs);
}
Example #5
nsresult MediaOmxReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  EnsureActive();

  ResetDecode();
  VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
  if (container && container->GetImageContainer()) {
    container->GetImageContainer()->ClearAllImagesExceptFront();
  }

  if (mHasAudio && mHasVideo) {
    // The OMXDecoder seeks/demuxes audio and video streams separately. So if
    // we seek both audio and video to aTarget, the audio stream can typically
    // seek closer to the seek target, since typically every audio block is
    // a sync point, whereas for video there are only keyframes once every few
    // seconds. So if we have both audio and video, we must seek the video
    // stream to the preceding keyframe first, get the stream time, and then
    // seek the audio stream to match the video stream's time. Otherwise, the
    // audio and video streams won't be in sync after the seek.
    mVideoSeekTimeUs = aTarget;
    const VideoData* v = DecodeToFirstVideoData();
    mAudioSeekTimeUs = v ? v->mTime : aTarget;
  } else {
    mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget;
  }

  return NS_OK;
}
Example #6
nsRefPtr<MediaDecoderReader::SeekPromise>
AppleMP3Reader::Seek(int64_t aTime, int64_t aEndTime)
{
  MOZ_ASSERT(OnTaskQueue());

  // Find the exact frame/packet that contains |aTime|.
  mCurrentAudioFrame = aTime * mAudioSampleRate / USECS_PER_S;
  SInt64 packet = mCurrentAudioFrame / mAudioFramesPerCompressedPacket;

  // |AudioFileStreamSeek| will pass back through |byteOffset| the byte offset
  // into the stream it expects next time it reads.
  SInt64 byteOffset;
  UInt32 flags = 0;

  OSStatus rv = AudioFileStreamSeek(mAudioFileStream,
                                    packet,
                                    &byteOffset,
                                    &flags);

  if (rv) {
    LOGE("Couldn't seek demuxer. Error code %x\n", rv);
    return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
  }

  LOGD("computed byte offset = %lld; estimated = %s\n",
       byteOffset,
       (flags & kAudioFileStreamSeekFlag_OffsetIsEstimated) ? "YES" : "NO");

  mResource.Seek(nsISeekableStream::NS_SEEK_SET, byteOffset);

  ResetDecode();

  return SeekPromise::CreateAndResolve(aTime, __func__);
}
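To make the index arithmetic in Example #6 (and its older variant in Example #14) concrete, here is a worked instance with assumed values: a 44,100 Hz stream in which each compressed packet carries 1152 PCM frames, the MP3 Layer III frame size.

// Worked example (values assumed, not from the source): seek to
// aTime = 2,000,000 us at mAudioSampleRate = 44,100 Hz with
// mAudioFramesPerCompressedPacket = 1152.
static_assert(2000000LL * 44100 / 1000000 == 88200, "audio frame index");
static_assert(88200 / 1152 == 76, "compressed packet index (truncated)");
// Packet 76 starts at PCM frame 76 * 1152 = 87,552, slightly before the
// requested frame 88,200, which is why the promise resolves with the
// original |aTime| rather than the packet boundary.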
Example #7
void MediaOmxReader::ReleaseMediaResources()
{
  ResetDecode();
  // Before freeing a video codec, all video buffers need to be released,
  // even those still held by the graphics pipeline.
  VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
  if (container) {
    container->ClearCurrentFrame();
  }
  if (mOmxDecoder.get()) {
    mOmxDecoder->ReleaseMediaResources();
  }
}
Example #8
GstPadProbeReturn GStreamerReader::EventProbe(GstPad *aPad, GstEvent *aEvent)
{
  GstElement* parent = GST_ELEMENT(gst_pad_get_parent(aPad));

  LOG(PR_LOG_DEBUG, "event probe %s", GST_EVENT_TYPE_NAME (aEvent));

  switch(GST_EVENT_TYPE(aEvent)) {
    case GST_EVENT_SEGMENT:
    {
      const GstSegment *newSegment;
      GstSegment* segment;

      /* Store the segments so we can convert timestamps to stream time, which
       * is what the upper layers sync on.
       */
      ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
#if GST_VERSION_MINOR <= 1 && GST_VERSION_MICRO < 1
      ResetDecode();
#endif
      gst_event_parse_segment(aEvent, &newSegment);
      if (parent == GST_ELEMENT(mVideoAppSink))
        segment = &mVideoSegment;
      else
        segment = &mAudioSegment;
      gst_segment_copy_into (newSegment, segment);
      break;
    }
    case GST_EVENT_FLUSH_STOP:
      /* Reset on seeks */
      ResetDecode();
      break;
    default:
      break;
  }
  gst_object_unref(parent);

  return GST_PAD_PROBE_OK;
}
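The segments captured in Example #8 are what later turn raw buffer timestamps into the stream time the upper layers sync on. A minimal sketch of that conversion, assuming GStreamer 1.x's standard segment API:

/* Hedged sketch: convert a decoded buffer's PTS into stream time using
 * the segment captured by the event probe above. */
static gint64
BufferStreamTimeUs(const GstSegment* aSegment, GstBuffer* aBuffer)
{
  guint64 streamTime = gst_segment_to_stream_time(aSegment,
                                                  GST_FORMAT_TIME,
                                                  GST_BUFFER_PTS(aBuffer));
  return GST_TIME_AS_USECONDS(streamTime); /* nanoseconds -> microseconds */
}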
Example #9
nsresult MediaOmxReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  ResetDecode();
  VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
  if (container && container->GetImageContainer()) {
    container->GetImageContainer()->ClearAllImagesExceptFront();
  }

  mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget;

  return DecodeToTarget(aTarget);
}
Example #10
void
MediaSourceReader::AttemptSeek()
{
  // Make sure we don't hold the monitor while calling into the readers'
  // Seek methods, since that can deadlock.
  {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    if (!mWaitingForSeekData || !TrackBuffersContainTime(mPendingSeekTime)) {
      return;
    }
  }

  ResetDecode();
  for (uint32_t i = 0; i < mTrackBuffers.Length(); ++i) {
    mTrackBuffers[i]->ResetDecode();
  }

  // Decoding discontinuity upon seek, reset last times to seek target.
  mLastAudioTime = mPendingSeekTime;
  mLastVideoTime = mPendingSeekTime;

  if (mAudioTrack) {
    mAudioIsSeeking = true;
    SwitchAudioReader(mPendingSeekTime);
    mAudioReader->Seek(mPendingSeekTime,
                       mPendingStartTime,
                       mPendingEndTime,
                       mPendingCurrentTime)
                ->Then(GetTaskQueue(), __func__, this,
                       &MediaSourceReader::OnSeekCompleted,
                       &MediaSourceReader::OnSeekFailed);
    MSE_DEBUG("MediaSourceReader(%p)::Seek audio reader=%p", this, mAudioReader.get());
  }
  if (mVideoTrack) {
    mVideoIsSeeking = true;
    SwitchVideoReader(mPendingSeekTime);
    mVideoReader->Seek(mPendingSeekTime,
                       mPendingStartTime,
                       mPendingEndTime,
                       mPendingCurrentTime)
                ->Then(GetTaskQueue(), __func__, this,
                       &MediaSourceReader::OnSeekCompleted,
                       &MediaSourceReader::OnSeekFailed);
    MSE_DEBUG("MediaSourceReader(%p)::Seek video reader=%p", this, mVideoReader.get());
  }
  {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mWaitingForSeekData = false;
  }
}
Example #11
void MediaOmxReader::ReleaseMediaResources()
{
  mMediaResourceRequest.DisconnectIfExists();
  mMetadataPromise.RejectIfExists(ReadMetadataFailureReason::METADATA_ERROR, __func__);

  ResetDecode();
  // Before freeing a video codec, all video buffers need to be released,
  // even those still held by the graphics pipeline.
  VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
  if (container) {
    container->ClearCurrentFrame();
  }
  if (mOmxDecoder.get()) {
    mOmxDecoder->ReleaseMediaResources();
  }
}
Example #12
nsresult WaveReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  LOG(PR_LOG_DEBUG, ("%p About to seek to %lld", mDecoder, aTarget));
  if (NS_FAILED(ResetDecode())) {
    return NS_ERROR_FAILURE;
  }
  double d = BytesToTime(GetDataLength());
  NS_ASSERTION(d < INT64_MAX / USECS_PER_S, "Duration overflow"); 
  int64_t duration = static_cast<int64_t>(d * USECS_PER_S);
  double seekTime = NS_MIN(aTarget, duration) / static_cast<double>(USECS_PER_S);
  int64_t position = RoundDownToFrame(static_cast<int64_t>(TimeToBytes(seekTime)));
  NS_ASSERTION(INT64_MAX - mWavePCMOffset > position, "Integer overflow during wave seek");
  position += mWavePCMOffset;
  return mDecoder->GetResource()->Seek(nsISeekableStream::NS_SEEK_SET, position);
}
Example #13
RefPtr<ReaderProxy::AudioDataPromise>
ReaderProxy::OnAudioDataRequestFailed(const MediaResult& aError)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());

  if (mSeamlessLoopingBlocked || !mSeamlessLoopingEnabled ||
      aError.Code() != NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
    return AudioDataPromise::CreateAndReject(aError, __func__);
  }

  // The data time in the audio queue is assumed to increase linearly, so we
  // need to add the last end time as an offset to correct the audio data
  // time in the next round when seamless looping is enabled.
  mLoopingOffset = mLastAudioEndTime;

  // Save the duration of the audio track if it hasn't been set.
  if (!mAudioDuration.IsValid()) {
    mAudioDuration = mLastAudioEndTime;
  }

  // For seamless looping, upon receiving EOS the demuxer is seeked back to
  // the beginning and we keep requesting decoded data in advance.
  // The MDSM is never made aware of the EOS and keeps receiving decoded
  // data as usual while looping is on.
  RefPtr<ReaderProxy> self = this;
  RefPtr<MediaFormatReader> reader = mReader;
  ResetDecode(TrackInfo::kAudioTrack);
  return SeekInternal(SeekTarget(media::TimeUnit::Zero(), SeekTarget::Accurate))
    ->Then(mReader->OwnerThread(),
           __func__,
           [reader]() { return reader->RequestAudioData(); },
           [](const SeekRejectValue& aReject) {
             return AudioDataPromise::CreateAndReject(aReject.mError, __func__);
           })
    ->Then(mOwnerThread,
           __func__,
           [self](RefPtr<AudioData> aAudio) {
             return self->OnAudioDataRequestCompleted(aAudio.forget());
           },
           [](const MediaResult& aError) {
             return AudioDataPromise::CreateAndReject(aError, __func__);
           });
}
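In Example #13, the saved mLoopingOffset is what keeps audio timestamps monotonic across the loop boundary. A hypothetical sketch of the completion side (names and structure assumed, not taken from the source):

// Hypothetical sketch: rebase each post-loop sample by the accumulated
// offset so the audio queue's timeline keeps increasing monotonically.
RefPtr<ReaderProxy::AudioDataPromise>
ReaderProxy::OnAudioDataRequestCompleted(RefPtr<AudioData> aAudio)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
  if (mLoopingOffset != media::TimeUnit::Zero()) {
    aAudio->mTime += mLoopingOffset;        // shift into the looped timeline
  }
  mLastAudioEndTime = aAudio->GetEndTime(); // remember for the next wrap
  return AudioDataPromise::CreateAndResolve(aAudio.forget(), __func__);
}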
Example #14
void
AppleMP3Reader::Seek(int64_t aTime,
                     int64_t aStartTime,
                     int64_t aEndTime,
                     int64_t aCurrentTime)
{
  MOZ_ASSERT(mDecoder->OnDecodeThread(), "Should be on decode thread");
  NS_ASSERTION(aStartTime < aEndTime,
               "Seeking should happen over a positive range");

  // Find the exact frame/packet that contains |aTime|.
  mCurrentAudioFrame = aTime * mAudioSampleRate / USECS_PER_S;
  SInt64 packet = mCurrentAudioFrame / mAudioFramesPerCompressedPacket;

  // |AudioFileStreamSeek| will pass back through |byteOffset| the byte offset
  // into the stream it expects next time it reads.
  SInt64 byteOffset;
  UInt32 flags = 0;

  OSStatus rv = AudioFileStreamSeek(mAudioFileStream,
                                    packet,
                                    &byteOffset,
                                    &flags);

  if (rv) {
    LOGE("Couldn't seek demuxer. Error code %x\n", rv);
    GetCallback()->OnSeekCompleted(NS_ERROR_FAILURE);
    return;
  }

  LOGD("computed byte offset = %lld; estimated = %s\n",
       byteOffset,
       (flags & kAudioFileStreamSeekFlag_OffsetIsEstimated) ? "YES" : "NO");

  mDecoder->GetResource()->Seek(nsISeekableStream::NS_SEEK_SET, byteOffset);

  ResetDecode();

  GetCallback()->OnSeekCompleted(NS_OK);
}
Example #15
nsRefPtr<MediaDecoderReader::SeekPromise>
WaveReader::Seek(int64_t aTarget, int64_t aEndTime)
{
  MOZ_ASSERT(OnTaskQueue());
  LOG(LogLevel::Debug, ("%p About to seek to %lld", mDecoder, aTarget));

  if (NS_FAILED(ResetDecode())) {
    return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
  }
  double d = BytesToTime(GetDataLength());
  NS_ASSERTION(d < INT64_MAX / USECS_PER_S, "Duration overflow");
  int64_t duration = static_cast<int64_t>(d * USECS_PER_S);
  double seekTime = std::min(aTarget, duration) / static_cast<double>(USECS_PER_S);
  int64_t position = RoundDownToFrame(static_cast<int64_t>(TimeToBytes(seekTime)));
  NS_ASSERTION(INT64_MAX - mWavePCMOffset > position, "Integer overflow during wave seek");
  position += mWavePCMOffset;
  nsresult res = mDecoder->GetResource()->Seek(nsISeekableStream::NS_SEEK_SET, position);
  if (NS_FAILED(res)) {
    return SeekPromise::CreateAndReject(res, __func__);
  } else {
    return SeekPromise::CreateAndResolve(aTarget, __func__);
  }
}
Example #16
gboolean GStreamerReader::EventProbe(GstPad* aPad, GstEvent* aEvent)
{
  GstElement* parent = GST_ELEMENT(gst_pad_get_parent(aPad));
  switch(GST_EVENT_TYPE(aEvent)) {
    case GST_EVENT_NEWSEGMENT:
    {
      gboolean update;
      gdouble rate;
      GstFormat format;
      gint64 start, stop, position;
      GstSegment* segment;

      /* Store the segments so we can convert timestamps to stream time, which
       * is what the upper layers sync on.
       */
      ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
      gst_event_parse_new_segment(aEvent, &update, &rate, &format,
          &start, &stop, &position);
      if (parent == GST_ELEMENT(mVideoAppSink))
        segment = &mVideoSegment;
      else
        segment = &mAudioSegment;
      gst_segment_set_newsegment(segment, update, rate, format,
          start, stop, position);
      break;
    }
    case GST_EVENT_FLUSH_STOP:
      /* Reset on seeks */
      ResetDecode();
      break;
    default:
      break;
  }
  gst_object_unref(parent);

  return TRUE;
}
Example #17
MediaDecoderReader::~MediaDecoderReader()
{
  ResetDecode();
  MOZ_COUNT_DTOR(MediaDecoderReader);
}
Example #18
MediaOmxReader::~MediaOmxReader()
{
  ResetDecode();
  mOmxDecoder.clear();
}
Example #19
void MediaOmxReader::ReleaseMediaResources()
{
  ResetDecode();
  mOmxDecoder->ReleaseMediaResources();
}
Example #20
MediaPluginReader::~MediaPluginReader()
{
  ResetDecode();
}
Example #21
MediaOmxReader::~MediaOmxReader()
{
  ResetDecode();
}