Example #1
int64_t
MP3TrackDemuxer::FrameIndexFromTime(const media::TimeUnit& aTime) const {
  int64_t frameIndex = 0;
  if (mSamplesPerSecond > 0 && mSamplesPerFrame > 0) {
    frameIndex = aTime.ToSeconds() * mSamplesPerSecond / mSamplesPerFrame - 1;
  }

  MP3LOGV("FrameIndexFromOffset(%fs) -> %" PRId64, aTime.ToSeconds(), frameIndex);
  return std::max<int64_t>(0, frameIndex);
}
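For reference, the inverse mapping is a single proportion. The sketch below is illustrative only (TimeOfFrameIndexUsecs is a hypothetical helper, not a demuxer method, and the arithmetic is unchecked); it makes the "- 1" bias visible: the chosen frame starts at or before the requested time.
// Illustrative sketch: map a frame index back to stream time in plain
// microseconds. Frame i spans [i * spf / rate, (i + 1) * spf / rate)
// seconds, so biasing the index down by one in FrameIndexFromTime
// guarantees the chosen frame starts at or before aTime.
#include <cstdint>

int64_t TimeOfFrameIndexUsecs(int64_t aFrameIndex,
                              int64_t aSamplesPerFrame,
                              int64_t aSamplesPerSecond) {
  return aFrameIndex * aSamplesPerFrame * 1000000 / aSamplesPerSecond;
}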
Example #2
void
SeekTask::RequestVideoData()
{
  AssertOwnerThread();
  // These two variables are not used in the SEEKING state.
  const bool skipToNextKeyFrame = false;
  const media::TimeUnit currentTime = media::TimeUnit::FromMicroseconds(0);

  SAMPLE_LOG("Queueing video task - queued=%i, decoder-queued=%zu, skip=%i, time=%" PRId64,
             !!mSeekedVideoData, mReader->SizeOfVideoQueueInFrames(), skipToNextKeyFrame,
             currentTime.ToMicroseconds());

  mReader->RequestVideoData(skipToNextKeyFrame, currentTime);
}
Example #3
  already_AddRefed<MediaData>
  Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
  {
    // Create a fake YUV buffer in a 420 format: an 8bpp Y plane, plus
    // U and V planes subsampled 2x2 (half the width and height of the
    // Y plane, 8 bits per sample).
    const int sizeY = mFrameWidth * mFrameHeight;
    const int sizeCbCr = ((mFrameWidth + 1) / 2) * ((mFrameHeight + 1) / 2);
    auto frame = MakeUnique<uint8_t[]>(sizeY + sizeCbCr);
    VideoData::YCbCrBuffer buffer;

    // Y plane.
    buffer.mPlanes[0].mData = frame.get();
    buffer.mPlanes[0].mStride = mFrameWidth;
    buffer.mPlanes[0].mHeight = mFrameHeight;
    buffer.mPlanes[0].mWidth = mFrameWidth;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;

    // Cb plane.
    buffer.mPlanes[1].mData = frame.get() + sizeY;
    buffer.mPlanes[1].mStride = mFrameWidth / 2;
    buffer.mPlanes[1].mHeight = mFrameHeight / 2;
    buffer.mPlanes[1].mWidth = mFrameWidth / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 0;

    // Cr plane. Shares the Cb plane's memory: both chroma planes are
    // filled with the same constant, so one allocation serves both.
    buffer.mPlanes[2].mData = frame.get() + sizeY;
    buffer.mPlanes[2].mStride = mFrameWidth / 2;
    buffer.mPlanes[2].mHeight = mFrameHeight / 2;
    buffer.mPlanes[2].mWidth = mFrameWidth / 2;
    buffer.mPlanes[2].mOffset = 0;
    buffer.mPlanes[2].mSkip = 0;

    // Set the image to white: Y = 255, Cb = Cr = 128.
    memset(buffer.mPlanes[0].mData, 255, sizeY);
    memset(buffer.mPlanes[1].mData, 128, sizeCbCr);

    return VideoData::CreateAndCopyData(mInfo,
                                        mImageContainer,
                                        aOffsetInStream,
                                        aDTS.ToMicroseconds(),
                                        aDuration.ToMicroseconds(),
                                        buffer,
                                        true,
                                        aDTS.ToMicroseconds(),
                                        mPicture);
  }
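As an aside, the allocation above is one full luma plane plus a single shared chroma plane, not the two chroma planes of a general 4:2:0 image. A standalone sketch of that size computation (I420BlankSizeInBytes is our illustrative name, not part of the module):
// Sketch: bytes needed for the blank 4:2:0 frame built above: one
// full-resolution 8-bit Y plane plus one half-resolution chroma plane
// shared by Cb and Cr (both hold a single constant value).
#include <cstddef>
#include <cstdint>

size_t I420BlankSizeInBytes(int32_t aWidth, int32_t aHeight) {
  const size_t sizeY = size_t(aWidth) * size_t(aHeight);
  // Round odd dimensions up, matching ((w + 1) / 2) * ((h + 1) / 2) above.
  const size_t sizeCbCr =
    size_t((aWidth + 1) / 2) * size_t((aHeight + 1) / 2);
  return sizeY + sizeCbCr;
}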
Example #4
int64_t
MP4TrackDemuxer::GetEvictionOffset(media::TimeUnit aTime)
{
  MonitorAutoLock mon(mMonitor);
  uint64_t offset = mIndex->GetEvictionOffset(aTime.ToMicroseconds());
  return int64_t(offset == std::numeric_limits<uint64_t>::max() ? 0 : offset);
}
Example #5
void
VideoDecoderChild::SetSeekThreshold(const media::TimeUnit& aTime)
{
  AssertOnManagerThread();
  if (mCanSend) {
    SendSetSeekThreshold(aTime.ToMicroseconds());
  }
}
Example #6
  already_AddRefed<MediaData>
  Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
  {
    // Create a fake YUV buffer in a 420 format: an 8bpp Y plane, plus
    // U and V planes subsampled 2x2 (half the width and height of the
    // Y plane, 8 bits per sample). Have the data pointers of all three
    // planes point at the Y plane; it's always zeroed memory anyway.
    nsAutoArrayPtr<uint8_t> frame(new uint8_t[mFrameWidth * mFrameHeight]);
    memset(frame, 0, mFrameWidth * mFrameHeight);
    VideoData::YCbCrBuffer buffer;

    // Y plane.
    buffer.mPlanes[0].mData = frame;
    buffer.mPlanes[0].mStride = mFrameWidth;
    buffer.mPlanes[0].mHeight = mFrameHeight;
    buffer.mPlanes[0].mWidth = mFrameWidth;
    buffer.mPlanes[0].mOffset = 0;
    buffer.mPlanes[0].mSkip = 0;

    // Cb plane.
    buffer.mPlanes[1].mData = frame;
    buffer.mPlanes[1].mStride = mFrameWidth / 2;
    buffer.mPlanes[1].mHeight = mFrameHeight / 2;
    buffer.mPlanes[1].mWidth = mFrameWidth / 2;
    buffer.mPlanes[1].mOffset = 0;
    buffer.mPlanes[1].mSkip = 0;

    // Cr plane.
    buffer.mPlanes[2].mData = frame;
    buffer.mPlanes[2].mStride = mFrameWidth / 2;
    buffer.mPlanes[2].mHeight = mFrameHeight / 2;
    buffer.mPlanes[2].mWidth = mFrameWidth / 2;
    buffer.mPlanes[2].mOffset = 0;
    buffer.mPlanes[2].mSkip = 0;

    return VideoData::Create(mInfo,
                             mImageContainer,
                             nullptr,
                             aOffsetInStream,
                             aDTS.ToMicroseconds(),
                             aDuration.ToMicroseconds(),
                             buffer,
                             true,
                             aDTS.ToMicroseconds(),
                             mPicture);
  }
Example #7
RefPtr<MediaDecoderReader::SeekPromise>
MediaDecoderReaderWrapper::Seek(SeekTarget aTarget, media::TimeUnit aEndTime)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
  aTarget.SetTime(aTarget.GetTime() + StartTime());
  return InvokeAsync(mReader->OwnerThread(), mReader.get(), __func__,
                     &MediaDecoderReader::Seek, aTarget,
                     aEndTime.ToMicroseconds());
}
Example #8
int64_t
WAVTrackDemuxer::ChunkIndexFromTime(const media::TimeUnit& aTime) const
{
  if (!mSamplesPerChunk || !mSamplesPerSecond) {
    return 0;
  }
  int64_t chunkIndex =
    (aTime.ToSeconds() * mSamplesPerSecond / mSamplesPerChunk) - 1;
  return std::max<int64_t>(0, chunkIndex);
}
Example #9
void
MediaDecoderReaderWrapper::RequestVideoData(bool aSkipToNextKeyframe,
                                            media::TimeUnit aTimeThreshold)
{
  MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
  MOZ_ASSERT(!mShutdown);
  MOZ_ASSERT(mRequestVideoDataCB, "Request video data without callback!");

  // Time the video decode and pass the start time to callbacks that
  // accept a TimeStamp as their second parameter.
  TimeStamp videoDecodeStartTime = TimeStamp::Now();

  if (aTimeThreshold.ToMicroseconds() > 0 &&
      mStartTimeRendezvous->HaveStartTime()) {
    aTimeThreshold += StartTime();
  }

  auto p = InvokeAsync(mReader->OwnerThread(), mReader.get(), __func__,
                       &MediaDecoderReader::RequestVideoData,
                       aSkipToNextKeyframe, aTimeThreshold.ToMicroseconds());

  if (!mStartTimeRendezvous->HaveStartTime()) {
    p = p->Then(mOwnerThread, __func__, mStartTimeRendezvous.get(),
                &StartTimeRendezvous::ProcessFirstSample<MediaData::VIDEO_DATA>,
                &StartTimeRendezvous::FirstSampleRejected<MediaData::VIDEO_DATA>)
         ->CompletionPromise();
  }

  RefPtr<MediaDecoderReaderWrapper> self = this;
  mVideoDataRequest.Begin(p->Then(mOwnerThread, __func__,
    [self, videoDecodeStartTime] (MediaData* aVideoSample) {
      MOZ_ASSERT(self->mRequestVideoDataCB);
      self->mVideoDataRequest.Complete();
      self->OnSampleDecoded(self->mRequestVideoDataCB.get(), aVideoSample, videoDecodeStartTime);
    },
    [self] (MediaDecoderReader::NotDecodedReason aReason) {
      MOZ_ASSERT(self->mRequestVideoDataCB);
      self->mVideoDataRequest.Complete();
      self->OnNotDecoded(self->mRequestVideoDataCB.get(), aReason);
    }));
}
Example #10
RefPtr<MP4TrackDemuxer::SeekPromise>
MP4TrackDemuxer::Seek(media::TimeUnit aTime)
{
  int64_t seekTime = aTime.ToMicroseconds();
  mQueuedSample = nullptr;

  mIterator->Seek(seekTime);

  // Check what time we actually seeked to.
  mQueuedSample = mIterator->GetNext();
  if (mQueuedSample) {
    seekTime = mQueuedSample->mTime;
  }
  SetNextKeyFrameTime();

  return SeekPromise::CreateAndResolve(media::TimeUnit::FromMicroseconds(seekTime), __func__);
}
Example #11
AccurateSeekTask::AccurateSeekTask(const void* aDecoderID,
                                   AbstractThread* aThread,
                                   MediaDecoderReaderWrapper* aReader,
                                   const SeekTarget& aTarget,
                                   const MediaInfo& aInfo,
                                   const media::TimeUnit& aEnd,
                                   int64_t aCurrentMediaTime)
  : SeekTask(aDecoderID, aThread, aReader, aTarget)
  , mCurrentTimeBeforeSeek(media::TimeUnit::FromMicroseconds(aCurrentMediaTime))
  , mAudioRate(aInfo.mAudio.mRate)
  , mDoneAudioSeeking(!aInfo.HasAudio() || aTarget.IsVideoOnly())
  , mDoneVideoSeeking(!aInfo.HasVideo())
{
  AssertOwnerThread();

  // Bound the seek time to be inside the media range.
  NS_ASSERTION(aEnd.ToMicroseconds() != -1, "Should know end time by now");
  mTarget.SetTime(std::max(media::TimeUnit(), std::min(mTarget.GetTime(), aEnd)));

  // Configure MediaDecoderReaderWrapper.
  SetCallbacks();
}
Example #12
SeekTask::SeekTask(const void* aDecoderID,
                   AbstractThread* aThread,
                   MediaDecoderReaderWrapper* aReader,
                   SeekJob&& aSeekJob,
                   const MediaInfo& aInfo,
                   const media::TimeUnit& aDuration,
                   int64_t aCurrentMediaTime)
  : mDecoderID(aDecoderID)
  , mOwnerThread(aThread)
  , mReader(aReader)
  , mSeekJob(Move(aSeekJob))
  , mCurrentTimeBeforeSeek(aCurrentMediaTime)
  , mAudioRate(aInfo.mAudio.mRate)
  , mHasAudio(aInfo.HasAudio())
  , mHasVideo(aInfo.HasVideo())
  , mDropAudioUntilNextDiscontinuity(false)
  , mDropVideoUntilNextDiscontinuity(false)
  , mIsDiscarded(false)
  , mIsAudioQueueFinished(false)
  , mIsVideoQueueFinished(false)
  , mNeedToStopPrerollingAudio(false)
  , mNeedToStopPrerollingVideo(false)
{
  // Bound the seek time to be inside the media range.
  int64_t end = aDuration.ToMicroseconds();
  NS_ASSERTION(end != -1, "Should know end time by now");
  int64_t seekTime = mSeekJob.mTarget.GetTime().ToMicroseconds();
  seekTime = std::min(seekTime, end);
  seekTime = std::max(int64_t(0), seekTime);
  NS_ASSERTION(seekTime >= 0 && seekTime <= end,
               "Can only seek in range [0,duration]");
  mSeekJob.mTarget.SetTime(media::TimeUnit::FromMicroseconds(seekTime));

  mDropAudioUntilNextDiscontinuity = HasAudio();
  mDropVideoUntilNextDiscontinuity = HasVideo();

  // Configure MediaDecoderReaderWrapper.
  SetMediaDecoderReaderWrapperCallback();
}
Example #13
RefPtr<MP4TrackDemuxer::SkipAccessPointPromise>
MP4TrackDemuxer::SkipToNextRandomAccessPoint(media::TimeUnit aTimeThreshold)
{
  mQueuedSample = nullptr;
  // Loop until we reach the next keyframe after the threshold.
  uint32_t parsed = 0;
  bool found = false;
  RefPtr<MediaRawData> sample;
  while (!found && (sample = mIterator->GetNext())) {
    parsed++;
    if (sample->mKeyframe && sample->mTime >= aTimeThreshold.ToMicroseconds()) {
      found = true;
      mQueuedSample = sample;
    }
  }
  SetNextKeyFrameTime();
  if (found) {
    return SkipAccessPointPromise::CreateAndResolve(parsed, __func__);
  } else {
    SkipFailureHolder failure(DemuxerFailureReason::END_OF_STREAM, parsed);
    return SkipAccessPointPromise::CreateAndReject(Move(failure), __func__);
  }
}
Example #14
  nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
                  MediaFormat::Param aFormat,
                  const media::TimeUnit& aDuration) {
    // The output on Android is always 16-bit signed PCM.

    nsresult rv;
    int32_t numChannels;
    NS_ENSURE_SUCCESS(rv =
        aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels), rv);

    int32_t sampleRate;
    NS_ENSURE_SUCCESS(rv =
        aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate), rv);

    int32_t size;
    NS_ENSURE_SUCCESS(rv = aInfo->Size(&size), rv);

    // |size| is in bytes and samples are 16-bit, so the buffer holds
    // size / 2 AudioDataValue samples, grouped into frames of numChannels.
    const int32_t numSamples = size / 2;
    const int32_t numFrames = numSamples / numChannels;
    AudioDataValue* audio = new AudioDataValue[numSamples];
    PodCopy(audio, static_cast<AudioDataValue*>(aBuffer), numSamples);

    int32_t offset;
    NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);

    int64_t presentationTimeUs;
    NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

    nsRefPtr<AudioData> data = new AudioData(offset, presentationTimeUs,
                                             aDuration.ToMicroseconds(),
                                             numFrames,
                                             audio,
                                             numChannels,
                                             sampleRate);
    ENVOKE_CALLBACK(Output, data);
    return NS_OK;
  }
Example #15
void
AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
  LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
  mSeekTargetThreshold = Some(aTime);
}
Example #16
  virtual nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat,
                              const media::TimeUnit& aDuration) override {
    if (!EnsureGLContext()) {
      return NS_ERROR_FAILURE;
    }

    nsRefPtr<layers::Image> img = mImageContainer->CreateImage(ImageFormat::SURFACE_TEXTURE);
    layers::SurfaceTextureImage::Data data;
    data.mSurfTex = mSurfaceTexture.get();
    data.mSize = mConfig.mDisplay;
    data.mOriginPos = gl::OriginPos::BottomLeft;

    layers::SurfaceTextureImage* stImg = static_cast<layers::SurfaceTextureImage*>(img.get());
    stImg->SetData(data);

    if (WantCopy()) {
      EGLImage eglImage = CopySurface(img);
      if (!eglImage) {
        return NS_ERROR_FAILURE;
      }

      EGLSync eglSync = nullptr;
      if (sEGLLibrary.IsExtensionSupported(GLLibraryEGL::KHR_fence_sync) &&
          mGLContext->IsExtensionSupported(GLContext::OES_EGL_sync))
      {
        MOZ_ASSERT(mGLContext->IsCurrent());
        eglSync = sEGLLibrary.fCreateSync(EGL_DISPLAY(),
                                          LOCAL_EGL_SYNC_FENCE,
                                          nullptr);
        MOZ_ASSERT(eglSync);
        mGLContext->fFlush();
      } else {
        NS_WARNING("No EGL fence support detected, rendering artifacts may occur!");
      }

      img = mImageContainer->CreateImage(ImageFormat::EGLIMAGE);
      layers::EGLImageImage::Data data;
      data.mImage = eglImage;
      data.mSync = eglSync;
      data.mOwns = true;
      data.mSize = mConfig.mDisplay;
      data.mOriginPos = gl::OriginPos::TopLeft;

      layers::EGLImageImage* typedImg = static_cast<layers::EGLImageImage*>(img.get());
      typedImg->SetData(data);
    }

    nsresult rv;
    int32_t flags;
    NS_ENSURE_SUCCESS(rv = aInfo->Flags(&flags), rv);

    bool isSync = !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME);

    int32_t offset;
    NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);

    int64_t presentationTimeUs;
    NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);

    nsRefPtr<VideoData> v =
      VideoData::CreateFromImage(mConfig,
                                 mImageContainer,
                                 offset,
                                 presentationTimeUs,
                                 aDuration.ToMicroseconds(),
                                 img,
                                 isSync,
                                 presentationTimeUs,
                                 gfx::IntRect(0, 0,
                                              mConfig.mDisplay.width,
                                              mConfig.mDisplay.height));
    ENVOKE_CALLBACK(Output, v);
    return NS_OK;
  }
Example #17
// Convert a TimeUnit to the corresponding number of frames at the given rate.
CheckedInt64 TimeUnitToFrames(const media::TimeUnit& aTime, uint32_t aRate) {
  return UsecsToFrames(aTime.ToMicroseconds(), aRate);
}
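The reverse conversion inverts the same proportion. A minimal standalone sketch in plain integers (FramesToUsecsSketch is an illustrative name; the real helpers return overflow-checked CheckedInt64 values):
// Sketch: frames -> microseconds at a given rate, the inverse of the
// usecs -> frames conversion above. Plain int64_t for brevity; real code
// should use checked arithmetic as UsecsToFrames does.
#include <cstdint>

int64_t FramesToUsecsSketch(int64_t aFrames, uint32_t aRate) {
  const int64_t USECS_PER_S = 1000000;
  return aFrames * USECS_PER_S / int64_t(aRate);
}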
Example #18
int64_t
MP4TrackDemuxer::GetEvictionOffset(media::TimeUnit aTime)
{
  MonitorAutoLock mon(mMonitor);
  return int64_t(mIndex->GetEvictionOffset(aTime.ToMicroseconds()));
}