Example no. 1
void
WidevineVideoDecoder::Drain()
{
  Log("WidevineVideoDecoder::Drain()");
  if (mReturnOutputCallDepth > 0) {
    Log("Drain call is reentrant, postponing drain");
    mDrainPending = true;
    return;
  }

  Status rv = kSuccess;
  while (rv == kSuccess) {
    WidevineVideoFrame frame;
    InputBuffer sample;
    // Assign to the outer rv (rather than shadowing it) so the loop also
    // terminates when DecryptAndDecodeFrame() returns a non-success status.
    rv = CDM()->DecryptAndDecodeFrame(sample, &frame);
    Log("WidevineVideoDecoder::Drain();  DecryptAndDecodeFrame() rv=%d", rv);
    if (frame.Format() == kUnknownVideoFormat) {
      break;
    }
    if (rv == kSuccess) {
      if (!ReturnOutput(frame)) {
        Log("WidevineVideoDecoder::Decode() Failed in ReturnOutput()");
      }
    }
  }
  // Shouldn't be reset while draining.
  MOZ_ASSERT(!mResetInProgress);

  CDM()->ResetDecoder(kStreamTypeVideo);
  mDrainPending = false;
  mCallback->DrainComplete();
}
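The reentrancy guard at the top of Drain() is the core of this example: ReturnOutput() can call back into the decoder, so a drain requested while output delivery is still on the stack is recorded in mDrainPending and replayed once the call depth drops back to zero (the tail of Decode() in the next example does the replay). Below is a minimal standalone sketch of that deferred-drain pattern; the class and member names are hypothetical stand-ins, not the Mozilla API.

#include <cstdio>

// Sketch of the deferred-drain pattern: a call-depth counter detects
// reentrancy, and the drain is replayed once the outer call unwinds.
class Decoder {
public:
  void Drain() {
    if (mCallDepth > 0) {   // Reentrant: postpone until we unwind.
      mDrainPending = true;
      return;
    }
    std::puts("draining now");
    mDrainPending = false;
  }

  void ReturnOutput() {
    ++mCallDepth;           // Output delivery is now on the stack.
    Drain();                // Simulate a reentrant drain request.
    --mCallDepth;
    if (mDrainPending && mCallDepth == 0) {
      Drain();              // Replay the deferred drain.
    }
  }

private:
  int mCallDepth = 0;
  bool mDrainPending = false;
};

int main() {
  Decoder d;
  d.ReturnOutput();         // Prints "draining now" exactly once, after unwinding.
}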
Example no. 2
void
WidevineVideoDecoder::Decode(GMPVideoEncodedFrame* aInputFrame,
                             bool aMissingFrames,
                             const uint8_t* aCodecSpecificInfo,
                             uint32_t aCodecSpecificInfoLength,
                             int64_t aRenderTimeMs)
{
  // We should not be given new input if a drain has been initiated.
  MOZ_ASSERT(!mDrainPending);
  // The CDM decoder may not produce output for every frame we put in, and
  // there may be latency, i.e. we may need to input (say) 30 frames before
  // we receive output. So we store the duration of each input frame and
  // retrieve it when the corresponding output arrives.
  mFrameDurations[aInputFrame->TimeStamp()] = aInputFrame->Duration();

  mSentInput = true;
  InputBuffer sample;

  RefPtr<MediaRawData> raw(new MediaRawData(aInputFrame->Buffer(), aInputFrame->Size()));
  raw->mExtraData = mExtraData;
  raw->mKeyframe = (aInputFrame->FrameType() == kGMPKeyFrame);
  // Convert input from AVCC, which GMPAPI passes in, to AnnexB, which
  // Chromium uses internally.
  mp4_demuxer::AnnexB::ConvertSampleToAnnexB(raw);

  const GMPEncryptedBufferMetadata* crypto = aInputFrame->GetDecryptionData();
  nsTArray<SubsampleEntry> subsamples;
  InitInputBuffer(crypto, aInputFrame->TimeStamp(), raw->Data(), raw->Size(), sample, subsamples);

  // For keyframes, ConvertSampleToAnnexB will stick the AnnexB extra data
  // at the start of the input. So we need to account for that as clear data
  // in the subsamples.
  if (raw->mKeyframe && !subsamples.IsEmpty()) {
    subsamples[0].clear_bytes += mAnnexB->Length();
  }

  WidevineVideoFrame frame;
  Status rv = CDM()->DecryptAndDecodeFrame(sample, &frame);
  Log("WidevineVideoDecoder::Decode(timestamp=%lld) rv=%d", sample.timestamp, rv);

  // Destroy the input frame, so that its shmem is freed and can be used to
  // return output to the Gecko process.
  aInputFrame->Destroy();
  aInputFrame = nullptr;

  if (rv == kSuccess) {
    if (!ReturnOutput(frame)) {
      Log("WidevineVideoDecoder::Decode() Failed in ReturnOutput()");
      mCallback->Error(GMPDecodeErr);
      return;
    }
    // A reset should only be started when mReturnOutputCallDepth is at most 1,
    // and if one is started it should be finished by the time that call
    // returns, so mResetInProgress should always be false by this point.
    MOZ_ASSERT(!mResetInProgress);
    // Only request more data if we don't have pending samples.
    if (mFrameAllocationQueue.empty()) {
      MOZ_ASSERT(mCDMWrapper);
      mCallback->InputDataExhausted();
    }
  } else if (rv == kNeedMoreData) {
    MOZ_ASSERT(mCDMWrapper);
    mCallback->InputDataExhausted();
  } else {
    mCallback->Error(ToGMPErr(rv));
  }
  // Finish a deferred drain if one is pending and no ReturnOutput calls remain on the stack.
  if (mDrainPending && mReturnOutputCallDepth == 0) {
    Drain();
  }
}
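The duration bookkeeping described at the top of Decode() is worth isolating: because the CDM may buffer many frames before emitting output, durations are recorded keyed by timestamp on input and looked up when the corresponding decoded frame comes out. Here is a minimal sketch of that idea; the class and method names are hypothetical, not the actual type behind mFrameDurations.

#include <cstdint>
#include <map>
#include <optional>

// Sketch of duration bookkeeping across decoder latency: store on input,
// recover on (possibly much later) output, keyed by timestamp.
class FrameDurationMap {
public:
  void Insert(int64_t aTimestamp, uint64_t aDuration) {
    mDurations[aTimestamp] = aDuration;
  }

  // Returns the stored duration and forgets the entry, since each
  // timestamp is expected to be output at most once.
  std::optional<uint64_t> Take(int64_t aTimestamp) {
    auto it = mDurations.find(aTimestamp);
    if (it == mDurations.end()) {
      return std::nullopt;
    }
    uint64_t duration = it->second;
    mDurations.erase(it);
    return duration;
  }

private:
  std::map<int64_t, uint64_t> mDurations;
};

int main() {
  FrameDurationMap durations;
  durations.Insert(/*timestamp*/ 0, /*duration*/ 33333);
  auto d = durations.Take(0);  // Holds 33333; a second Take(0) would be empty.
  return d ? 0 : 1;
}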
Example no. 3
void
MP4Reader::Update(TrackType aTrack)
{
  MOZ_ASSERT(GetTaskQueue()->IsCurrentThreadIn());

  if (mShutdown) {
    return;
  }

  // Record number of frames decoded and parsed. Automatically update the
  // stats counters using the AutoNotifyDecoded stack-based class.
  AbstractMediaDecoder::AutoNotifyDecoded a(mDecoder);

  bool needInput = false;
  bool needOutput = false;
  auto& decoder = GetDecoderData(aTrack);
  {
    MonitorAutoLock lock(decoder.mMonitor);
    decoder.mUpdateScheduled = false;
    if (NeedInput(decoder)) {
      needInput = true;
      decoder.mInputExhausted = false;
      decoder.mNumSamplesInput++;
    }
    if (aTrack == kVideo) {
      uint64_t delta = decoder.mNumSamplesOutput - mLastReportedNumDecodedFrames;
      a.mDecoded = static_cast<uint32_t>(delta);
      mLastReportedNumDecodedFrames = decoder.mNumSamplesOutput;
    }
    if (decoder.HasPromise()) {
      needOutput = true;
      if (!decoder.mOutput.IsEmpty()) {
        nsRefPtr<MediaData> output = decoder.mOutput[0];
        decoder.mOutput.RemoveElementAt(0);
        ReturnOutput(output, aTrack);
      } else if (decoder.mDrainComplete) {
        decoder.RejectPromise(END_OF_STREAM, __func__);
      }
    }
  }

  VLOG("Update(%s) ni=%d no=%d iex=%d fl=%d",
       TrackTypeToStr(aTrack),
       needInput,
       needOutput,
       decoder.mInputExhausted,
       decoder.mIsFlushing);

  if (needInput) {
    nsAutoPtr<MediaSample> sample(PopSample(aTrack));

    // Collect telemetry from h264 Annex B SPS.
    if (!mFoundSPSForTelemetry && sample && AnnexB::HasSPS(sample->mMp4Sample)) {
      nsRefPtr<ByteBuffer> extradata = AnnexB::ExtractExtraData(sample->mMp4Sample);
      mFoundSPSForTelemetry = AccumulateSPSTelemetry(extradata);
    }

    if (sample) {
      decoder.mDecoder->Input(sample->mMp4Sample.forget());
      if (aTrack == kVideo) {
        a.mParsed++;
      }
    } else {
      {
        MonitorAutoLock lock(decoder.mMonitor);
        MOZ_ASSERT(!decoder.mDemuxEOS);
        decoder.mDemuxEOS = true;
      }
      // DrainComplete takes care of reporting EOS upwards.
      decoder.mDecoder->Drain();
    }
  }
}
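One detail worth highlighting in Update(): the per-track state is inspected, and the needInput/needOutput decisions are computed, inside the MonitorAutoLock scope, while the decoder calls that can take time or call back into the reader (Input() and Drain()) happen after the lock has been released. A standalone sketch of that discipline using std::mutex follows; the names are illustrative, not the MP4Reader API.

#include <cstdio>
#include <mutex>

// Sketch of the lock-then-act pattern: decide what to do under the monitor,
// then release it before doing the potentially reentrant work.
struct DecoderData {
  std::mutex mMonitor;
  bool mInputExhausted = false;
  bool mUpdateScheduled = false;
};

void Update(DecoderData& decoder) {
  bool needInput = false;
  {
    std::lock_guard<std::mutex> lock(decoder.mMonitor);
    decoder.mUpdateScheduled = false;
    if (decoder.mInputExhausted) {  // Stand-in for NeedInput().
      needInput = true;
      decoder.mInputExhausted = false;
    }
  }  // Lock released before any work that might re-enter us.
  if (needInput) {
    std::puts("feeding a sample to the decoder");
  }
}

int main() {
  DecoderData d;
  d.mInputExhausted = true;
  Update(d);  // Prints once; a second call would find no work to do.
}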