Code example #1
void
GMPVideoDecoder::Input(MediaRawData* aSample)
{
  MOZ_ASSERT(IsOnGMPThread());

  RefPtr<MediaRawData> sample(aSample);
  if (!mGMP) {
    mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                 RESULT_DETAIL("mGMP not initialized")));
    return;
  }

  mAdapter->SetLastStreamOffset(sample->mOffset);

  GMPUniquePtr<GMPVideoEncodedFrame> frame = CreateFrame(sample);
  if (!frame) {
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("CreateFrame returned null")));
    return;
  }
  nsTArray<uint8_t> info; // No codec-specific per-frame info to pass.
  nsresult rv = mGMP->Decode(Move(frame), false, info, 0);
  if (NS_FAILED(rv)) {
    mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                                 RESULT_DETAIL("mGMP->Decode:%x", rv)));
  }
}
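
All of these examples report failure through a MediaResult carrying a detail string built with RESULT_DETAIL. As a rough, hypothetical sketch of what that pattern amounts to (in Gecko, RESULT_DETAIL is a printf-style macro from MediaResult.h that also prefixes the calling function's name; the names below are simplified stand-ins, not the real API):

// Minimal stand-ins for the MediaResult / RESULT_DETAIL pattern above.
#include <cstdarg>
#include <cstdio>
#include <string>

struct MediaResultSketch {
  long mCode;           // stands in for an nsresult error code
  std::string mMessage; // detail string carried for logging/telemetry
};

static std::string ResultDetailSketch(const char* aFmt, ...) {
  char buf[256];
  va_list args;
  va_start(args, aFmt);
  std::vsnprintf(buf, sizeof(buf), aFmt, args);
  va_end(args);
  return std::string(buf);
}

// Usage, mirroring the calls above:
//   return MediaResultSketch{/* error code */ -1,
//                            ResultDetailSketch("mGMP->Decode:%x", rv)};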
Code example #2
File: GMPVideoDecoder.cpp Project: fitzgen/gecko-dev
RefPtr<MediaDataDecoder::DecodePromise>
GMPVideoDecoder::Decode(MediaRawData* aSample)
{
  MOZ_ASSERT(IsOnGMPThread());

  RefPtr<MediaRawData> sample(aSample);
  if (!mGMP) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("mGMP not initialized")),
      __func__);
  }

  mLastStreamOffset = sample->mOffset;

  GMPUniquePtr<GMPVideoEncodedFrame> frame = CreateFrame(sample);
  if (!frame) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("CreateFrame returned null")),
      __func__);
  }
  RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
  nsTArray<uint8_t> info; // No codec-specific per-frame info to pass.
  nsresult rv = mGMP->Decode(std::move(frame), false, info, 0);
  if (NS_FAILED(rv)) {
    mDecodePromise.Reject(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                                      RESULT_DETAIL("mGMP->Decode:%" PRIx32,
                                                    static_cast<uint32_t>(rv))),
                          __func__);
  }
  return p;
}
Code example #3
File: MP4Metadata.cpp Project: jasonLaster/gecko-dev
MP4Metadata::ResultAndIndice MP4Metadata::GetTrackIndice(
    mozilla::TrackID aTrackID) {
  Mp4parseByteData indiceRawData = {};

  uint8_t fragmented = false;
  auto rv = mp4parse_is_fragmented(mParser.get(), aTrackID, &fragmented);
  if (rv != MP4PARSE_STATUS_OK) {
    return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                        RESULT_DETAIL("Cannot parse whether track id %d is "
                                      "fragmented, mp4parse_error=%d",
                                      int(aTrackID), int(rv))),
            nullptr};
  }

  if (!fragmented) {
    rv = mp4parse_get_indice_table(mParser.get(), aTrackID, &indiceRawData);
    if (rv != MP4PARSE_STATUS_OK) {
      return {
          MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                      RESULT_DETAIL("Cannot parse index table in track id %d, "
                                    "mp4parse_error=%d",
                                    int(aTrackID), int(rv))),
          nullptr};
    }
  }

  UniquePtr<IndiceWrapper> indice =
      mozilla::MakeUnique<IndiceWrapper>(indiceRawData);

  return {NS_OK, std::move(indice)};
}
Code example #4
File: H264Converter.cpp Project: luke-chang/gecko-1
MediaResult
H264Converter::CreateDecoder(const VideoInfo& aConfig,
                             DecoderDoctorDiagnostics* aDiagnostics)
{
  if (!H264::HasSPS(aConfig.mExtraData)) {
    // nothing found yet, will try again later
    return NS_ERROR_NOT_INITIALIZED;
  }
  UpdateConfigFromExtraData(aConfig.mExtraData);

  SPSData spsdata;
  if (H264::DecodeSPSFromExtraData(aConfig.mExtraData, spsdata)) {
    // Do some format check here.
    // WMF H.264 Video Decoder and Apple ATDecoder do not support YUV444 format.
    if (spsdata.profile_idc == 244 /* Hi444PP */ ||
        spsdata.chroma_format_idc == PDMFactory::kYUV444) {
      if (aDiagnostics) {
        aDiagnostics->SetVideoNotSupported();
      }
      return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                         RESULT_DETAIL("No support for YUV444 format."));
    }
  } else {
    return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                       RESULT_DETAIL("Invalid SPS NAL."));
  }

  MediaResult error = NS_OK;
  mDecoder = mPDM->CreateVideoDecoder({
    aConfig,
    mTaskQueue,
    aDiagnostics,
    mImageContainer,
    mKnowsCompositor,
    mGMPCrashHelper,
    mType,
    mOnWaitingForKeyEvent,
    mDecoderOptions,
    mRate,
    &error
  });

  if (!mDecoder) {
    if (NS_FAILED(error)) {
      // The decoder supports CreateDecoderParams::mError; return its value.
      return error;
    } else {
      return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                         RESULT_DETAIL("Unable to create H264 decoder"));
    }
  }

  DDLINKCHILD("decoder", mDecoder.get());

  mNeedKeyframe = true;

  return NS_OK;
}
Code example #5
void
H264Converter::DecodeFirstSample(MediaRawData* aSample)
{
  if (mNeedKeyframe && !aSample->mKeyframe) {
    mDecodePromise.Resolve(DecodedData(), __func__);
    return;
  }

  mNeedAVCC =
    Some(mDecoder->NeedsConversion() == ConversionRequired::kNeedAVCC);

  if (!*mNeedAVCC
      && !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
    mDecodePromise.Reject(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("ConvertSampleToAnnexB")),
      __func__);
    return;
  }

  mNeedKeyframe = false;

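  // Capture |self| to keep this converter alive until the promise settles.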
  RefPtr<H264Converter> self = this;
  mDecoder->Decode(aSample)
    ->Then(AbstractThread::GetCurrent()->AsTaskQueue(), __func__,
           [self, this](const MediaDataDecoder::DecodedData& aResults) {
             mDecodePromiseRequest.Complete();
             mDecodePromise.Resolve(aResults, __func__);
           },
           [self, this](const MediaResult& aError) {
             mDecodePromiseRequest.Complete();
             mDecodePromise.Reject(aError, __func__);
           })
    ->Track(mDecodePromiseRequest);
}
Code example #6
void
VideoCallbackAdapter::Terminated()
{
  // Note that this *may* be called from the proxy thread also.
  mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                               RESULT_DETAIL("Video GMP decoder terminated.")));
}
Code example #7
void
H264Converter::OnDecoderInitDone(const TrackType aTrackType)
{
  mInitPromiseRequest.Complete();
  bool gotInput = false;
  for (uint32_t i = 0; i < mMediaRawSamples.Length(); i++) {
    const RefPtr<MediaRawData>& sample = mMediaRawSamples[i];
    if (mNeedKeyframe) {
      if (!sample->mKeyframe) {
        continue;
      }
      mNeedKeyframe = false;
    }
    if (!mNeedAVCC &&
        !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(sample, mNeedKeyframe)) {
      mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                   RESULT_DETAIL("ConvertSampleToAnnexB")));
      mMediaRawSamples.Clear();
      return;
    }
    mDecoder->Input(sample);
    gotInput = true;
  }
  if (!gotInput) {
    mCallback->InputExhausted();
  }
  mMediaRawSamples.Clear();
}
Code example #8
File: PDMFactory.cpp Project: bgrins/gecko-dev
void
AddMediaFormatChecker(const TrackInfo& aTrackConfig)
{
  if (aTrackConfig.IsVideo()) {
    auto mimeType = aTrackConfig.GetAsVideoInfo()->mMimeType;
    RefPtr<MediaByteBuffer> extraData =
      aTrackConfig.GetAsVideoInfo()->mExtraData;
    AddToCheckList([mimeType, extraData]() {
      if (MP4Decoder::IsH264(mimeType)) {
        mp4_demuxer::SPSData spsdata;
        // WMF H.264 Video Decoder and Apple ATDecoder
        // do not support YUV444 format.
        // For consistency, all decoders should be checked.
        if (mp4_demuxer::H264::DecodeSPSFromExtraData(extraData, spsdata)
            && (spsdata.profile_idc == 244 /* Hi444PP */
                || spsdata.chroma_format_idc == PDMFactory::kYUV444)) {
          return CheckResult(
            SupportChecker::Reason::kVideoFormatNotSupported,
            MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                        RESULT_DETAIL("Decoder may not have the capability "
                                      "to handle the requested video format "
                                      "with YUV444 chroma subsampling.")));
        }
      }
      return CheckResult(SupportChecker::Reason::kSupported);
    });
  }
}
Code example #9
void
H264Converter::OnDecoderInitFailed(MediaResult aError)
{
  mInitPromiseRequest.Complete();
  mCallback->Error(
    MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                RESULT_DETAIL("Unable to initialize H264 decoder")));
}
Code example #10
File: H264Converter.cpp Project: luke-chang/gecko-1
MediaResult
H264Converter::CreateDecoderAndInit(MediaRawData* aSample)
{
  RefPtr<MediaByteBuffer> extra_data =
    H264::ExtractExtraData(aSample);
  bool inbandExtradata = H264::HasSPS(extra_data);
  if (!inbandExtradata &&
      !H264::HasSPS(mCurrentConfig.mExtraData)) {
    return NS_ERROR_NOT_INITIALIZED;
  }

  if (inbandExtradata) {
    UpdateConfigFromExtraData(extra_data);
  }

  MediaResult rv =
    CreateDecoder(mCurrentConfig, /* DecoderDoctorDiagnostics* */ nullptr);

  if (NS_SUCCEEDED(rv)) {
    RefPtr<H264Converter> self = this;
    RefPtr<MediaRawData> sample = aSample;
    mDecoder->Init()
      ->Then(
        AbstractThread::GetCurrent()->AsTaskQueue(),
        __func__,
        [self, sample, this](const TrackType aTrackType) {
          mInitPromiseRequest.Complete();
          mNeedAVCC =
            Some(mDecoder->NeedsConversion() == ConversionRequired::kNeedAVCC);
          mCanRecycleDecoder = Some(CanRecycleDecoder());

          if (!mFlushPromise.IsEmpty()) {
            // A Flush is pending, abort the current operation.
            mFlushPromise.Resolve(true, __func__);
            return;
          }

          DecodeFirstSample(sample);
        },
        [self, this](const MediaResult& aError) {
          mInitPromiseRequest.Complete();

          if (!mFlushPromise.IsEmpty()) {
            // A Flush is pending, abort the current operation.
            mFlushPromise.Reject(aError, __func__);
            return;
          }

          mDecodePromise.Reject(
            MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                        RESULT_DETAIL("Unable to initialize H264 decoder")),
            __func__);
        })
      ->Track(mInitPromiseRequest);
    return NS_ERROR_DOM_MEDIA_INITIALIZING_DECODER;
  }
  return rv;
}
Code example #11
File: VPXDecoder.cpp Project: heiher/gecko-dev
RefPtr<MediaDataDecoder::DecodePromise>
VPXDecoder::ProcessDecode(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(),
                                           aSample->Size(), nullptr, 0)) {
    LOG("VPX Decode error: %s", vpx_codec_err_to_string(r));
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("VPX error: %s", vpx_codec_err_to_string(r))),
      __func__);
  }

  vpx_codec_iter_t iter = nullptr;
  vpx_image_t *img;
  vpx_image_t *img_alpha = nullptr;
  bool alpha_decoded = false;
  DecodedData results;

  while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
    NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420 ||
                 img->fmt == VPX_IMG_FMT_I444,
                 "WebM image format not I420 or I444");
    NS_ASSERTION(!alpha_decoded,
                 "Multiple frames per packet that contains alpha");

    if (aSample->AlphaSize() > 0) {
      if (!alpha_decoded) {
        MediaResult rv = DecodeAlpha(&img_alpha, aSample);
        if (NS_FAILED(rv)) {
          return DecodePromise::CreateAndReject(rv, __func__);
        }
        alpha_decoded = true;
      }
    }
    // Chroma shifts are rounded down as per the decoding examples in the SDK
    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = img->planes[0];
    b.mPlanes[0].mStride = img->stride[0];
    b.mPlanes[0].mHeight = img->d_h;
    b.mPlanes[0].mWidth = img->d_w;
    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = img->planes[1];
    b.mPlanes[1].mStride = img->stride[1];
    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = img->planes[2];
    b.mPlanes[2].mStride = img->stride[2];
    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

    if (img->fmt == VPX_IMG_FMT_I420) {
      b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;

      b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
    } else if (img->fmt == VPX_IMG_FMT_I444) {
Code example #12
void
H264Converter::OnDecoderInitFailed(const MediaResult& aError)
{
  mInitPromiseRequest.Complete();
  mDecodePromise.Reject(
    MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                RESULT_DETAIL("Unable to initialize H264 decoder")),
    __func__);
}
Code example #13
void
VideoCallbackAdapter::Error(GMPErr aErr)
{
  MOZ_ASSERT(IsOnGMPThread());
  mCallback->Error(MediaResult(aErr == GMPDecodeErr
                               ? NS_ERROR_DOM_MEDIA_DECODE_ERR
                               : NS_ERROR_DOM_MEDIA_FATAL_ERR,
                               RESULT_DETAIL("GMPErr:%x", aErr)));
}
Code example #14
GMPUniquePtr<GMPVideoEncodedFrame>
GMPVideoDecoder::CreateFrame(MediaRawData* aSample)
{
  GMPVideoFrame* ftmp = nullptr;
  GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
  if (GMP_FAILED(err)) {
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("Host::CreateFrame:%x", err)));
    return nullptr;
  }

  GMPUniquePtr<GMPVideoEncodedFrame> frame(static_cast<GMPVideoEncodedFrame*>(ftmp));
  err = frame->CreateEmptyFrame(aSample->Size());
  if (GMP_FAILED(err)) {
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("GMPVideoEncodedFrame::CreateEmptyFrame:%x", err)));
    return nullptr;
  }

  memcpy(frame->Buffer(), aSample->Data(), frame->Size());

  // Convert 4-byte NAL unit lengths to host-endian 4-byte buffer lengths to
  // suit the GMP API.
  if (mConvertNALUnitLengths) {
    const int kNALLengthSize = 4;
    uint8_t* buf = frame->Buffer();
    while (buf < frame->Buffer() + frame->Size() - kNALLengthSize) {
      uint32_t length = BigEndian::readUint32(buf) + kNALLengthSize;
      *reinterpret_cast<uint32_t *>(buf) = length;
      buf += length;
    }
  }

  frame->SetBufferType(GMP_BufferLength32);

  frame->SetEncodedWidth(mConfig.mDisplay.width);
  frame->SetEncodedHeight(mConfig.mDisplay.height);
  frame->SetTimeStamp(aSample->mTime);
  frame->SetCompleteFrame(true);
  frame->SetDuration(aSample->mDuration);
  frame->SetFrameType(aSample->mKeyframe ? kGMPKeyFrame : kGMPDeltaFrame);

  return frame;
}
Code example #15
File: GMPVideoDecoder.cpp Project: fitzgen/gecko-dev
void
GMPVideoDecoder::Error(GMPErr aErr)
{
  MOZ_ASSERT(IsOnGMPThread());
  auto error = MediaResult(aErr == GMPDecodeErr ? NS_ERROR_DOM_MEDIA_DECODE_ERR
                                                : NS_ERROR_DOM_MEDIA_FATAL_ERR,
                           RESULT_DETAIL("GMPErr:%x", aErr));
  mDecodePromise.RejectIfExists(error, __func__);
  mDrainPromise.RejectIfExists(error, __func__);
  mFlushPromise.RejectIfExists(error, __func__);
}
Code example #16
File: MP4Metadata.cpp Project: jasonLaster/gecko-dev
/*static*/ MP4Metadata::ResultAndByteBuffer MP4Metadata::Metadata(
    ByteStream* aSource) {
  auto parser = mozilla::MakeUnique<MoofParser>(
      aSource, AsVariant(ParseAllTracks{}), false);
  RefPtr<mozilla::MediaByteBuffer> buffer = parser->Metadata();
  if (!buffer) {
    return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                        RESULT_DETAIL("Cannot parse metadata")),
            nullptr};
  }
  return {NS_OK, std::move(buffer)};
}
Code example #17
File: AppleATDecoder.cpp Project: luke-chang/gecko-1
RefPtr<MediaDataDecoder::InitPromise>
AppleATDecoder::Init()
{
  if (!mFormatID) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Non recognised format")),
      __func__);
  }

  return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
}
Code example #18
File: AOMDecoder.cpp Project: bgrins/gecko-dev
RefPtr<MediaDataDecoder::DecodePromise>
AOMDecoder::ProcessDecode(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

#if defined(DEBUG)
  NS_ASSERTION(IsKeyframe(*aSample) == aSample->mKeyframe,
               "AOM Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
#endif

  if (aom_codec_err_t r = aom_codec_decode(&mCodec, aSample->Data(),
                                           aSample->Size(), nullptr, 0)) {
    LOG_RESULT(r, "Decode error!");
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("AOM error decoding AV1 sample: %s",
                                aom_codec_err_to_string(r))),
      __func__);
  }

  aom_codec_iter_t iter = nullptr;
  aom_image_t *img;
  DecodedData results;

  while ((img = aom_codec_get_frame(&mCodec, &iter))) {
    NS_ASSERTION(img->fmt == AOM_IMG_FMT_I420 ||
                 img->fmt == AOM_IMG_FMT_I444,
                 "WebM image format not I420 or I444");

    // Chroma shifts are rounded down as per the decoding examples in the SDK
    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = img->planes[0];
    b.mPlanes[0].mStride = img->stride[0];
    b.mPlanes[0].mHeight = img->d_h;
    b.mPlanes[0].mWidth = img->d_w;
    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = img->planes[1];
    b.mPlanes[1].mStride = img->stride[1];
    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = img->planes[2];
    b.mPlanes[2].mStride = img->stride[2];
    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

    if (img->fmt == AOM_IMG_FMT_I420) {
      b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;

      b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
    } else if (img->fmt == AOM_IMG_FMT_I444) {
Code example #19
File: GMPVideoDecoder.cpp Project: fitzgen/gecko-dev
void
GMPVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame)
{
  GMPUniquePtr<GMPVideoi420Frame> decodedFrame(aDecodedFrame);

  MOZ_ASSERT(IsOnGMPThread());

  VideoData::YCbCrBuffer b;
  for (int i = 0; i < kGMPNumOfPlanes; ++i) {
    b.mPlanes[i].mData = decodedFrame->Buffer(GMPPlaneType(i));
    b.mPlanes[i].mStride = decodedFrame->Stride(GMPPlaneType(i));
    if (i == kGMPYPlane) {
      b.mPlanes[i].mWidth = decodedFrame->Width();
      b.mPlanes[i].mHeight = decodedFrame->Height();
    } else {
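      // GMP i420 output: chroma planes are half the luma size, rounded up.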
      b.mPlanes[i].mWidth = (decodedFrame->Width() + 1) / 2;
      b.mPlanes[i].mHeight = (decodedFrame->Height() + 1) / 2;
    }
    b.mPlanes[i].mOffset = 0;
    b.mPlanes[i].mSkip = 0;
  }

  gfx::IntRect pictureRegion(
    0, 0, decodedFrame->Width(), decodedFrame->Height());
  RefPtr<VideoData> v = VideoData::CreateAndCopyData(
    mConfig,
    mImageContainer,
    mLastStreamOffset,
    media::TimeUnit::FromMicroseconds(decodedFrame->Timestamp()),
    media::TimeUnit::FromMicroseconds(decodedFrame->Duration()),
    b,
    false,
    media::TimeUnit::FromMicroseconds(-1),
    pictureRegion);
  RefPtr<GMPVideoDecoder> self = this;
  if (v) {
    mDecodedData.AppendElement(std::move(v));
  } else {
    mDecodedData.Clear();
    mDecodePromise.RejectIfExists(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("CallBack::CreateAndCopyData")),
      __func__);
  }
}
Code example #20
File: H264Converter.cpp Project: luke-chang/gecko-1
void
H264Converter::DecodeFirstSample(MediaRawData* aSample)
{
  if (mNeedKeyframe && !aSample->mKeyframe) {
    mDecodePromise.Resolve(mPendingFrames, __func__);
    mPendingFrames.Clear();
    return;
  }

  auto res = !*mNeedAVCC
             ? AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)
             : Ok();
  if (res.isErr()) {
    mDecodePromise.Reject(
      MediaResult(res.unwrapErr(), RESULT_DETAIL("ConvertSampleToAnnexB")),
      __func__);
    return;
  }

  mNeedKeyframe = false;

  RefPtr<H264Converter> self = this;
  mDecoder->Decode(aSample)
    ->Then(AbstractThread::GetCurrent()->AsTaskQueue(), __func__,
           [self, this](const MediaDataDecoder::DecodedData& aResults) {
             mDecodePromiseRequest.Complete();
             mPendingFrames.AppendElements(aResults);
             mDecodePromise.Resolve(mPendingFrames, __func__);
             mPendingFrames.Clear();
           },
           [self, this](const MediaResult& aError) {
             mDecodePromiseRequest.Complete();
             mDecodePromise.Reject(aError, __func__);
           })
    ->Track(mDecodePromiseRequest);
}
Code example #21
void
H264Converter::Input(MediaRawData* aSample)
{
  if (!mp4_demuxer::AnnexB::ConvertSampleToAVCC(aSample)) {
    // We need AVCC content to be able to later parse the SPS.
    // This is a no-op if the data is already AVCC.
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("ConvertSampleToAVCC")));
    return;
  }

  if (mInitPromiseRequest.Exists()) {
    if (mNeedKeyframe) {
      if (!aSample->mKeyframe) {
        // Frames dropped, we need a new one.
        mCallback->InputExhausted();
        return;
      }
      mNeedKeyframe = false;
    }
    mMediaRawSamples.AppendElement(aSample);
    return;
  }

  nsresult rv;
  if (!mDecoder) {
    // It is not possible to create an AVCC H264 decoder without SPS.
    // As such, creation will fail if the extra_data just extracted doesn't
    // contain a SPS.
    rv = CreateDecoderAndInit(aSample);
    if (rv == NS_ERROR_NOT_INITIALIZED) {
      // We are missing the required SPS to create the decoder.
      // Ignore for the time being, the MediaRawData will be dropped.
      mCallback->InputExhausted();
      return;
    }
  } else {
    rv = CheckForSPSChange(aSample);
    if (rv == NS_ERROR_NOT_INITIALIZED) {
      // The decoder is pending initialization.
      mCallback->InputExhausted();
      return;
    }
  }
  if (NS_FAILED(rv)) {
    mCallback->Error(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Unable to create H264 decoder")));
    return;
  }

  if (mNeedKeyframe && !aSample->mKeyframe) {
    mCallback->InputExhausted();
    return;
  }

  if (!mNeedAVCC &&
      !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("ConvertSampleToAnnexB")));
    return;
  }

  mNeedKeyframe = false;

  aSample->mExtraData = mCurrentConfig.mExtraData;

  mDecoder->Input(aSample);
}
Code example #22
File: VorbisDecoder.cpp Project: luke-chang/gecko-1
RefPtr<MediaDataDecoder::InitPromise>
VorbisDataDecoder::Init()
{
  vorbis_info_init(&mVorbisInfo);
  vorbis_comment_init(&mVorbisComment);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisBlock);

  AutoTArray<unsigned char*,4> headers;
  AutoTArray<size_t,4> headerLens;
  if (!XiphExtradataToHeaders(headers, headerLens,
                              mInfo.mCodecSpecificConfig->Elements(),
                              mInfo.mCodecSpecificConfig->Length())) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Could not get vorbis header.")),
      __func__);
  }
  for (size_t i = 0; i < headers.Length(); i++) {
    if (NS_FAILED(DecodeHeader(headers[i], headerLens[i]))) {
      return InitPromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    RESULT_DETAIL("Could not decode vorbis header.")),
        __func__);
    }
  }

  MOZ_ASSERT(mPacketCount == 3);

  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
  if (r) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Systhesis init fail.")),
      __func__);
  }

  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  if (r) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Block init fail.")),
      __func__);
  }

  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec rate do not match!"));
  }
  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec channels do not match!"));
  }

  AudioConfig::ChannelLayout layout(mVorbisDsp.vi->channels);
  if (!layout.IsValid()) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Invalid audio layout.")),
      __func__);
  }

  return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
Code example #23
File: VorbisDecoder.cpp Project: luke-chang/gecko-1
RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  const unsigned char* aData = aSample->Data();
  size_t aLength = aSample->Size();
  int64_t aOffset = aSample->mOffset;

  MOZ_ASSERT(mPacketCount >= 3);

  if (!mLastFrameTime ||
      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
  }

  ogg_packet pkt = InitVorbisPacket(
    aData, aLength, false, aSample->mEOS,
    aSample->mTimecode.ToMicroseconds(), mPacketCount++);

  int err = vorbis_synthesis(&mVorbisBlock, &pkt);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis:%d", err)),
      __func__);
  }

  err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis_blockin:%d", err)),
      __func__);
  }

  VorbisPCMValue** pcm = 0;
  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  if (frames == 0) {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  }

  DecodedData results;
  while (frames > 0) {
    uint32_t channels = mVorbisDsp.vi->channels;
    uint32_t rate = mVorbisDsp.vi->rate;
    AlignedAudioBuffer buffer(frames*channels);
    if (!buffer) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
    }
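    // Interleave the planar Vorbis output into packed samples.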
    for (uint32_t j = 0; j < channels; ++j) {
      VorbisPCMValue* channel = pcm[j];
      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
        buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
      }
    }

    auto duration = FramesToTimeUnit(frames, rate);
    if (!duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio duration")),
        __func__);
    }
    auto total_duration = FramesToTimeUnit(mFrames, rate);
    if (!total_duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio total_duration")),
        __func__);
    }

    auto time = total_duration + aSample->mTime;
    if (!time.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Overflow adding total_duration and aSample->mTime")),
        __func__);
    }

    if (!mAudioConverter) {
      AudioConfig in(
        AudioConfig::ChannelLayout(channels, VorbisLayout(channels)), rate);
      AudioConfig out(channels, rate);
      if (!in.IsValid() || !out.IsValid()) {
        return DecodePromise::CreateAndReject(
          MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                      RESULT_DETAIL("Invalid channel layout:%u", channels)),
          __func__);
      }
      mAudioConverter = MakeUnique<AudioConverter>(in, out);
    }
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    AudioSampleBuffer data(Move(buffer));
    data = mAudioConverter->Process(Move(data));

    results.AppendElement(new AudioData(aOffset, time, duration,
                                        frames, data.Forget(), channels, rate));
    mFrames += frames;
    err = vorbis_synthesis_read(&mVorbisDsp, frames);
    if (err) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
        __func__);
    }

    frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  }
  return DecodePromise::CreateAndResolve(Move(results), __func__);
}
Code example #24
MediaResult
FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
{
  AVPacket packet;
  mLib->av_init_packet(&packet);

  packet.data = const_cast<uint8_t*>(aSample->Data());
  packet.size = aSample->Size();

  if (!PrepareFrame()) {
    return MediaResult(
      NS_ERROR_OUT_OF_MEMORY,
      RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
  }

  int64_t samplePosition = aSample->mOffset;
  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);

  while (packet.size > 0) {
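    // avcodec_decode_audio4 decodes at most one frame per call and reports
    // the bytes it consumed, so keep feeding the remainder of the packet.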
    int decoded;
    int bytesConsumed =
      mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);

    if (bytesConsumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed));
    }

    if (mFrame->format != AV_SAMPLE_FMT_FLT &&
        mFrame->format != AV_SAMPLE_FMT_FLTP &&
        mFrame->format != AV_SAMPLE_FMT_S16 &&
        mFrame->format != AV_SAMPLE_FMT_S16P &&
        mFrame->format != AV_SAMPLE_FMT_S32 &&
        mFrame->format != AV_SAMPLE_FMT_S32P) {
      return MediaResult(
        NS_ERROR_DOM_MEDIA_DECODE_ERR,
        RESULT_DETAIL("FFmpeg audio decoder outputs unsupported audio format"));
    }

    if (decoded) {
      uint32_t numChannels = mCodecContext->channels;
      AudioConfig::ChannelLayout layout(numChannels);
      if (!layout.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_FATAL_ERR,
          RESULT_DETAIL("Unsupported channel layout:%u", numChannels));
      }

      uint32_t samplingRate = mCodecContext->sample_rate;

      AlignedAudioBuffer audio =
        CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
      if (!audio) {
        return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
      }

      media::TimeUnit duration =
        FramesToTimeUnit(mFrame->nb_samples, samplingRate);
      if (!duration.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Invalid sample duration"));
      }

      RefPtr<AudioData> data = new AudioData(samplePosition,
                                             pts.ToMicroseconds(),
                                             duration.ToMicroseconds(),
                                             mFrame->nb_samples,
                                             Move(audio),
                                             numChannels,
                                             samplingRate);
      mCallback->Output(data);
      pts += duration;
      if (!pts.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Invalid count of accumulated audio samples"));
      }
    }
    packet.data += bytesConsumed;
    packet.size -= bytesConsumed;
    samplePosition += bytesConsumed;
  }
  return NS_OK;
}
Code example #25
MediaResult
AppleATDecoder::SetupDecoder(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
  static const uint32_t MAX_FRAMES = 2;

  if (mFormatID == kAudioFormatMPEG4AAC &&
      mConfig.mExtendedProfile == 2 &&
      mParsedFramesForAACMagicCookie < MAX_FRAMES) {
    // Check for implicit SBR signalling if stream is AAC-LC
    // This will provide us with an updated magic cookie for use with
    // GetInputAudioDescription.
    if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
        !mMagicCookie.Length()) {
      // nothing found yet, will try again later
      mParsedFramesForAACMagicCookie++;
      return NS_ERROR_NOT_INITIALIZED;
    }
    // An error occurred; fall back to the default stream description.
  }

  LOG("Initializing Apple AudioToolbox decoder");

  AudioStreamBasicDescription inputFormat;
  PodZero(&inputFormat);
  MediaResult rv =
    GetInputAudioDescription(inputFormat,
                             mMagicCookie.Length() ?
                                 mMagicCookie : *mConfig.mExtraData);
  if (NS_FAILED(rv)) {
    return rv;
  }
  // Fill in the output format manually.
  PodZero(&mOutputFormat);
  mOutputFormat.mFormatID = kAudioFormatLinearPCM;
  mOutputFormat.mSampleRate = inputFormat.mSampleRate;
  mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  mOutputFormat.mBitsPerChannel = 32;
  mOutputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#elif defined(MOZ_SAMPLE_TYPE_S16)
  mOutputFormat.mBitsPerChannel = 16;
  mOutputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | 0;
#else
# error Unknown audio sample type
#endif
  // Set up the decoder so it gives us one sample per frame
  mOutputFormat.mFramesPerPacket = 1;
  mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame =
    mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;

  OSStatus status = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
  if (status) {
    LOG("Error %d constructing AudioConverter", status);
    mConverter = nullptr;
    return MediaResult(
      NS_ERROR_FAILURE,
      RESULT_DETAIL("Error constructing AudioConverter:%lld", int64_t(status)));
  }

  if (NS_FAILED(SetupChannelLayout())) {
    NS_WARNING("Couldn't retrieve channel layout, will use default layout");
  }

  return NS_OK;
}
Code example #26
File: PDMFactory.cpp Project: bgrins/gecko-dev
already_AddRefed<MediaDataDecoder>
PDMFactory::CreateDecoderWithPDM(PlatformDecoderModule* aPDM,
                                 const CreateDecoderParams& aParams)
{
  MOZ_ASSERT(aPDM);
  RefPtr<MediaDataDecoder> m;
  MediaResult* result = aParams.mError;

  SupportChecker supportChecker;
  const TrackInfo& config = aParams.mConfig;
  supportChecker.AddMediaFormatChecker(config);

  auto checkResult = supportChecker.Check();
  if (checkResult.mReason != SupportChecker::Reason::kSupported) {
    DecoderDoctorDiagnostics* diagnostics = aParams.mDiagnostics;
    if (checkResult.mReason
        == SupportChecker::Reason::kVideoFormatNotSupported) {
      if (diagnostics) {
        diagnostics->SetVideoNotSupported();
      }
      if (result) {
        *result = checkResult.mMediaResult;
      }
    } else if (checkResult.mReason
               == SupportChecker::Reason::kAudioFormatNotSupported) {
      if (diagnostics) {
        diagnostics->SetAudioNotSupported();
      }
      if (result) {
        *result = checkResult.mMediaResult;
      }
    }
    return nullptr;
  }

  if (config.IsAudio()) {
    m = aPDM->CreateAudioDecoder(aParams);
    return m.forget();
  }

  if (!config.IsVideo()) {
    if (result) {
      *result = MediaResult(
        NS_ERROR_DOM_MEDIA_FATAL_ERR,
        RESULT_DETAIL("Decoder configuration error, expected audio or video."));
    }
    return nullptr;
  }

  if (MP4Decoder::IsH264(config.mMimeType) && !aParams.mUseNullDecoder) {
    RefPtr<H264Converter> h = new H264Converter(aPDM, aParams);
    const nsresult rv = h->GetLastError();
    if (NS_SUCCEEDED(rv) || rv == NS_ERROR_NOT_INITIALIZED) {
      // The H264Converter either successfully created the wrapped decoder,
      // or there wasn't enough AVCC data to do so. Otherwise, there was some
      // problem, for example WMF DLLs were missing.
      m = h.forget();
    }
  } else {
    m = aPDM->CreateVideoDecoder(aParams);
  }

  return m.forget();
}
Code example #27
MediaResult
FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
                                        uint8_t* aData, int aSize,
                                        bool* aGotFrame,
                                        MediaDataDecoder::DecodedData& aResults)
{
  AVPacket packet;
  mLib->av_init_packet(&packet);

  packet.data = aData;
  packet.size = aSize;
  packet.dts = mLastInputDts = aSample->mTimecode;
  packet.pts = aSample->mTime;
  packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
  packet.pos = aSample->mOffset;

  // LibAV provides no API to retrieve the decoded sample's duration.
  // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
  // As such we instead use a map using the dts as key that we will retrieve
  // later.
  // The map will typically hold about 16 entries.
  mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
  }

  // Required with old version of FFmpeg/LibAV
  mFrame->reordered_opaque = AV_NOPTS_VALUE;

  int decoded;
  int bytesConsumed =
    mLib->avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);

  FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
             "(Input: pts(%" PRId64 ") dts(%" PRId64 ") Output: pts(%" PRId64 ") "
             "opaque(%" PRId64 ") pkt_pts(%" PRId64 ") pkt_dts(%" PRId64 "))",
             bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
             mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);

  if (bytesConsumed < 0) {
    return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                       RESULT_DETAIL("FFmpeg video error:%d", bytesConsumed));
  }

  if (!decoded) {
    if (aGotFrame) {
      *aGotFrame = false;
    }
    return NS_OK;
  }

  // If we've decoded a frame then we need to output it
  int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
  // Retrieve duration from dts.
  // We use the first entry found matching this dts (this is done to
  // handle damaged file with multiple frames with the same dts)

  int64_t duration;
  if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
    NS_WARNING("Unable to retrieve duration from map");
    duration = aSample->mDuration;
    // dts values are probably reported incorrectly, so clear the map as we're
    // unlikely to find them in the future anyway. This also guards
    // against the map becoming extremely big.
    mDurationMap.Clear();
  }
  FFMPEG_LOG(
    "Got one frame output with pts=%" PRId64 " dts=%" PRId64
    " duration=%" PRId64 " opaque=%" PRId64,
    pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);

  VideoData::YCbCrBuffer b;
  b.mPlanes[0].mData = mFrame->data[0];
  b.mPlanes[1].mData = mFrame->data[1];
  b.mPlanes[2].mData = mFrame->data[2];

  b.mPlanes[0].mStride = mFrame->linesize[0];
  b.mPlanes[1].mStride = mFrame->linesize[1];
  b.mPlanes[2].mStride = mFrame->linesize[2];

  b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
  b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
  b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

  b.mPlanes[0].mWidth = mFrame->width;
  b.mPlanes[0].mHeight = mFrame->height;
  if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
    b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
    b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
  } else {
    b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
  }
  if (mLib->av_frame_get_colorspace) {
    switch (mLib->av_frame_get_colorspace(mFrame)) {
      case AVCOL_SPC_BT709:
        b.mYUVColorSpace = YUVColorSpace::BT709;
        break;
      case AVCOL_SPC_SMPTE170M:
      case AVCOL_SPC_BT470BG:
        b.mYUVColorSpace = YUVColorSpace::BT601;
        break;
      case AVCOL_SPC_UNSPECIFIED:
#if LIBAVCODEC_VERSION_MAJOR >= 55
        if (mCodecContext->codec_id == AV_CODEC_ID_VP9) {
          b.mYUVColorSpace = YUVColorSpace::BT709;
        }
#endif
        break;
      default:
        break;
    }
  }
  RefPtr<VideoData> v =
    VideoData::CreateAndCopyData(mInfo,
                                  mImageContainer,
                                  aSample->mOffset,
                                  pts,
                                  duration,
                                  b,
                                  !!mFrame->key_frame,
                                  -1,
                                  mInfo.ScaledImageRect(mFrame->width,
                                                        mFrame->height));

  if (!v) {
    return MediaResult(NS_ERROR_OUT_OF_MEMORY,
                       RESULT_DETAIL("image allocation error"));
  }
  aResults.AppendElement(Move(v));
  if (aGotFrame) {
    *aGotFrame = true;
  }
  return NS_OK;
}
Code example #28
File: MP4Metadata.cpp Project: jasonLaster/gecko-dev
MP4Metadata::ResultAndTrackInfo MP4Metadata::GetTrackInfo(
    mozilla::TrackInfo::TrackType aType, size_t aTrackNumber) const {
  Maybe<uint32_t> trackIndex = TrackTypeToGlobalTrackIndex(aType, aTrackNumber);
  if (trackIndex.isNothing()) {
    return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                        RESULT_DETAIL("No %s tracks", TrackTypeToStr(aType))),
            nullptr};
  }

  Mp4parseTrackInfo info;
  auto rv = mp4parse_get_track_info(mParser.get(), trackIndex.value(), &info);
  if (rv != MP4PARSE_STATUS_OK) {
    MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
            ("mp4parse_get_track_info returned %d", rv));
    return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                        RESULT_DETAIL("Cannot find %s track #%zu",
                                      TrackTypeToStr(aType), aTrackNumber)),
            nullptr};
  }
#ifdef DEBUG
  bool haveSampleInfo = false;
  const char* codecString = "unrecognized";
  Mp4parseCodec codecType = MP4PARSE_CODEC_UNKNOWN;
  if (info.track_type == MP4PARSE_TRACK_TYPE_AUDIO) {
    Mp4parseTrackAudioInfo audio;
    auto rv = mp4parse_get_track_audio_info(mParser.get(), trackIndex.value(),
                                            &audio);
    if (rv == MP4PARSE_STATUS_OK && audio.sample_info_count > 0) {
      codecType = audio.sample_info[0].codec_type;
      haveSampleInfo = true;
    }
  } else if (info.track_type == MP4PARSE_TRACK_TYPE_VIDEO) {
    Mp4parseTrackVideoInfo video;
    auto rv = mp4parse_get_track_video_info(mParser.get(), trackIndex.value(),
                                            &video);
    if (rv == MP4PARSE_STATUS_OK && video.sample_info_count > 0) {
      codecType = video.sample_info[0].codec_type;
      haveSampleInfo = true;
    }
  }
  if (haveSampleInfo) {
    switch (codecType) {
      case MP4PARSE_CODEC_UNKNOWN:
        codecString = "unknown";
        break;
      case MP4PARSE_CODEC_AAC:
        codecString = "aac";
        break;
      case MP4PARSE_CODEC_OPUS:
        codecString = "opus";
        break;
      case MP4PARSE_CODEC_FLAC:
        codecString = "flac";
        break;
      case MP4PARSE_CODEC_ALAC:
        codecString = "alac";
        break;
      case MP4PARSE_CODEC_AVC:
        codecString = "h.264";
        break;
      case MP4PARSE_CODEC_VP9:
        codecString = "vp9";
        break;
      case MP4PARSE_CODEC_AV1:
        codecString = "av1";
        break;
      case MP4PARSE_CODEC_MP3:
        codecString = "mp3";
        break;
      case MP4PARSE_CODEC_MP4V:
        codecString = "mp4v";
        break;
      case MP4PARSE_CODEC_JPEG:
        codecString = "jpeg";
        break;
      case MP4PARSE_CODEC_AC3:
        codecString = "ac-3";
        break;
      case MP4PARSE_CODEC_EC3:
        codecString = "ec-3";
        break;
    }
  }
  MOZ_LOG(gMP4MetadataLog, LogLevel::Debug,
          ("track codec %s (%u)\n", codecString, codecType));
#endif

  // This specialization interface is crazy.
  UniquePtr<mozilla::TrackInfo> e;
  switch (aType) {
    case TrackInfo::TrackType::kAudioTrack: {
      Mp4parseTrackAudioInfo audio;
      auto rv = mp4parse_get_track_audio_info(mParser.get(), trackIndex.value(),
                                              &audio);
      if (rv != MP4PARSE_STATUS_OK) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("mp4parse_get_track_audio_info returned error %d", rv));
        return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                            RESULT_DETAIL("Cannot parse %s track #%zu",
                                          TrackTypeToStr(aType), aTrackNumber)),
                nullptr};
      }
      auto track = mozilla::MakeUnique<MP4AudioInfo>();
      MediaResult updateStatus = track->Update(&info, &audio);
      if (NS_FAILED(updateStatus)) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("Updating audio track failed with %s",
                 updateStatus.Message().get()));
        return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                            RESULT_DETAIL(
                                "Failed to update %s track #%zu with error: %s",
                                TrackTypeToStr(aType), aTrackNumber,
                                updateStatus.Message().get())),
                nullptr};
      }
      e = std::move(track);
    } break;
    case TrackInfo::TrackType::kVideoTrack: {
      Mp4parseTrackVideoInfo video;
      auto rv = mp4parse_get_track_video_info(mParser.get(), trackIndex.value(),
                                              &video);
      if (rv != MP4PARSE_STATUS_OK) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("mp4parse_get_track_video_info returned error %d", rv));
        return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                            RESULT_DETAIL("Cannot parse %s track #%zu",
                                          TrackTypeToStr(aType), aTrackNumber)),
                nullptr};
      }
      auto track = mozilla::MakeUnique<MP4VideoInfo>();
      MediaResult updateStatus = track->Update(&info, &video);
      if (NS_FAILED(updateStatus)) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("Updating video track failed with %s",
                 updateStatus.Message().get()));
        return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                            RESULT_DETAIL(
                                "Failed to update %s track #%zu with error: %s",
                                TrackTypeToStr(aType), aTrackNumber,
                                updateStatus.Message().get())),
                nullptr};
      }
      e = std::move(track);
    } break;
    default:
      MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
              ("unhandled track type %d", aType));
      return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                          RESULT_DETAIL("Cannot handle %s track #%zu",
                                        TrackTypeToStr(aType), aTrackNumber)),
              nullptr};
  }

  // No duration in track, use fragment_duration.
  if (e && !e->mDuration.IsPositive()) {
    Mp4parseFragmentInfo info;
    auto rv = mp4parse_get_fragment_info(mParser.get(), &info);
    if (rv == MP4PARSE_STATUS_OK) {
      e->mDuration = TimeUnit::FromMicroseconds(info.fragment_duration);
    }
  }

  if (e && e->IsValid()) {
    return {NS_OK, std::move(e)};
  }
  MOZ_LOG(gMP4MetadataLog, LogLevel::Debug, ("TrackInfo didn't validate"));

  return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                      RESULT_DETAIL("Invalid %s track #%zu",
                                    TrackTypeToStr(aType), aTrackNumber)),
          nullptr};
}
Code example #29
File: MP4Metadata.cpp Project: jasonLaster/gecko-dev
MP4Metadata::ResultAndTrackCount MP4Metadata::GetNumberTracks(
    mozilla::TrackInfo::TrackType aType) const {
  uint32_t tracks;
  auto rv = mp4parse_get_track_count(mParser.get(), &tracks);
  if (rv != MP4PARSE_STATUS_OK) {
    MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
            ("rust parser error %d counting tracks", rv));
    return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                        RESULT_DETAIL("Rust parser error %d", rv)),
            MP4Metadata::NumberTracksError()};
  }

  uint32_t total = 0;
  for (uint32_t i = 0; i < tracks; ++i) {
    Mp4parseTrackInfo track_info;
    rv = mp4parse_get_track_info(mParser.get(), i, &track_info);
    if (rv != MP4PARSE_STATUS_OK) {
      continue;
    }

    if (track_info.track_type == MP4PARSE_TRACK_TYPE_AUDIO) {
      Mp4parseTrackAudioInfo audio;
      auto rv = mp4parse_get_track_audio_info(mParser.get(), i, &audio);
      if (rv != MP4PARSE_STATUS_OK) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("mp4parse_get_track_audio_info returned error %d", rv));
        continue;
      }
      MOZ_DIAGNOSTIC_ASSERT(audio.sample_info_count > 0,
                            "Must have at least one audio sample info");
      if (audio.sample_info_count == 0) {
        return {
            MediaResult(
                NS_ERROR_DOM_MEDIA_METADATA_ERR,
                RESULT_DETAIL(
                    "Got 0 audio sample info while checking number tracks")),
            MP4Metadata::NumberTracksError()};
      }
      // We assume the codec of the first sample info is representative of the
      // whole track and skip it if we don't recognize the codec.
      if (audio.sample_info[0].codec_type == MP4PARSE_CODEC_UNKNOWN) {
        continue;
      }
    } else if (track_info.track_type == MP4PARSE_TRACK_TYPE_VIDEO) {
      Mp4parseTrackVideoInfo video;
      auto rv = mp4parse_get_track_video_info(mParser.get(), i, &video);
      if (rv != MP4PARSE_STATUS_OK) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("mp4parse_get_track_video_info returned error %d", rv));
        continue;
      }
      MOZ_DIAGNOSTIC_ASSERT(video.sample_info_count > 0,
                            "Must have at least one video sample info");
      if (video.sample_info_count == 0) {
        return {
            MediaResult(
                NS_ERROR_DOM_MEDIA_METADATA_ERR,
                RESULT_DETAIL(
                    "Got 0 video sample info while checking number tracks")),
            MP4Metadata::NumberTracksError()};
      }
      // We assume the codec of the first sample info is representative of the
      // whole track and skip it if we don't recognize the codec.
      if (video.sample_info[0].codec_type == MP4PARSE_CODEC_UNKNOWN) {
        continue;
      }
    } else {
      // Only audio and video are supported
      continue;
    }
    if (TrackTypeEqual(aType, track_info.track_type)) {
      total += 1;
    }
  }

  MOZ_LOG(gMP4MetadataLog, LogLevel::Info,
          ("%s tracks found: %u", TrackTypeToString(aType), total));

  return {NS_OK, total};
}
Code example #30
File: MP4Decoder.cpp Project: fitzgen/gecko-dev
/* static */ nsTArray<UniquePtr<TrackInfo>>
MP4Decoder::GetTracksInfo(const MediaContainerType& aType, MediaResult& aError)
{
  nsTArray<UniquePtr<TrackInfo>> tracks;

  if (!IsTypeValid(aType)) {
    aError = MediaResult(
      NS_ERROR_DOM_MEDIA_FATAL_ERR,
      RESULT_DETAIL("Invalid type:%s", aType.Type().AsString().get()));
    return tracks;
  }

  aError = NS_OK;

  const MediaCodecs& codecs = aType.ExtendedType().Codecs();
  if (codecs.IsEmpty()) {
    return tracks;
  }

  const bool isVideo = aType.Type() == MEDIAMIMETYPE("video/mp4") ||
                       aType.Type() == MEDIAMIMETYPE("video/quicktime") ||
                       aType.Type() == MEDIAMIMETYPE("video/x-m4v");

  for (const auto& codec : codecs.Range()) {
    if (IsAACCodecString(codec)) {
      tracks.AppendElement(
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("audio/mp4a-latm"), aType));
      continue;
    }
    if (codec.EqualsLiteral("mp3")) {
      tracks.AppendElement(
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("audio/mpeg"), aType));
      continue;
    }
    if (codec.EqualsLiteral("opus") || codec.EqualsLiteral("flac")) {
      tracks.AppendElement(
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("audio/") + NS_ConvertUTF16toUTF8(codec), aType));
      continue;
    }
    if (IsVP9CodecString(codec)) {
      auto trackInfo =
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("video/vp9"), aType);
      uint8_t profile = 0;
      uint8_t level = 0;
      uint8_t bitDepth = 0;
      if (ExtractVPXCodecDetails(codec, profile, level, bitDepth)) {
        trackInfo->GetAsVideoInfo()->mBitDepth = bitDepth;
      }
      tracks.AppendElement(std::move(trackInfo));
      continue;
    }
    if (isVideo && IsWhitelistedH264Codec(codec)) {
      auto trackInfo =
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("video/avc"), aType);
      uint8_t profile = 0, constraint = 0, level = 0;
      MOZ_ALWAYS_TRUE(
        ExtractH264CodecDetails(codec, profile, constraint, level));
      uint32_t width = aType.ExtendedType().GetWidth().refOr(1280);
      uint32_t height = aType.ExtendedType().GetHeight().refOr(720);
      trackInfo->GetAsVideoInfo()->mExtraData =
        H264::CreateExtraData(profile, constraint, level, { width, height });
      tracks.AppendElement(std::move(trackInfo));
      continue;
    }
    // Unknown codec
    aError =
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Unknown codec:%s",
                                NS_ConvertUTF16toUTF8(codec).get()));
  }
  return tracks;
}