Example #1
RefPtr<MediaDataDecoder::DecodePromise> VPXDecoder::ProcessDecode(
    MediaRawData* aSample) {
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(),
                                           aSample->Size(), nullptr, 0)) {
    LOG("VPX Decode error: %s", vpx_codec_err_to_string(r));
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("VPX error: %s", vpx_codec_err_to_string(r))),
        __func__);
  }

  vpx_codec_iter_t iter = nullptr;
  vpx_image_t* img;
  vpx_image_t* img_alpha = nullptr;
  bool alpha_decoded = false;
  DecodedData results;

  while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
    NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420 || img->fmt == VPX_IMG_FMT_I444,
                 "WebM image format not I420 or I444");
    NS_ASSERTION(!alpha_decoded,
                 "Multiple frames per packet that contains alpha");

    if (aSample->AlphaSize() > 0) {
      if (!alpha_decoded) {
        MediaResult rv = DecodeAlpha(&img_alpha, aSample);
        if (NS_FAILED(rv)) {
          return DecodePromise::CreateAndReject(rv, __func__);
        }
        alpha_decoded = true;
      }
    }
    // Chroma shifts are rounded down as per the decoding examples in the SDK
    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = img->planes[0];
    b.mPlanes[0].mStride = img->stride[0];
    b.mPlanes[0].mHeight = img->d_h;
    b.mPlanes[0].mWidth = img->d_w;
    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = img->planes[1];
    b.mPlanes[1].mStride = img->stride[1];
    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = img->planes[2];
    b.mPlanes[2].mStride = img->stride[2];
    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

    if (img->fmt == VPX_IMG_FMT_I420) {
      b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;

      b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
    } else if (img->fmt == VPX_IMG_FMT_I444) {
Example #2
void
VideoCallbackAdapter::Error(GMPErr aErr)
{
  MOZ_ASSERT(IsOnGMPThread());
  mCallback->Error(MediaResult(aErr == GMPDecodeErr
                               ? NS_ERROR_DOM_MEDIA_DECODE_ERR
                               : NS_ERROR_DOM_MEDIA_FATAL_ERR,
                               RESULT_DETAIL("GMPErr:%x", aErr)));
}
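All of these adapters funnel a library error code into a MediaResult. A minimal sketch of that mapping pattern, assuming the MediaResult(nsresult, message) constructor and the RESULT_DETAIL macro from dom/media/MediaResult.h; MapGMPError is a hypothetical helper, not part of the tree:

// Minimal sketch of the error-mapping pattern above. MapGMPError is a
// hypothetical name; MediaResult and RESULT_DETAIL are assumed to come
// from dom/media/MediaResult.h.
static MediaResult MapGMPError(GMPErr aErr)
{
  // Decode failures map to a recoverable decode error; anything else is fatal.
  return MediaResult(aErr == GMPDecodeErr ? NS_ERROR_DOM_MEDIA_DECODE_ERR
                                          : NS_ERROR_DOM_MEDIA_FATAL_ERR,
                     RESULT_DETAIL("GMPErr:%x", aErr));
}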
Example #3
void
H264Converter::OnDecoderInitFailed(const MediaResult& aError)
{
  mInitPromiseRequest.Complete();
  mDecodePromise.Reject(
    MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                RESULT_DETAIL("Unable to initialize H264 decoder")),
    __func__);
}
Example #4
GMPUniquePtr<GMPVideoEncodedFrame>
GMPVideoDecoder::CreateFrame(MediaRawData* aSample)
{
  GMPVideoFrame* ftmp = nullptr;
  GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
  if (GMP_FAILED(err)) {
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("Host::CreateFrame:%x", err)));
    return nullptr;
  }

  GMPUniquePtr<GMPVideoEncodedFrame> frame(static_cast<GMPVideoEncodedFrame*>(ftmp));
  err = frame->CreateEmptyFrame(aSample->Size());
  if (GMP_FAILED(err)) {
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("GMPVideoEncodedFrame::CreateEmptyFrame:%x", err)));
    return nullptr;
  }

  memcpy(frame->Buffer(), aSample->Data(), frame->Size());

  // Convert 4-byte NAL unit lengths to host-endian 4-byte buffer lengths to
  // suit the GMP API.
  if (mConvertNALUnitLengths) {
    const int kNALLengthSize = 4;
    uint8_t* buf = frame->Buffer();
    while (buf < frame->Buffer() + frame->Size() - kNALLengthSize) {
      uint32_t length = BigEndian::readUint32(buf) + kNALLengthSize;
      *reinterpret_cast<uint32_t *>(buf) = length;
      buf += length;
    }
  }

  frame->SetBufferType(GMP_BufferLength32);

  frame->SetEncodedWidth(mConfig.mDisplay.width);
  frame->SetEncodedHeight(mConfig.mDisplay.height);
  frame->SetTimeStamp(aSample->mTime);
  frame->SetCompleteFrame(true);
  frame->SetDuration(aSample->mDuration);
  frame->SetFrameType(aSample->mKeyframe ? kGMPKeyFrame : kGMPDeltaFrame);

  return frame;
}
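The NAL-length loop in CreateFrame walks length-prefixed NAL units in place. A standalone sketch of the same walk in plain C++, assuming (as the comment above states) AVCC-style 4-byte big-endian length prefixes:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Standalone sketch of the length-prefix walk in CreateFrame above: each
// NAL unit is preceded by a 4-byte big-endian length. The prefix is
// overwritten with a host-endian length that includes the prefix itself,
// and the walk then jumps over the payload to the next prefix.
static void ConvertNALUnitLengths(uint8_t* buf, size_t size)
{
  const size_t kNALLengthSize = 4;
  uint8_t* end = buf + size;
  while (buf + kNALLengthSize <= end) {
    // Read the big-endian length by hand so the sketch is endian-portable.
    uint32_t nalLen = (uint32_t(buf[0]) << 24) | (uint32_t(buf[1]) << 16) |
                      (uint32_t(buf[2]) << 8) | uint32_t(buf[3]);
    uint32_t hostLen = nalLen + kNALLengthSize;  // length incl. the prefix
    std::memcpy(buf, &hostLen, kNALLengthSize);  // host-endian write
    buf += hostLen;                              // skip prefix + payload
  }
}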
Example #5
void
GonkDecoderManager::ResetEOS()
{
  // After eos, android::MediaCodec needs to be flushed to receive next input
  mWaitOutput.Clear();
  if (mDecoder->flush() != OK) {
    GMDD_LOG("flush error");
    mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                            __func__));
  }
}
Example #6
void
GMPVideoDecoder::Error(GMPErr aErr)
{
  MOZ_ASSERT(IsOnGMPThread());
  auto error = MediaResult(aErr == GMPDecodeErr ? NS_ERROR_DOM_MEDIA_DECODE_ERR
                                                : NS_ERROR_DOM_MEDIA_FATAL_ERR,
                           RESULT_DETAIL("GMPErr:%x", aErr));
  mDecodePromise.RejectIfExists(error, __func__);
  mDrainPromise.RejectIfExists(error, __func__);
  mFlushPromise.RejectIfExists(error, __func__);
}
Example #7
/*static*/ MP4Metadata::ResultAndByteBuffer MP4Metadata::Metadata(
    ByteStream* aSource) {
  auto parser = mozilla::MakeUnique<MoofParser>(
      aSource, AsVariant(ParseAllTracks{}), false);
  RefPtr<mozilla::MediaByteBuffer> buffer = parser->Metadata();
  if (!buffer) {
    return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                        RESULT_DETAIL("Cannot parse metadata")),
            nullptr};
  }
  return {NS_OK, std::move(buffer)};
}
Example #8
RefPtr<MediaDataDecoder::InitPromise>
AppleATDecoder::Init()
{
  if (!mFormatID) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Non recognised format")),
      __func__);
  }

  return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
}
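A caller consumes an InitPromise like this one through MozPromise::Then. A hedged sketch, assuming the Then overload from MozPromise.h that takes a resolve and a reject lambda; `decoder` and `taskQueue` are hypothetical stand-ins for a MediaDataDecoder and its owning TaskQueue:

// Sketch of consuming the InitPromise above.
decoder->Init()->Then(
    taskQueue, __func__,
    [](TrackInfo::TrackType aTrack) {
      // Init resolved: the decoder is ready, start feeding it samples.
    },
    [](const MediaResult& aError) {
      // Init rejected: surface aError (e.g. via aError.Description()).
    });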
Example #9
RefPtr<MediaDataDecoder::DecodePromise>
AOMDecoder::ProcessDecode(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

#if defined(DEBUG)
  NS_ASSERTION(IsKeyframe(*aSample) == aSample->mKeyframe,
               "AOM Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
#endif

  if (aom_codec_err_t r = aom_codec_decode(&mCodec, aSample->Data(), aSample->Size(), nullptr, 0)) {
    LOG_RESULT(r, "Decode error!");
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("AOM error decoding AV1 sample: %s",
                                aom_codec_err_to_string(r))),
      __func__);
  }

  aom_codec_iter_t iter = nullptr;
  aom_image_t *img;
  DecodedData results;

  while ((img = aom_codec_get_frame(&mCodec, &iter))) {
    NS_ASSERTION(img->fmt == AOM_IMG_FMT_I420 ||
                 img->fmt == AOM_IMG_FMT_I444,
                 "WebM image format not I420 or I444");

    // Chroma shifts are rounded down as per the decoding examples in the SDK
    VideoData::YCbCrBuffer b;
    b.mPlanes[0].mData = img->planes[0];
    b.mPlanes[0].mStride = img->stride[0];
    b.mPlanes[0].mHeight = img->d_h;
    b.mPlanes[0].mWidth = img->d_w;
    b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

    b.mPlanes[1].mData = img->planes[1];
    b.mPlanes[1].mStride = img->stride[1];
    b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

    b.mPlanes[2].mData = img->planes[2];
    b.mPlanes[2].mStride = img->stride[2];
    b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

    if (img->fmt == AOM_IMG_FMT_I420) {
      b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;

      b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
    } else if (img->fmt == AOM_IMG_FMT_I444) {
Example #10
void BenchmarkPlayback::InitDecoder(TrackInfo&& aInfo) {
  MOZ_ASSERT(OnThread());

  RefPtr<PDMFactory> platform = new PDMFactory();
  mDecoder = platform->CreateDecoder({aInfo, mDecoderTaskQueue});
  if (!mDecoder) {
    Error(MediaResult(NS_ERROR_FAILURE, "Failed to create decoder"));
    return;
  }
  RefPtr<Benchmark> ref(mGlobalState);
  mDecoder->Init()->Then(
      Thread(), __func__,
      [this, ref](TrackInfo::TrackType aTrackType) { InputExhausted(); },
      [this, ref](const MediaResult& aError) { Error(aError); });
}
Example #11
void
GonkDecoderManager::ProcessFlush()
{
  MOZ_ASSERT(OnTaskLooper());

  mLastTime = INT64_MIN;
  MonitorAutoLock lock(mFlushMonitor);
  mWaitOutput.Clear();
  if (mDecoder->flush() != OK) {
    GMDD_LOG("flush error");
    mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                            __func__));
  }
  mIsFlushing = false;
  lock.NotifyAll();
}
Example #12
void BenchmarkPlayback::InputExhausted() {
  MOZ_ASSERT(OnThread());
  MOZ_ASSERT(!mFinished);

  if (mSampleIndex >= mSamples.Length()) {
    Error(MediaResult(NS_ERROR_FAILURE, "Nothing left to decode"));
    return;
  }

  RefPtr<MediaRawData> sample = mSamples[mSampleIndex];
  RefPtr<Benchmark> ref(mGlobalState);
  RefPtr<MediaDataDecoder::DecodePromise> p = mDecoder->Decode(sample);

  mSampleIndex++;
  if (mSampleIndex == mSamples.Length() && !ref->mParameters.mStopAtFrame) {
    // Complete current frame decode then drain if still necessary.
    p->Then(Thread(), __func__,
            [ref, this](MediaDataDecoder::DecodedData&& aResults) {
              Output(std::move(aResults));
              if (!mFinished) {
                mDecoder->Drain()->Then(
                    Thread(), __func__,
                    [ref, this](MediaDataDecoder::DecodedData&& aResults) {
                      mDrained = true;
                      Output(std::move(aResults));
                      MOZ_ASSERT(mFinished, "We must be done now");
                    },
                    [ref, this](const MediaResult& aError) { Error(aError); });
              }
            },
            [ref, this](const MediaResult& aError) { Error(aError); });
  } else {
    if (mSampleIndex == mSamples.Length() && ref->mParameters.mStopAtFrame) {
      mSampleIndex = 0;
    }
    // Continue decoding
    p->Then(Thread(), __func__,
            [ref, this](MediaDataDecoder::DecodedData&& aResults) {
              Output(std::move(aResults));
              if (!mFinished) {
                InputExhausted();
              }
            },
            [ref, this](const MediaResult& aError) { Error(aError); });
  }
}
Example #13
void
GMPVideoDecoder::Decoded(GMPVideoi420Frame* aDecodedFrame)
{
  GMPUniquePtr<GMPVideoi420Frame> decodedFrame(aDecodedFrame);

  MOZ_ASSERT(IsOnGMPThread());

  VideoData::YCbCrBuffer b;
  for (int i = 0; i < kGMPNumOfPlanes; ++i) {
    b.mPlanes[i].mData = decodedFrame->Buffer(GMPPlaneType(i));
    b.mPlanes[i].mStride = decodedFrame->Stride(GMPPlaneType(i));
    if (i == kGMPYPlane) {
      b.mPlanes[i].mWidth = decodedFrame->Width();
      b.mPlanes[i].mHeight = decodedFrame->Height();
    } else {
      b.mPlanes[i].mWidth = (decodedFrame->Width() + 1) / 2;
      b.mPlanes[i].mHeight = (decodedFrame->Height() + 1) / 2;
    }
    b.mPlanes[i].mOffset = 0;
    b.mPlanes[i].mSkip = 0;
  }

  gfx::IntRect pictureRegion(
    0, 0, decodedFrame->Width(), decodedFrame->Height());
  RefPtr<VideoData> v = VideoData::CreateAndCopyData(
    mConfig,
    mImageContainer,
    mLastStreamOffset,
    media::TimeUnit::FromMicroseconds(decodedFrame->Timestamp()),
    media::TimeUnit::FromMicroseconds(decodedFrame->Duration()),
    b,
    false,
    media::TimeUnit::FromMicroseconds(-1),
    pictureRegion);
  RefPtr<GMPVideoDecoder> self = this;
  if (v) {
    mDecodedData.AppendElement(std::move(v));
  } else {
    mDecodedData.Clear();
    mDecodePromise.RejectIfExists(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("CallBack::CreateAndCopyData")),
      __func__);
  }
}
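The plane sizing above is the usual 4:2:0 rule: chroma planes are half the luma dimensions, rounded up, so an odd-sized image keeps its last chroma column and row. A standalone sketch of the arithmetic (for example, a 1279x719 luma plane gives 640x360 chroma planes):

#include <cstdint>

// Standalone sketch of the 4:2:0 chroma sizing used in Decoded() above.
struct PlaneDims
{
  uint32_t mWidth;
  uint32_t mHeight;
};

static PlaneDims ChromaDims420(uint32_t aLumaWidth, uint32_t aLumaHeight)
{
  // (n + 1) / 2 rounds up, matching (decodedFrame->Width() + 1) / 2 above.
  return { (aLumaWidth + 1) / 2, (aLumaHeight + 1) / 2 };
}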
Example #14
void BenchmarkPlayback::DemuxSamples() {
  MOZ_ASSERT(OnThread());

  RefPtr<Benchmark> ref(mGlobalState);
  mDemuxer->Init()->Then(
      Thread(), __func__,
      [this, ref](nsresult aResult) {
        MOZ_ASSERT(OnThread());
        if (mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack)) {
          mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0);
        } else if (mDemuxer->GetNumberTracks(TrackInfo::kAudioTrack)) {
          mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0);
        }
        if (!mTrackDemuxer) {
          Error(MediaResult(NS_ERROR_FAILURE, "Can't create track demuxer"));
          return;
        }
        DemuxNextSample();
      },
      [this, ref](const MediaResult& aError) { Error(aError); });
}
Example #15
void
ChannelMediaDecoder::NotifyDownloadEnded(nsresult aStatus)
{
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
  AbstractThread::AutoEnter context(AbstractMainThread());

  LOG("NotifyDownloadEnded, status=%" PRIx32, static_cast<uint32_t>(aStatus));

  if (NS_SUCCEEDED(aStatus)) {
    // Download ends successfully. This is a stream with a finite length.
    GetStateMachine()->DispatchIsLiveStream(false);
  }

  MediaDecoderOwner* owner = GetOwner();
  if (NS_SUCCEEDED(aStatus) || aStatus == NS_BASE_STREAM_CLOSED) {
    nsCOMPtr<nsIRunnable> r =
      NS_NewRunnableFunction("ChannelMediaDecoder::UpdatePlaybackRate", [
        stats = mPlaybackStatistics,
        res = RefPtr<BaseMediaResource>(mResource),
        duration = mDuration
      ]() {
        auto rate = ComputePlaybackRate(stats, res, duration);
        UpdatePlaybackRate(rate, res);
      });
    nsresult rv = GetStateMachine()->OwnerThread()->Dispatch(r.forget());
    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
    Unused << rv;
    owner->DownloadSuspended();
    // NotifySuspendedStatusChanged will tell the element that download
    // has been suspended "by the cache", which is true since we never
    // download anything. The element can then transition to HAVE_ENOUGH_DATA.
    owner->NotifySuspendedByCache(true);
  } else if (aStatus == NS_BINDING_ABORTED) {
    // Download has been cancelled by user.
    owner->LoadAborted();
  } else {
    NetworkError(MediaResult(aStatus, "Download aborted"));
  }
}
Example #16
void
ChannelMediaDecoder::NotifyPrincipalChanged()
{
  MOZ_ASSERT(NS_IsMainThread());
  MediaDecoder::NotifyPrincipalChanged();
  if (!mInitialChannelPrincipalKnown) {
    // We'll receive one notification when the channel's initial principal
    // is known, after all HTTP redirects have resolved. This isn't really a
    // principal change, so return here to avoid the mSameOriginMedia check
    // below.
    mInitialChannelPrincipalKnown = true;
    return;
  }
  if (!mSameOriginMedia &&
      Preferences::GetBool("media.block-midflight-redirects", true)) {
    // Block mid-flight redirects to non-CORS same-origin destinations.
    // See bugs 1441153, 1443942.
    LOG("ChannelMediaDecoder prohibited cross-origin redirect blocked.");
    NetworkError(MediaResult(NS_ERROR_DOM_BAD_URI,
                             "Prohibited cross origin redirect blocked"));
  }
}
Example #17
void
VideoCallbackAdapter::Decoded(GMPVideoi420Frame* aDecodedFrame)
{
  GMPUniquePtr<GMPVideoi420Frame> decodedFrame(aDecodedFrame);

  MOZ_ASSERT(IsOnGMPThread());

  VideoData::YCbCrBuffer b;
  for (int i = 0; i < kGMPNumOfPlanes; ++i) {
    b.mPlanes[i].mData = decodedFrame->Buffer(GMPPlaneType(i));
    b.mPlanes[i].mStride = decodedFrame->Stride(GMPPlaneType(i));
    if (i == kGMPYPlane) {
      b.mPlanes[i].mWidth = decodedFrame->Width();
      b.mPlanes[i].mHeight = decodedFrame->Height();
    } else {
      b.mPlanes[i].mWidth = (decodedFrame->Width() + 1) / 2;
      b.mPlanes[i].mHeight = (decodedFrame->Height() + 1) / 2;
    }
    b.mPlanes[i].mOffset = 0;
    b.mPlanes[i].mSkip = 0;
  }

  gfx::IntRect pictureRegion(0, 0, decodedFrame->Width(), decodedFrame->Height());
  RefPtr<VideoData> v =
    VideoData::CreateAndCopyData(mVideoInfo,
                                 mImageContainer,
                                 mLastStreamOffset,
                                 decodedFrame->Timestamp(),
                                 decodedFrame->Duration(),
                                 b,
                                 false,
                                 -1,
                                 pictureRegion);
  if (v) {
    mCallback->Output(v);
  } else {
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
  }
}
Example #18
void
H264Converter::DecodeFirstSample(MediaRawData* aSample)
{
  if (mNeedKeyframe && !aSample->mKeyframe) {
    mDecodePromise.Resolve(DecodedData(), __func__);
    return;
  }
  mNeedKeyframe = false;

  mNeedAVCC =
    Some(mDecoder->NeedsConversion() == ConversionRequired::kNeedAVCC);

  if (!*mNeedAVCC
      && !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
    mDecodePromise.Reject(
      MediaResult(NS_ERROR_OUT_OF_MEMORY,
                  RESULT_DETAIL("ConvertSampleToAnnexB")),
      __func__);
    return;
  }
  if (CanRecycleDecoder()) {
    mDecoder->ConfigurationChanged(mCurrentConfig);
  }
  RefPtr<H264Converter> self = this;
  mDecoder->Decode(aSample)
    ->Then(AbstractThread::GetCurrent()->AsTaskQueue(), __func__,
           [self, this](const MediaDataDecoder::DecodedData& aResults) {
             mDecodePromiseRequest.Complete();
             mDecodePromise.Resolve(aResults, __func__);
           },
           [self, this](const MediaResult& aError) {
             mDecodePromiseRequest.Complete();
             mDecodePromise.Reject(aError, __func__);
           })
    ->Track(mDecodePromiseRequest);
}
Example #19
void
H264Converter::DecodeFirstSample(MediaRawData* aSample)
{
  if (mNeedKeyframe && !aSample->mKeyframe) {
    mDecodePromise.Resolve(mPendingFrames, __func__);
    mPendingFrames.Clear();
    return;
  }

  auto res = !*mNeedAVCC
             ? AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)
             : Ok();
  if (res.isErr()) {
    mDecodePromise.Reject(
      MediaResult(res.unwrapErr(), RESULT_DETAIL("ConvertSampleToAnnexB")),
      __func__);
    return;
  }

  mNeedKeyframe = false;

  RefPtr<H264Converter> self = this;
  mDecoder->Decode(aSample)
    ->Then(AbstractThread::GetCurrent()->AsTaskQueue(), __func__,
           [self, this](const MediaDataDecoder::DecodedData& aResults) {
             mDecodePromiseRequest.Complete();
             mPendingFrames.AppendElements(aResults);
             mDecodePromise.Resolve(mPendingFrames, __func__);
             mPendingFrames.Clear();
           },
           [self, this](const MediaResult& aError) {
             mDecodePromiseRequest.Complete();
             mDecodePromise.Reject(aError, __func__);
           })
    ->Track(mDecodePromiseRequest);
}
Example #20
MediaResult
FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
{
  AVPacket packet;
  mLib->av_init_packet(&packet);

  packet.data = const_cast<uint8_t*>(aSample->Data());
  packet.size = aSample->Size();

  if (!PrepareFrame()) {
    return MediaResult(
      NS_ERROR_OUT_OF_MEMORY,
      RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
  }

  int64_t samplePosition = aSample->mOffset;
  media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);

  while (packet.size > 0) {
    int decoded;
    int bytesConsumed =
      mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);

    if (bytesConsumed < 0) {
      NS_WARNING("FFmpeg audio decoder error.");
      return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                         RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed));
    }

    if (mFrame->format != AV_SAMPLE_FMT_FLT &&
        mFrame->format != AV_SAMPLE_FMT_FLTP &&
        mFrame->format != AV_SAMPLE_FMT_S16 &&
        mFrame->format != AV_SAMPLE_FMT_S16P &&
        mFrame->format != AV_SAMPLE_FMT_S32 &&
        mFrame->format != AV_SAMPLE_FMT_S32P) {
      return MediaResult(
        NS_ERROR_DOM_MEDIA_DECODE_ERR,
        RESULT_DETAIL("FFmpeg audio decoder outputs unsupported audio format"));
    }

    if (decoded) {
      uint32_t numChannels = mCodecContext->channels;
      AudioConfig::ChannelLayout layout(numChannels);
      if (!layout.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_FATAL_ERR,
          RESULT_DETAIL("Unsupported channel layout:%u", numChannels));
      }

      uint32_t samplingRate = mCodecContext->sample_rate;

      AlignedAudioBuffer audio =
        CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
      if (!audio) {
        return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
      }

      media::TimeUnit duration =
        FramesToTimeUnit(mFrame->nb_samples, samplingRate);
      if (!duration.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Invalid sample duration"));
      }

      RefPtr<AudioData> data = new AudioData(samplePosition,
                                             pts.ToMicroseconds(),
                                             duration.ToMicroseconds(),
                                             mFrame->nb_samples,
                                             Move(audio),
                                             numChannels,
                                             samplingRate);
      mCallback->Output(data);
      pts += duration;
      if (!pts.IsValid()) {
        return MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Invalid count of accumulated audio samples"));
      }
    }
    packet.data += bytesConsumed;
    packet.size -= bytesConsumed;
    samplePosition += bytesConsumed;
  }
  return NS_OK;
}
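avcodec_decode_audio4 may consume a packet in several steps, which is why the loop above advances packet.data and shrinks packet.size by the returned byte count. A standalone sketch of that consume loop, with DecodeSome as a hypothetical stand-in for the decode call:

#include <cstddef>
#include <cstdint>

// Standalone sketch of the partial-consume loop in DoDecode above.
// DecodeSome is a hypothetical stand-in for avcodec_decode_audio4: it
// returns the number of bytes consumed, or a negative value on error.
static bool DrainPacket(const uint8_t* aData, size_t aSize,
                        int (*DecodeSome)(const uint8_t*, size_t))
{
  while (aSize > 0) {
    int consumed = DecodeSome(aData, aSize);
    if (consumed < 0) {
      return false;  // decode error; the real code returns a MediaResult
    }
    aData += consumed;
    aSize -= size_t(consumed);
  }
  return true;
}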
Example #21
/* static */ nsTArray<UniquePtr<TrackInfo>>
MP4Decoder::GetTracksInfo(const MediaContainerType& aType, MediaResult& aError)
{
  nsTArray<UniquePtr<TrackInfo>> tracks;

  if (!IsTypeValid(aType)) {
    aError = MediaResult(
      NS_ERROR_DOM_MEDIA_FATAL_ERR,
      RESULT_DETAIL("Invalid type:%s", aType.Type().AsString().get()));
    return tracks;
  }

  aError = NS_OK;

  const MediaCodecs& codecs = aType.ExtendedType().Codecs();
  if (codecs.IsEmpty()) {
    return tracks;
  }

  const bool isVideo = aType.Type() == MEDIAMIMETYPE("video/mp4") ||
                       aType.Type() == MEDIAMIMETYPE("video/quicktime") ||
                       aType.Type() == MEDIAMIMETYPE("video/x-m4v");

  for (const auto& codec : codecs.Range()) {
    if (IsAACCodecString(codec)) {
      tracks.AppendElement(
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("audio/mp4a-latm"), aType));
      continue;
    }
    if (codec.EqualsLiteral("mp3")) {
      tracks.AppendElement(
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("audio/mpeg"), aType));
      continue;
    }
    if (codec.EqualsLiteral("opus") || codec.EqualsLiteral("flac")) {
      tracks.AppendElement(
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("audio/") + NS_ConvertUTF16toUTF8(codec), aType));
      continue;
    }
    if (IsVP9CodecString(codec)) {
      auto trackInfo =
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("video/vp9"), aType);
      uint8_t profile = 0;
      uint8_t level = 0;
      uint8_t bitDepth = 0;
      if (ExtractVPXCodecDetails(codec, profile, level, bitDepth)) {
        trackInfo->GetAsVideoInfo()->mBitDepth = bitDepth;
      }
      tracks.AppendElement(std::move(trackInfo));
      continue;
    }
    if (isVideo && IsWhitelistedH264Codec(codec)) {
      auto trackInfo =
        CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
          NS_LITERAL_CSTRING("video/avc"), aType);
      uint8_t profile = 0, constraint = 0, level = 0;
      MOZ_ALWAYS_TRUE(
        ExtractH264CodecDetails(codec, profile, constraint, level));
      uint32_t width = aType.ExtendedType().GetWidth().refOr(1280);
      uint32_t height = aType.ExtendedType().GetHeight().refOr(720);
      trackInfo->GetAsVideoInfo()->mExtraData =
        H264::CreateExtraData(profile, constraint, level, { width, height });
      tracks.AppendElement(std::move(trackInfo));
      continue;
    }
    // Unknown codec
    aError =
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Unknown codec:%s",
                                NS_ConvertUTF16toUTF8(codec).get()));
  }
  return tracks;
}
Example #22
RefPtr<MediaDataDecoder::DecodePromise>
WaveDataDecoder::ProcessDecode(MediaRawData* aSample)
{
  size_t aLength = aSample->Size();
  BufferReader aReader(aSample->Data(), aLength);
  int64_t aOffset = aSample->mOffset;

  int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;

  AlignedAudioBuffer buffer(frames * mInfo.mChannels);
  if (!buffer) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
  }
  for (int i = 0; i < frames; ++i) {
    for (unsigned int j = 0; j < mInfo.mChannels; ++j) {
      if (mInfo.mProfile == 6) {                              //ALAW Data
        auto res = aReader.ReadU8();
        if (res.isErr()) {
          return DecodePromise::CreateAndReject(
            MediaResult(res.unwrapErr(), __func__), __func__);
        }
        int16_t decoded = DecodeALawSample(res.unwrap());
        buffer[i * mInfo.mChannels + j] =
            IntegerToAudioSample<AudioDataValue>(decoded);
      } else if (mInfo.mProfile == 7) {                       //ULAW Data
        auto res = aReader.ReadU8();
        if (res.isErr()) {
          return DecodePromise::CreateAndReject(
            MediaResult(res.unwrapErr(), __func__), __func__);
        }
        int16_t decoded = DecodeULawSample(res.unwrap());
        buffer[i * mInfo.mChannels + j] =
            IntegerToAudioSample<AudioDataValue>(decoded);
      } else {                                                //PCM Data
        if (mInfo.mBitDepth == 8) {
          auto res = aReader.ReadU8();
          if (res.isErr()) {
            return DecodePromise::CreateAndReject(
              MediaResult(res.unwrapErr(), __func__), __func__);
          }
          buffer[i * mInfo.mChannels + j] =
              UInt8bitToAudioSample<AudioDataValue>(res.unwrap());
        } else if (mInfo.mBitDepth == 16) {
          auto res = aReader.ReadLE16();
          if (res.isErr()) {
            return DecodePromise::CreateAndReject(
              MediaResult(res.unwrapErr(), __func__), __func__);
          }
          buffer[i * mInfo.mChannels + j] =
              IntegerToAudioSample<AudioDataValue>(res.unwrap());
        } else if (mInfo.mBitDepth == 24) {
          auto res = aReader.ReadLE24();
          if (res.isErr()) {
            return DecodePromise::CreateAndReject(
              MediaResult(res.unwrapErr(), __func__), __func__);
          }
          buffer[i * mInfo.mChannels + j] =
              Int24bitToAudioSample<AudioDataValue>(res.unwrap());
        }
      }
    }
  }

  auto duration = FramesToTimeUnit(frames, mInfo.mRate);

  return DecodePromise::CreateAndResolve(
    DecodedData{ new AudioData(aOffset, aSample->mTime, duration, frames,
                               Move(buffer), mInfo.mChannels, mInfo.mRate) },
    __func__);
}
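The frame count above follows from one sample per channel per frame: frames = bytes * 8 / bitDepth / channels, so 4096 bytes of 16-bit stereo PCM yield 4096*8/16/2 = 1024 frames. The A-law branch relies on DecodeALawSample; for reference, a standalone sketch of classic ITU-T G.711 A-law expansion (the textbook form, not necessarily Mozilla's exact helper):

#include <cstdint>

// Standalone sketch of G.711 A-law expansion to 16-bit PCM, in the
// textbook ITU-T form. Mozilla's DecodeALawSample may differ in details
// such as the sign convention.
static int16_t ALawToPcm16(uint8_t aValue)
{
  aValue ^= 0x55;                  // undo the even-bit inversion
  int t = (aValue & 0x0F) << 4;    // mantissa
  int seg = (aValue & 0x70) >> 4;  // segment (exponent)
  switch (seg) {
    case 0:
      t += 8;
      break;
    case 1:
      t += 0x108;
      break;
    default:
      t += 0x108;
      t <<= seg - 1;
  }
  return int16_t((aValue & 0x80) ? t : -t);  // G.711 sign-bit convention
}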
Example #23
MP4Metadata::ResultAndTrackCount MP4Metadata::GetNumberTracks(
    mozilla::TrackInfo::TrackType aType) const {
  uint32_t tracks;
  auto rv = mp4parse_get_track_count(mParser.get(), &tracks);
  if (rv != MP4PARSE_STATUS_OK) {
    MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
            ("rust parser error %d counting tracks", rv));
    return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                        RESULT_DETAIL("Rust parser error %d", rv)),
            MP4Metadata::NumberTracksError()};
  }

  uint32_t total = 0;
  for (uint32_t i = 0; i < tracks; ++i) {
    Mp4parseTrackInfo track_info;
    rv = mp4parse_get_track_info(mParser.get(), i, &track_info);
    if (rv != MP4PARSE_STATUS_OK) {
      continue;
    }

    if (track_info.track_type == MP4PARSE_TRACK_TYPE_AUDIO) {
      Mp4parseTrackAudioInfo audio;
      auto rv = mp4parse_get_track_audio_info(mParser.get(), i, &audio);
      if (rv != MP4PARSE_STATUS_OK) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("mp4parse_get_track_audio_info returned error %d", rv));
        continue;
      }
      MOZ_DIAGNOSTIC_ASSERT(audio.sample_info_count > 0,
                            "Must have at least one audio sample info");
      if (audio.sample_info_count == 0) {
        return {
            MediaResult(
                NS_ERROR_DOM_MEDIA_METADATA_ERR,
                RESULT_DETAIL(
                    "Got 0 audio sample info while checking number tracks")),
            MP4Metadata::NumberTracksError()};
      }
      // We assume the codec of the first sample info is representative of the
      // whole track and skip it if we don't recognize the codec.
      if (audio.sample_info[0].codec_type == MP4PARSE_CODEC_UNKNOWN) {
        continue;
      }
    } else if (track_info.track_type == MP4PARSE_TRACK_TYPE_VIDEO) {
      Mp4parseTrackVideoInfo video;
      auto rv = mp4parse_get_track_video_info(mParser.get(), i, &video);
      if (rv != MP4PARSE_STATUS_OK) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("mp4parse_get_track_video_info returned error %d", rv));
        continue;
      }
      MOZ_DIAGNOSTIC_ASSERT(video.sample_info_count > 0,
                            "Must have at least one video sample info");
      if (video.sample_info_count == 0) {
        return {
            MediaResult(
                NS_ERROR_DOM_MEDIA_METADATA_ERR,
                RESULT_DETAIL(
                    "Got 0 video sample info while checking number tracks")),
            MP4Metadata::NumberTracksError()};
      }
      // We assume the codec of the first sample info is representative of the
      // whole track and skip it if we don't recognize the codec.
      if (video.sample_info[0].codec_type == MP4PARSE_CODEC_UNKNOWN) {
        continue;
      }
    } else {
      // Only audio and video are supported
      continue;
    }
    if (TrackTypeEqual(aType, track_info.track_type)) {
      total += 1;
    }
  }

  MOZ_LOG(gMP4MetadataLog, LogLevel::Info,
          ("%s tracks found: %u", TrackTypeToString(aType), total));

  return {NS_OK, total};
}
Example #24
MediaResult
FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
                                        uint8_t* aData, int aSize,
                                        bool* aGotFrame,
                                        MediaDataDecoder::DecodedData& aResults)
{
  AVPacket packet;
  mLib->av_init_packet(&packet);

  packet.data = aData;
  packet.size = aSize;
  packet.dts = mLastInputDts = aSample->mTimecode;
  packet.pts = aSample->mTime;
  packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
  packet.pos = aSample->mOffset;

  // LibAV provides no API to retrieve the decoded sample's duration.
  // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration.)
  // Instead we keep a map keyed by the dts and retrieve the duration from
  // it later.
  // The map will have a typical size of 16 entries.
  mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);

  if (!PrepareFrame()) {
    NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
    return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
  }

  // Required with old versions of FFmpeg/LibAV.
  mFrame->reordered_opaque = AV_NOPTS_VALUE;

  int decoded;
  int bytesConsumed =
    mLib->avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);

  FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
             "(Input: pts(%" PRId64 ") dts(%" PRId64 ") Output: pts(%" PRId64 ") "
             "opaque(%" PRId64 ") pkt_pts(%" PRId64 ") pkt_dts(%" PRId64 "))",
             bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
             mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);

  if (bytesConsumed < 0) {
    return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                       RESULT_DETAIL("FFmpeg video error:%d", bytesConsumed));
  }

  if (!decoded) {
    if (aGotFrame) {
      *aGotFrame = false;
    }
    return NS_OK;
  }

  // If we've decoded a frame then we need to output it
  int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
  // Retrieve the duration from the dts.
  // We use the first entry found matching this dts (this is done to
  // handle damaged files with multiple frames sharing the same dts).

  int64_t duration;
  if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
    NS_WARNING("Unable to retrieve duration from map");
    duration = aSample->mDuration;
    // The dts are probably incorrectly reported, so clear the map as we're
    // unlikely to find them in the future anyway. This also guards
    // against the map becoming extremely big.
    mDurationMap.Clear();
  }
  FFMPEG_LOG(
    "Got one frame output with pts=%" PRId64 " dts=%" PRId64
    " duration=%" PRId64 " opaque=%" PRId64,
    pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);

  VideoData::YCbCrBuffer b;
  b.mPlanes[0].mData = mFrame->data[0];
  b.mPlanes[1].mData = mFrame->data[1];
  b.mPlanes[2].mData = mFrame->data[2];

  b.mPlanes[0].mStride = mFrame->linesize[0];
  b.mPlanes[1].mStride = mFrame->linesize[1];
  b.mPlanes[2].mStride = mFrame->linesize[2];

  b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
  b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
  b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

  b.mPlanes[0].mWidth = mFrame->width;
  b.mPlanes[0].mHeight = mFrame->height;
  if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
    b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
    b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
  } else {
    b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
    b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
  }
  if (mLib->av_frame_get_colorspace) {
    switch (mLib->av_frame_get_colorspace(mFrame)) {
      case AVCOL_SPC_BT709:
        b.mYUVColorSpace = YUVColorSpace::BT709;
        break;
      case AVCOL_SPC_SMPTE170M:
      case AVCOL_SPC_BT470BG:
        b.mYUVColorSpace = YUVColorSpace::BT601;
        break;
      case AVCOL_SPC_UNSPECIFIED:
#if LIBAVCODEC_VERSION_MAJOR >= 55
        if (mCodecContext->codec_id == AV_CODEC_ID_VP9) {
          b.mYUVColorSpace = YUVColorSpace::BT709;
        }
#endif
        break;
      default:
        break;
    }
  }
  RefPtr<VideoData> v =
    VideoData::CreateAndCopyData(mInfo,
                                  mImageContainer,
                                  aSample->mOffset,
                                  pts,
                                  duration,
                                  b,
                                  !!mFrame->key_frame,
                                  -1,
                                  mInfo.ScaledImageRect(mFrame->width,
                                                        mFrame->height));

  if (!v) {
    return MediaResult(NS_ERROR_OUT_OF_MEMORY,
                       RESULT_DETAIL("image allocation error"));
  }
  aResults.AppendElement(Move(v));
  if (aGotFrame) {
    *aGotFrame = true;
  }
  return NS_OK;
}
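The duration map used above pairs each input's dts with its duration so the duration can be recovered after the decoder reorders frames. A hedged standalone sketch; whether the real map consumes an entry on lookup is an assumption here:

#include <cstdint>
#include <utility>
#include <vector>

// Standalone sketch of the dts -> duration map used in DoDecode above.
// Find() returns the first entry matching the dts, which tolerates damaged
// files where several frames share one dts.
class DurationMap
{
public:
  void Insert(int64_t aDts, int64_t aDuration)
  {
    mEntries.emplace_back(aDts, aDuration);
  }
  bool Find(int64_t aDts, int64_t& aDuration)
  {
    for (size_t i = 0; i < mEntries.size(); ++i) {
      if (mEntries[i].first == aDts) {
        aDuration = mEntries[i].second;
        mEntries.erase(mEntries.begin() + i);  // assumed consume-on-lookup
        return true;
      }
    }
    return false;
  }
  void Clear() { mEntries.clear(); }

private:
  std::vector<std::pair<int64_t, int64_t>> mEntries;
};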
Example #25
/* static */ RefPtr<AllocationWrapper::AllocateDecoderPromise>
AllocationWrapper::CreateDecoder(const CreateDecoderParams& aParams)
{
  // aParams.mConfig is guaranteed to stay alive during the lifetime of the
  // MediaDataDecoder, so keeping a pointer to the object is safe.
  const TrackInfo* config = &aParams.mConfig;
  RefPtr<TaskQueue> taskQueue = aParams.mTaskQueue;
  DecoderDoctorDiagnostics* diagnostics = aParams.mDiagnostics;
  RefPtr<layers::ImageContainer> imageContainer = aParams.mImageContainer;
  RefPtr<layers::KnowsCompositor> knowsCompositor = aParams.mKnowsCompositor;
  RefPtr<GMPCrashHelper> crashHelper = aParams.mCrashHelper;
  CreateDecoderParams::UseNullDecoder useNullDecoder = aParams.mUseNullDecoder;
  CreateDecoderParams::NoWrapper noWrapper = aParams.mNoWrapper;
  TrackInfo::TrackType type = aParams.mType;
  MediaEventProducer<TrackInfo::TrackType>* onWaitingForKeyEvent =
    aParams.mOnWaitingForKeyEvent;
  CreateDecoderParams::OptionSet options = aParams.mOptions;
  CreateDecoderParams::VideoFrameRate rate = aParams.mRate;

  RefPtr<AllocateDecoderPromise> p =
    GlobalAllocPolicy::Instance(aParams.mType)
      .Alloc()
      ->Then(
        AbstractThread::GetCurrent(),
        __func__,
        [=](RefPtr<Token> aToken) {
          // result may not always be updated by PDMFactory::CreateDecoder,
          // whether the creation succeeds or fails, so it must be
          // initialized to a fatal error by default.
          MediaResult result = MediaResult(
            NS_ERROR_DOM_MEDIA_FATAL_ERR,
            nsPrintfCString("error creating %s decoder", TrackTypeToStr(type)));
          RefPtr<PDMFactory> pdm = new PDMFactory();
          CreateDecoderParams params{ *config,
                                      taskQueue,
                                      diagnostics,
                                      imageContainer,
                                      &result,
                                      knowsCompositor,
                                      crashHelper,
                                      useNullDecoder,
                                      noWrapper,
                                      type,
                                      onWaitingForKeyEvent,
                                      options,
                                      rate };
          RefPtr<MediaDataDecoder> decoder = pdm->CreateDecoder(params);
          if (decoder) {
            RefPtr<AllocationWrapper> wrapper =
              new AllocationWrapper(decoder.forget(), aToken.forget());
            return AllocateDecoderPromise::CreateAndResolve(wrapper, __func__);
          }
          return AllocateDecoderPromise::CreateAndReject(result, __func__);
        },
        []() {
          return AllocateDecoderPromise::CreateAndReject(
            MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                        "Allocation policy expired"),
            __func__);
        });
  return p;
}
Example #26
RefPtr<MediaDataDecoder::DecodePromise>
VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  const unsigned char* aData = aSample->Data();
  size_t aLength = aSample->Size();
  int64_t aOffset = aSample->mOffset;

  MOZ_ASSERT(mPacketCount >= 3);

  if (!mLastFrameTime ||
      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
  }

  ogg_packet pkt = InitVorbisPacket(
    aData, aLength, false, aSample->mEOS,
    aSample->mTimecode.ToMicroseconds(), mPacketCount++);

  int err = vorbis_synthesis(&mVorbisBlock, &pkt);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis:%d", err)),
      __func__);
  }

  err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
  if (err) {
    return DecodePromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                  RESULT_DETAIL("vorbis_synthesis_blockin:%d", err)),
      __func__);
  }

  VorbisPCMValue** pcm = 0;
  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  if (frames == 0) {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  }

  DecodedData results;
  while (frames > 0) {
    uint32_t channels = mVorbisDsp.vi->channels;
    uint32_t rate = mVorbisDsp.vi->rate;
    AlignedAudioBuffer buffer(frames*channels);
    if (!buffer) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
    }
    for (uint32_t j = 0; j < channels; ++j) {
      VorbisPCMValue* channel = pcm[j];
      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
        buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
      }
    }

    auto duration = FramesToTimeUnit(frames, rate);
    if (!duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio duration")),
        __func__);
    }
    auto total_duration = FramesToTimeUnit(mFrames, rate);
    if (!total_duration.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting audio total_duration")),
        __func__);
    }

    auto time = total_duration + aSample->mTime;
    if (!time.IsValid()) {
      return DecodePromise::CreateAndReject(
        MediaResult(
          NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
          RESULT_DETAIL("Overflow adding total_duration and aSample->mTime")),
        __func__);
    }

    if (!mAudioConverter) {
      AudioConfig in(
        AudioConfig::ChannelLayout(channels, VorbisLayout(channels)), rate);
      AudioConfig out(channels, rate);
      if (!in.IsValid() || !out.IsValid()) {
        return DecodePromise::CreateAndReject(
          MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                      RESULT_DETAIL("Invalid channel layout:%u", channels)),
          __func__);
      }
      mAudioConverter = MakeUnique<AudioConverter>(in, out);
    }
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    AudioSampleBuffer data(Move(buffer));
    data = mAudioConverter->Process(Move(data));

    results.AppendElement(new AudioData(aOffset, time, duration,
                                        frames, data.Forget(), channels, rate));
    mFrames += frames;
    err = vorbis_synthesis_read(&mVorbisDsp, frames);
    if (err) {
      return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
        __func__);
    }

    frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  }
  return DecodePromise::CreateAndResolve(Move(results), __func__);
}
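vorbis_synthesis_pcmout hands back planar samples (one array per channel), while AudioData expects interleaved frames; that is what the inner i/j loop above does via MOZ_CONVERT_VORBIS_SAMPLE. A standalone sketch of the interleave step, assuming float samples:

#include <cstdint>
#include <vector>

// Standalone sketch of the interleave loop in ProcessDecode above:
// planar[channel][frame] -> out[frame * channels + channel].
static std::vector<float> Interleave(float** aPlanar, uint32_t aChannels,
                                     uint32_t aFrames)
{
  std::vector<float> out(size_t(aFrames) * aChannels);
  for (uint32_t ch = 0; ch < aChannels; ++ch) {
    for (uint32_t i = 0; i < aFrames; ++i) {
      out[size_t(i) * aChannels + ch] = aPlanar[ch][i];
    }
  }
  return out;
}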
Example #27
RefPtr<MediaDataDecoder::InitPromise>
VorbisDataDecoder::Init()
{
  vorbis_info_init(&mVorbisInfo);
  vorbis_comment_init(&mVorbisComment);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisBlock);

  AutoTArray<unsigned char*,4> headers;
  AutoTArray<size_t,4> headerLens;
  if (!XiphExtradataToHeaders(headers, headerLens,
                              mInfo.mCodecSpecificConfig->Elements(),
                              mInfo.mCodecSpecificConfig->Length())) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Could not get vorbis header.")),
      __func__);
  }
  for (size_t i = 0; i < headers.Length(); i++) {
    if (NS_FAILED(DecodeHeader(headers[i], headerLens[i]))) {
      return InitPromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    RESULT_DETAIL("Could not decode vorbis header.")),
        __func__);
    }
  }

  MOZ_ASSERT(mPacketCount == 3);

  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
  if (r) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Systhesis init fail.")),
      __func__);
  }

  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  if (r) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Block init fail.")),
      __func__);
  }

  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec rate do not match!"));
  }
  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
    LOG(LogLevel::Warning,
        ("Invalid Vorbis header: container and codec channels do not match!"));
  }

  AudioConfig::ChannelLayout layout(mVorbisDsp.vi->channels);
  if (!layout.IsValid()) {
    return InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Invalid audio layout.")),
      __func__);
  }

  return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
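XiphExtradataToHeaders unpacks the three Vorbis header packets (identification, comment, setup) from Xiph-laced extradata: a count byte (N-1), then the first N-1 packet lengths encoded as runs of 0xFF plus a terminator byte, with the last packet taking the remainder. A standalone sketch of that unpacking, under the stated format assumption:

#include <cstddef>
#include <cstdint>
#include <vector>

// Standalone sketch of Xiph-lacing extradata unpacking, the job that
// XiphExtradataToHeaders() does above. For Vorbis the count byte is 2,
// yielding the identification, comment and setup headers.
struct XiphPacket
{
  const uint8_t* mData;
  size_t mSize;
};

static bool UnpackXiphHeaders(const uint8_t* aBuf, size_t aLen,
                              std::vector<XiphPacket>& aOut)
{
  if (aLen == 0) {
    return false;
  }
  size_t count = size_t(aBuf[0]) + 1;
  size_t pos = 1;
  std::vector<size_t> sizes;
  for (size_t i = 0; i + 1 < count; ++i) {
    size_t sz = 0;
    while (pos < aLen && aBuf[pos] == 0xFF) {  // each 0xFF adds 255
      sz += 255;
      ++pos;
    }
    if (pos >= aLen) {
      return false;  // truncated length field
    }
    sz += aBuf[pos++];
    sizes.push_back(sz);
  }
  size_t used = 0;
  for (size_t sz : sizes) {
    used += sz;
  }
  if (pos + used > aLen) {
    return false;  // lengths exceed the blob
  }
  for (size_t sz : sizes) {
    aOut.push_back({ aBuf + pos, sz });
    pos += sz;
  }
  aOut.push_back({ aBuf + pos, aLen - pos });  // last packet: the remainder
  return true;
}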
Example #28
 explicit CheckResult(Reason aReason,
                      MediaResult aResult = MediaResult(NS_OK))
   : mReason(aReason),
     mMediaResult(mozilla::Move(aResult))
 {
 }
Example #29
MP4Metadata::ResultAndTrackInfo MP4Metadata::GetTrackInfo(
    mozilla::TrackInfo::TrackType aType, size_t aTrackNumber) const {
  Maybe<uint32_t> trackIndex = TrackTypeToGlobalTrackIndex(aType, aTrackNumber);
  if (trackIndex.isNothing()) {
    return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                        RESULT_DETAIL("No %s tracks", TrackTypeToStr(aType))),
            nullptr};
  }

  Mp4parseTrackInfo info;
  auto rv = mp4parse_get_track_info(mParser.get(), trackIndex.value(), &info);
  if (rv != MP4PARSE_STATUS_OK) {
    MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
            ("mp4parse_get_track_info returned %d", rv));
    return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                        RESULT_DETAIL("Cannot find %s track #%zu",
                                      TrackTypeToStr(aType), aTrackNumber)),
            nullptr};
  }
#ifdef DEBUG
  bool haveSampleInfo = false;
  const char* codecString = "unrecognized";
  Mp4parseCodec codecType = MP4PARSE_CODEC_UNKNOWN;
  if (info.track_type == MP4PARSE_TRACK_TYPE_AUDIO) {
    Mp4parseTrackAudioInfo audio;
    auto rv = mp4parse_get_track_audio_info(mParser.get(), trackIndex.value(),
                                            &audio);
    if (rv == MP4PARSE_STATUS_OK && audio.sample_info_count > 0) {
      codecType = audio.sample_info[0].codec_type;
      haveSampleInfo = true;
    }
  } else if (info.track_type == MP4PARSE_TRACK_TYPE_VIDEO) {
    Mp4parseTrackVideoInfo video;
    auto rv = mp4parse_get_track_video_info(mParser.get(), trackIndex.value(),
                                            &video);
    if (rv == MP4PARSE_STATUS_OK && video.sample_info_count > 0) {
      codecType = video.sample_info[0].codec_type;
      haveSampleInfo = true;
    }
  }
  if (haveSampleInfo) {
    switch (codecType) {
      case MP4PARSE_CODEC_UNKNOWN:
        codecString = "unknown";
        break;
      case MP4PARSE_CODEC_AAC:
        codecString = "aac";
        break;
      case MP4PARSE_CODEC_OPUS:
        codecString = "opus";
        break;
      case MP4PARSE_CODEC_FLAC:
        codecString = "flac";
        break;
      case MP4PARSE_CODEC_ALAC:
        codecString = "alac";
        break;
      case MP4PARSE_CODEC_AVC:
        codecString = "h.264";
        break;
      case MP4PARSE_CODEC_VP9:
        codecString = "vp9";
        break;
      case MP4PARSE_CODEC_AV1:
        codecString = "av1";
        break;
      case MP4PARSE_CODEC_MP3:
        codecString = "mp3";
        break;
      case MP4PARSE_CODEC_MP4V:
        codecString = "mp4v";
        break;
      case MP4PARSE_CODEC_JPEG:
        codecString = "jpeg";
        break;
      case MP4PARSE_CODEC_AC3:
        codecString = "ac-3";
        break;
      case MP4PARSE_CODEC_EC3:
        codecString = "ec-3";
        break;
    }
  }
  MOZ_LOG(gMP4MetadataLog, LogLevel::Debug,
          ("track codec %s (%u)\n", codecString, codecType));
#endif

  // This specialization interface is crazy.
  UniquePtr<mozilla::TrackInfo> e;
  switch (aType) {
    case TrackInfo::TrackType::kAudioTrack: {
      Mp4parseTrackAudioInfo audio;
      auto rv = mp4parse_get_track_audio_info(mParser.get(), trackIndex.value(),
                                              &audio);
      if (rv != MP4PARSE_STATUS_OK) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("mp4parse_get_track_audio_info returned error %d", rv));
        return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                            RESULT_DETAIL("Cannot parse %s track #%zu",
                                          TrackTypeToStr(aType), aTrackNumber)),
                nullptr};
      }
      auto track = mozilla::MakeUnique<MP4AudioInfo>();
      MediaResult updateStatus = track->Update(&info, &audio);
      if (NS_FAILED(updateStatus)) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("Updating audio track failed with %s",
                 updateStatus.Message().get()));
        return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                            RESULT_DETAIL(
                                "Failed to update %s track #%zu with error: %s",
                                TrackTypeToStr(aType), aTrackNumber,
                                updateStatus.Message().get())),
                nullptr};
      }
      e = std::move(track);
    } break;
    case TrackInfo::TrackType::kVideoTrack: {
      Mp4parseTrackVideoInfo video;
      auto rv = mp4parse_get_track_video_info(mParser.get(), trackIndex.value(),
                                              &video);
      if (rv != MP4PARSE_STATUS_OK) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("mp4parse_get_track_video_info returned error %d", rv));
        return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                            RESULT_DETAIL("Cannot parse %s track #%zu",
                                          TrackTypeToStr(aType), aTrackNumber)),
                nullptr};
      }
      auto track = mozilla::MakeUnique<MP4VideoInfo>();
      MediaResult updateStatus = track->Update(&info, &video);
      if (NS_FAILED(updateStatus)) {
        MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
                ("Updating video track failed with %s",
                 updateStatus.Message().get()));
        return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                            RESULT_DETAIL(
                                "Failed to update %s track #%zu with error: %s",
                                TrackTypeToStr(aType), aTrackNumber,
                                updateStatus.Message().get())),
                nullptr};
      }
      e = std::move(track);
    } break;
    default:
      MOZ_LOG(gMP4MetadataLog, LogLevel::Warning,
              ("unhandled track type %d", aType));
      return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                          RESULT_DETAIL("Cannot handle %s track #%zu",
                                        TrackTypeToStr(aType), aTrackNumber)),
              nullptr};
  }

  // No duration in track, use fragment_duration.
  if (e && !e->mDuration.IsPositive()) {
    Mp4parseFragmentInfo info;
    auto rv = mp4parse_get_fragment_info(mParser.get(), &info);
    if (rv == MP4PARSE_STATUS_OK) {
      e->mDuration = TimeUnit::FromMicroseconds(info.fragment_duration);
    }
  }

  if (e && e->IsValid()) {
    return {NS_OK, std::move(e)};
  }
  MOZ_LOG(gMP4MetadataLog, LogLevel::Debug, ("TrackInfo didn't validate"));

  return {MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                      RESULT_DETAIL("Invalid %s track #%zu",
                                    TrackTypeToStr(aType), aTrackNumber)),
          nullptr};
}
Example #30
void
H264Converter::Input(MediaRawData* aSample)
{
  if (!mp4_demuxer::AnnexB::ConvertSampleToAVCC(aSample)) {
    // We need AVCC content to be able to later parse the SPS.
    // This is a no-op if the data is already AVCC.
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("ConvertSampleToAVCC")));
    return;
  }

  if (mInitPromiseRequest.Exists()) {
    if (mNeedKeyframe) {
      if (!aSample->mKeyframe) {
        // Frames dropped, we need a new one.
        mCallback->InputExhausted();
        return;
      }
      mNeedKeyframe = false;
    }
    mMediaRawSamples.AppendElement(aSample);
    return;
  }

  nsresult rv;
  if (!mDecoder) {
    // It is not possible to create an AVCC H264 decoder without SPS.
    // As such, creation will fail if the extra_data just extracted doesn't
    // contain a SPS.
    rv = CreateDecoderAndInit(aSample);
    if (rv == NS_ERROR_NOT_INITIALIZED) {
      // We are missing the required SPS to create the decoder.
      // Ignore for the time being, the MediaRawData will be dropped.
      mCallback->InputExhausted();
      return;
    }
  } else {
    rv = CheckForSPSChange(aSample);
    if (rv == NS_ERROR_NOT_INITIALIZED) {
      // The decoder is pending initialization.
      mCallback->InputExhausted();
      return;
    }
  }
  if (NS_FAILED(rv)) {
    mCallback->Error(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  RESULT_DETAIL("Unable to create H264 decoder")));
    return;
  }

  if (mNeedKeyframe && !aSample->mKeyframe) {
    mCallback->InputExhausted();
    return;
  }

  if (!mNeedAVCC &&
      !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
    mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
                                 RESULT_DETAIL("ConvertSampleToAnnexB")));
    return;
  }

  mNeedKeyframe = false;

  aSample->mExtraData = mCurrentConfig.mExtraData;

  mDecoder->Input(aSample);
}
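ConvertSampleToAVCC and ConvertSampleToAnnexB move between the two H.264 framings: AVCC (4-byte big-endian length prefixes) and Annex B (0x00000001 start codes). A standalone sketch of the core of the AVCC-to-Annex-B direction; Mozilla's real helper additionally handles the extradata (SPS/PPS) and keyframe prepending:

#include <cstddef>
#include <cstdint>
#include <vector>

// Standalone sketch of an AVCC -> Annex B conversion: replace each 4-byte
// big-endian NAL length prefix with a 0x00000001 start code.
static bool AvccToAnnexB(const uint8_t* aIn, size_t aSize,
                         std::vector<uint8_t>& aOut)
{
  static const uint8_t kStartCode[4] = { 0, 0, 0, 1 };
  size_t pos = 0;
  while (pos + 4 <= aSize) {
    uint32_t nalLen = (uint32_t(aIn[pos]) << 24) |
                      (uint32_t(aIn[pos + 1]) << 16) |
                      (uint32_t(aIn[pos + 2]) << 8) | uint32_t(aIn[pos + 3]);
    pos += 4;
    if (nalLen > aSize - pos) {
      return false;  // truncated NAL unit; reject the sample
    }
    aOut.insert(aOut.end(), kStartCode, kStartCode + 4);
    aOut.insert(aOut.end(), aIn + pos, aIn + pos + nalLen);
    pos += nalLen;
  }
  return pos == aSize;
}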