void AppleMP3Reader::NotifyDataArrivedInternal(uint32_t aLength, int64_t aOffset) { MOZ_ASSERT(OnTaskQueue()); if (!mMP3FrameParser.NeedsData()) { return; } IntervalSet<int64_t> intervals = mFilter.NotifyDataArrived(aLength, aOffset); for (const auto& interval : intervals) { RefPtr<MediaByteBuffer> bytes = mResource.MediaReadAt(interval.mStart, interval.Length()); NS_ENSURE_TRUE_VOID(bytes); mMP3FrameParser.Parse(bytes->Elements(), interval.Length(), interval.mStart); if (!mMP3FrameParser.IsMP3()) { return; } uint64_t duration = mMP3FrameParser.GetDuration(); if (duration != mDuration) { LOGD("Updating media duration to %lluus\n", duration); MOZ_ASSERT(mDecoder); mDuration = duration; mDecoder->DispatchUpdateEstimatedMediaDuration(duration); } } }
nsresult
DirectShowReader::SeekInternal(int64_t aTargetUs)
{
  MOZ_ASSERT(OnTaskQueue());
  LOG("DirectShowReader::Seek() target=%lld", aTargetUs);

  // Pause the filter graph before flushing our decode state.
  HRESULT hr = mControl->Pause();
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  nsresult rv = ResetDecode();
  NS_ENSURE_SUCCESS(rv, rv);

  // Reposition the graph to the seek target (absolute, in reference time).
  LONGLONG position = UsecsToRefTime(aTargetUs);
  hr = mMediaSeeking->SetPositions(&position,
                                   AM_SEEKING_AbsolutePositioning,
                                   nullptr,
                                   AM_SEEKING_NoPositioning);
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  // Resume playback from the new position.
  hr = mControl->Run();
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  return NS_OK;
}
// Decode one chunk of compressed audio: read up to AUDIO_READ_BYTES from the
// resource and push them through the AudioFileStream demuxer. Returns false
// when the stream is exhausted or the demuxer hits a hard error.
bool AppleMP3Reader::DecodeAudioData()
{
  MOZ_ASSERT(OnTaskQueue());
  // Read AUDIO_READ_BYTES if we can
  char bytes[AUDIO_READ_BYTES];
  uint32_t numBytes = AUDIO_READ_BYTES;
  nsresult readrv = Read(&numBytes, bytes);
  // This function calls |AudioSampleCallback| above, synchronously, when it
  // finds compressed MP3 frame.
  OSStatus rv = AudioFileStreamParseBytes(mAudioFileStream,
                                          numBytes,
                                          bytes,
                                          0 /* flags */);
  // NOTE(review): the read failure is checked *after* the parse on purpose,
  // so any bytes obtained by a short read are still demuxed before the audio
  // queue is finished — confirm Read() sets numBytes to the count actually
  // read on failure.
  if (NS_FAILED(readrv)) {
    mAudioQueue.Finish();
    return false;
  }
  // DataUnavailable just means there wasn't enough data to demux anything.
  // We should have more to push into the demuxer next time we're called.
  if (rv && rv != kAudioFileStreamError_DataUnavailable) {
    LOGE("AudioFileStreamParseBytes returned unknown error %x", rv);
    return false;
  }
  return true;
}
RefPtr<MediaDecoderReader::SeekPromise>
AppleMP3Reader::Seek(int64_t aTime, int64_t aEndTime)
{
  MOZ_ASSERT(OnTaskQueue());

  // Translate the target time into a frame index, then into the compressed
  // packet that holds that frame.
  mCurrentAudioFrame = aTime * mAudioSampleRate / USECS_PER_S;
  SInt64 targetPacket = mCurrentAudioFrame / mAudioFramesPerCompressedPacket;

  // |AudioFileStreamSeek| will pass back through |byteOffset| the byte offset
  // into the stream it expects next time it reads.
  SInt64 byteOffset;
  UInt32 seekFlags = 0;
  OSStatus status =
    AudioFileStreamSeek(mAudioFileStream, targetPacket, &byteOffset, &seekFlags);

  if (status) {
    LOGE("Couldn't seek demuxer. Error code %x\n", status);
    return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
  }

  LOGD("computed byte offset = %lld; estimated = %s\n",
       byteOffset,
       (seekFlags & kAudioFileStreamSeekFlag_OffsetIsEstimated) ? "YES" : "NO");

  // Reposition the resource and drop any previously decoded data.
  mResource.Seek(nsISeekableStream::NS_SEEK_SET, byteOffset);
  ResetDecode();

  return SeekPromise::CreateAndResolve(aTime, __func__);
}
// Convert a time in seconds to the corresponding byte offset, rounded down
// to a whole audio frame.
int64_t
WaveReader::TimeToBytes(double aTime) const
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aTime >= 0.0f, "Must be >= 0");
  // Seconds -> samples -> bytes, then snap down to a frame boundary.
  const double byteCount = aTime * mSampleRate * mFrameSize;
  return RoundDownToFrame(int64_t(byteCount));
}
// Round a byte count down to the nearest whole-frame boundary.
int64_t
WaveReader::RoundDownToFrame(int64_t aBytes) const
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aBytes >= 0, "Must be >= 0");
  // Drop any trailing partial frame.
  const int64_t partial = aBytes % mFrameSize;
  return aBytes - partial;
}
// Convert a byte offset within the PCM payload to a time in seconds.
double
WaveReader::BytesToTime(int64_t aBytes) const
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aBytes >= 0, "Must be >= 0");
  // Convert via double throughout: the previous float(aBytes) cast truncated
  // offsets to float's 24-bit mantissa (losing precision past ~16 MiB) even
  // though the function returns a double.
  return double(aBytes) / mSampleRate / mFrameSize;
}
bool WaveReader::LoadRIFFChunk() { MOZ_ASSERT(OnTaskQueue()); char riffHeader[RIFF_INITIAL_SIZE]; const char* p = riffHeader; MOZ_ASSERT(mResource.Tell() == 0, "LoadRIFFChunk called when resource in invalid state"); if (!ReadAll(riffHeader, sizeof(riffHeader))) { return false; } static_assert(sizeof(uint32_t) * 3 <= RIFF_INITIAL_SIZE, "Reads would overflow riffHeader buffer."); if (ReadUint32BE(&p) != RIFF_CHUNK_MAGIC) { NS_WARNING("resource data not in RIFF format"); return false; } // Skip over RIFF size field. p += sizeof(uint32_t); if (ReadUint32BE(&p) != WAVE_CHUNK_MAGIC) { NS_WARNING("Expected WAVE chunk"); return false; } return true; }
// Report which time ranges of the PCM payload are currently cached.
media::TimeIntervals
WaveReader::GetBuffered()
{
  MOZ_ASSERT(OnTaskQueue());
  if (!mInfo.HasAudio()) {
    return media::TimeIntervals();
  }

  media::TimeIntervals ranges;
  AutoPinned<MediaResource> resource(mDecoder->GetResource());

  // Walk every contiguous cached region after the start of the PCM data.
  int64_t start = resource->GetNextCachedData(mWavePCMOffset);
  while (start >= 0) {
    int64_t end = resource->GetCachedDataEnd(start);
    // Bytes [start..end] are cached.
    NS_ASSERTION(start >= mWavePCMOffset, "Integer underflow in GetBuffered");
    NS_ASSERTION(end >= mWavePCMOffset, "Integer underflow in GetBuffered");

    // We need to round the buffered ranges' times to microseconds so that they
    // have the same precision as the currentTime and duration attribute on
    // the media element.
    ranges += media::TimeInterval(
      media::TimeUnit::FromSeconds(BytesToTime(start - mWavePCMOffset)),
      media::TimeUnit::FromSeconds(BytesToTime(end - mWavePCMOffset)));

    start = resource->GetNextCachedData(end);
  }

  return ranges;
}
// Compute the buffered ranges of the media source: the intersection of every
// active track buffer's ranges, so a time is "buffered" only if all tracks
// have data for it.
media::TimeIntervals
MediaSourceReader::GetBuffered()
{
  MOZ_ASSERT(OnTaskQueue());
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  media::TimeIntervals buffered;

  media::TimeUnit highestEndTime;
  nsTArray<media::TimeIntervals> activeRanges;
  // Must set the capacity of the nsTArray first: bug #1164444
  activeRanges.SetCapacity(mTrackBuffers.Length());

  // Collect each track buffer's ranges and track the furthest end time seen
  // across all of them.
  for (const auto& trackBuffer : mTrackBuffers) {
    activeRanges.AppendElement(trackBuffer->Buffered());
    highestEndTime =
      std::max(highestEndTime, activeRanges.LastElement().GetEnd());
  }

  // Start from the full span [0, highestEndTime] and intersect each track's
  // ranges into it below.
  buffered +=
    media::TimeInterval(media::TimeUnit::FromMicroseconds(0), highestEndTime);

  for (auto& range : activeRanges) {
    if (IsEnded() && range.Length()) {
      // Set the end time on the last range to highestEndTime by adding a
      // new range spanning the current end time to highestEndTime, which
      // Normalize() will then merge with the old last range.
      range += media::TimeInterval(range.GetEnd(), highestEndTime);
    }
    buffered.Intersection(range);
  }

  MSE_DEBUG("ranges=%s", DumpTimeRanges(buffered).get());
  return buffered;
}
// Tear down the reader on its task queue: reject pending sample requests,
// disconnect mirrors/listeners, release media resources, and begin shutting
// down the task queue. Returns the task queue's shutdown promise.
RefPtr<ShutdownPromise>
MediaDecoderReader::Shutdown()
{
  MOZ_ASSERT(OnTaskQueue());
  mShutdown = true;

  // No more data is coming; fail any outstanding sample requests.
  mBaseAudioPromise.RejectIfExists(END_OF_STREAM, __func__);
  mBaseVideoPromise.RejectIfExists(END_OF_STREAM, __func__);

  mDataArrivedListener.DisconnectIfExists();

  ReleaseMediaResources();
  mDuration.DisconnectIfConnected();
  mBuffered.DisconnectAll();
  mIsSuspended.DisconnectAll();

  // Shut down the watch manager before shutting down our task queue.
  mWatchManager.Shutdown();

  mDecoder = nullptr;

  ReaderQueue::Instance().Remove(this);

  // (Removed a dead local |RefPtr<ShutdownPromise> p;| that was never used.)
  return mTaskQueue->BeginShutdown();
}
// Read the stream's metadata on the task queue, resolving with a
// MetadataHolder on success or rejecting with the failure reason.
nsRefPtr<MediaDecoderReader::MetadataPromise>
MediaDecoderReader::AsyncReadMetadata()
{
  typedef ReadMetadataFailureReason Reason;

  MOZ_ASSERT(OnTaskQueue());
  mDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn();
  DECODER_LOG("MediaDecoderReader::AsyncReadMetadata");

  // Bail out early if a hardware resource isn't ready for use yet.
  if (IsWaitingMediaResources()) {
    return MetadataPromise::CreateAndReject(Reason::WAITING_FOR_RESOURCES,
                                            __func__);
  }

  // Attempt to read the metadata.
  nsRefPtr<MetadataHolder> holder = new MetadataHolder();
  nsresult rv = ReadMetadata(&holder->mInfo, getter_Transfers(holder->mTags));

  // Reading metadata can cause us to discover that we need resources (a hardware
  // resource initialized but not yet ready for use).
  if (IsWaitingMediaResources()) {
    return MetadataPromise::CreateAndReject(Reason::WAITING_FOR_RESOURCES,
                                            __func__);
  }

  // We're not waiting for anything. If we didn't get the metadata, that's an
  // error.
  bool gotValidMedia = NS_SUCCEEDED(rv) && holder->mInfo.HasValidMedia();
  if (!gotValidMedia) {
    DECODER_WARN("ReadMetadata failed, rv=%x HasValidMedia=%d",
                 rv, holder->mInfo.HasValidMedia());
    return MetadataPromise::CreateAndReject(Reason::METADATA_ERROR, __func__);
  }

  // Success!
  return MetadataPromise::CreateAndResolve(holder, __func__);
}
// Seek both streams to aTarget. With audio+video present, the video is seeked
// first (to a keyframe) and the audio is then aligned to the video's actual
// time; otherwise both seek times are simply the target.
RefPtr<MediaDecoderReader::SeekPromise>
MediaOmxReader::Seek(int64_t aTarget, int64_t aEndTime)
{
  MOZ_ASSERT(OnTaskQueue());
  EnsureActive();
  RefPtr<SeekPromise> p = mSeekPromise.Ensure(__func__);

  if (mHasAudio && mHasVideo) {
    // The OMXDecoder seeks/demuxes audio and video streams separately. So if
    // we seek both audio and video to aTarget, the audio stream can typically
    // seek closer to the seek target, since typically every audio block is
    // a sync point, whereas for video there are only keyframes once every few
    // seconds. So if we have both audio and video, we must seek the video
    // stream to the preceeding keyframe first, get the stream time, and then
    // seek the audio stream to match the video stream's time. Otherwise, the
    // audio and video streams won't be in sync after the seek.
    mVideoSeekTimeUs = aTarget;

    // Capture a strong self-reference so the lambdas below remain valid for
    // as long as the async request is outstanding.
    RefPtr<MediaOmxReader> self = this;
    mSeekRequest.Begin(DecodeToFirstVideoData()->Then(OwnerThread(), __func__,
      [self] (MediaData* v) {
        // Video landed on a keyframe: align the audio seek to its time.
        self->mSeekRequest.Complete();
        self->mAudioSeekTimeUs = v->mTime;
        self->mSeekPromise.Resolve(self->mAudioSeekTimeUs, __func__);
      }, [self, aTarget] () {
        // Video decode failed/ended: fall back to the requested target.
        self->mSeekRequest.Complete();
        self->mAudioSeekTimeUs = aTarget;
        self->mSeekPromise.Resolve(aTarget, __func__);
      }));
  } else {
    // Single-stream media: both seek times are the target itself.
    mAudioSeekTimeUs = mVideoSeekTimeUs = aTarget;
    mSeekPromise.Resolve(aTarget, __func__);
  }

  return p;
}
void MediaOmxReader::NotifyDataArrivedInternal(uint32_t aLength, int64_t aOffset) { MOZ_ASSERT(OnTaskQueue()); RefPtr<AbstractMediaDecoder> decoder = SafeGetDecoder(); if (!decoder) { // reader has shut down return; } if (HasVideo()) { return; } if (!mMP3FrameParser.NeedsData()) { return; } IntervalSet<int64_t> intervals = mFilter.NotifyDataArrived(aLength, aOffset); for (const auto& interval : intervals) { RefPtr<MediaByteBuffer> bytes = mDecoder->GetResource()->MediaReadAt(interval.mStart, interval.Length()); NS_ENSURE_TRUE_VOID(bytes); mMP3FrameParser.Parse(bytes->Elements(), interval.Length(), interval.mStart); if (!mMP3FrameParser.IsMP3()) { return; } int64_t duration = mMP3FrameParser.GetDuration(); if (duration != mLastParserDuration) { mLastParserDuration = duration; decoder->DispatchUpdateEstimatedMediaDuration(mLastParserDuration); } } }
bool MediaOmxReader::DecodeAudioData() { MOZ_ASSERT(OnTaskQueue()); EnsureActive(); MOZ_ASSERT(mStreamSource); // This is the approximate byte position in the stream. int64_t pos = mStreamSource->Tell(); // Read next frame MPAPI::AudioFrame source; if (!mOmxDecoder->ReadAudio(&source, mAudioSeekTimeUs)) { return false; } mAudioSeekTimeUs = -1; // Ignore empty buffer which stagefright media read will sporadically return if (source.mSize == 0) { return true; } uint32_t frames = source.mSize / (source.mAudioChannels * sizeof(AudioDataValue)); typedef AudioCompactor::NativeCopy OmxCopy; return mAudioCompactor.Push(pos, source.mTimeUs, source.mAudioSampleRate, frames, source.mAudioChannels, OmxCopy(static_cast<uint8_t *>(source.mData), source.mSize, source.mAudioChannels)); }
/** * If this is an MP3 stream, pass any new data we get to the MP3 frame parser * for duration estimation. */ void GStreamerReader::NotifyDataArrivedInternal(uint32_t aLength, int64_t aOffset) { MOZ_ASSERT(OnTaskQueue()); if (HasVideo()) { return; } if (!mMP3FrameParser.NeedsData()) { return; } nsRefPtr<MediaByteBuffer> bytes = mResource.MediaReadAt(aOffset, aLength); NS_ENSURE_TRUE_VOID(bytes); mMP3FrameParser.Parse(bytes->Elements(), aLength, aOffset); if (!mMP3FrameParser.IsMP3()) { return; } int64_t duration = mMP3FrameParser.GetDuration(); if (duration != mLastParserDuration && mUseParserDuration) { MOZ_ASSERT(mDecoder); mLastParserDuration = duration; mDecoder->DispatchUpdateEstimatedMediaDuration(mLastParserDuration); } }
// Seek the pipeline to aTarget, waiting for the asynchronous seek to settle.
// Rejects if the seek could not be issued, or if the bus reports an ERROR
// instead of ASYNC_DONE.
nsRefPtr<MediaDecoderReader::SeekPromise>
GStreamerReader::Seek(int64_t aTarget, int64_t aEndTime)
{
  MOZ_ASSERT(OnTaskQueue());

  gint64 seekPos = aTarget * GST_USECOND;
  LOG(LogLevel::Debug, "%p About to seek to %" GST_TIME_FORMAT,
      mDecoder, GST_TIME_ARGS(seekPos));

  int flags = GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_KEY_UNIT;
  if (!gst_element_seek_simple(mPlayBin, GST_FORMAT_TIME,
                               static_cast<GstSeekFlags>(flags), seekPos)) {
    LOG(LogLevel::Error, "seek failed");
    return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
  }
  LOG(LogLevel::Debug, "seek succeeded");

  // Block until the async seek settles. The filtered pop can hand back either
  // ASYNC_DONE or ERROR; previously an ERROR message was unref'd without
  // inspection and the seek resolved as a success.
  GstMessage* message =
    gst_bus_timed_pop_filtered(mBus, GST_CLOCK_TIME_NONE,
                               (GstMessageType)(GST_MESSAGE_ASYNC_DONE |
                                                GST_MESSAGE_ERROR));
  bool seekErrored = message && GST_MESSAGE_TYPE(message) == GST_MESSAGE_ERROR;
  if (message) {
    gst_message_unref(message);
  }
  if (seekErrored) {
    LOG(LogLevel::Error, "seek failed");
    return SeekPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
  }
  LOG(LogLevel::Debug, "seek completed");

  return SeekPromise::CreateAndResolve(aTarget, __func__);
}
// Read the stream's metadata on the task queue and resolve with a
// MetadataHolder, or reject with NS_ERROR_DOM_MEDIA_METADATA_ERR.
RefPtr<MediaDecoderReader::MetadataPromise>
MediaDecoderReader::AsyncReadMetadata()
{
  MOZ_ASSERT(OnTaskQueue());
  DECODER_LOG("MediaDecoderReader::AsyncReadMetadata");

  // Attempt to read the metadata.
  MetadataHolder holder;
  holder.mInfo = MakeUnique<MediaInfo>();
  MetadataTags* rawTags = nullptr;
  nsresult rv = ReadMetadata(holder.mInfo.get(), &rawTags);
  holder.mTags.reset(rawTags);
  holder.mInfo->AssertValid();

  // Update the buffer ranges before resolving the metadata promise. Bug 1320258.
  UpdateBuffered();

  // We're not waiting for anything. If we didn't get the metadata, that's an
  // error.
  bool gotValidMedia = NS_SUCCEEDED(rv) && holder.mInfo->HasValidMedia();
  if (!gotValidMedia) {
    DECODER_WARN("ReadMetadata failed, rv=%" PRIx32 " HasValidMedia=%d",
                 static_cast<uint32_t>(rv), holder.mInfo->HasValidMedia());
    return MetadataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                                            __func__);
  }

  // Success!
  return MetadataPromise::CreateAndResolve(Move(holder), __func__);
}
void MediaOmxCommonReader::CheckAudioOffload() { MOZ_ASSERT(OnTaskQueue()); char offloadProp[128]; property_get("audio.offload.disable", offloadProp, "0"); bool offloadDisable = atoi(offloadProp) != 0; if (offloadDisable) { return; } sp<MediaSource> audioOffloadTrack = GetAudioOffloadTrack(); sp<MetaData> meta = audioOffloadTrack.get() ? audioOffloadTrack->getFormat() : nullptr; // Supporting audio offload only when there is no video, no streaming bool hasNoVideo = !HasVideo(); bool isNotStreaming = mDecoder->GetResource()->IsDataCachedToEndOfResource(0); // Not much benefit in trying to offload other channel types. Most of them // aren't supported and also duration would be less than a minute bool isTypeMusic = mAudioChannel == dom::AudioChannel::Content; DECODER_LOG(LogLevel::Debug, ("%s meta %p, no video %d, no streaming %d," " channel type %d", __FUNCTION__, meta.get(), hasNoVideo, isNotStreaming, mAudioChannel)); if ((meta.get()) && hasNoVideo && isNotStreaming && isTypeMusic && canOffloadStream(meta, false, false, AUDIO_STREAM_MUSIC) && !IsMonoAudioEnabled()) { DECODER_LOG(LogLevel::Debug, ("Can offload this audio stream")); mDecoder->SetPlatformCanOffloadAudio(true); } }
// Keep decoding video until at least one frame is available in the video
// queue, then resolve with that first frame (or reject on EOS/error/shutdown).
RefPtr<MediaDecoderReader::VideoDataPromise>
MediaDecoderReader::DecodeToFirstVideoData()
{
  MOZ_ASSERT(OnTaskQueue());
  typedef MediaDecoderReader::VideoDataPromise PromiseType;
  RefPtr<PromiseType::Private> p = new PromiseType::Private(__func__);
  // Strong self-reference keeps the reader alive while the loop is running.
  RefPtr<MediaDecoderReader> self = this;
  InvokeUntil([self] () -> bool {
    MOZ_ASSERT(self->OnTaskQueue());
    NS_ENSURE_TRUE(!self->mShutdown, false);
    bool skip = false;
    if (!self->DecodeVideoFrame(skip, 0)) {
      // Decoding ended (EOS or error): finish the queue and report success
      // only if a frame made it into the queue before that happened.
      self->VideoQueue().Finish();
      return !!self->VideoQueue().PeekFront();
    }
    return true;
  }, [self] () -> bool {
    MOZ_ASSERT(self->OnTaskQueue());
    // Stop condition: at least one decoded frame is queued.
    return self->VideoQueue().GetSize();
  })->Then(OwnerThread(), __func__,
    [self, p] () {
      // Resolve with the first decoded frame.
      p->Resolve(self->VideoQueue().PeekFront(), __func__);
    },
    [p] () {
      // We don't have a way to differentiate EOS, error, and shutdown here. :-(
      p->Reject(END_OF_STREAM, __func__);
    });
  return p.forget();
}
// Refresh the mirrored buffered ranges from the reader implementation.
void
MediaDecoderReader::UpdateBuffered()
{
  MOZ_ASSERT(OnTaskQueue());
  // Warn-and-return if we've already shut down (NS_ENSURE_TRUE_VOID logs).
  NS_ENSURE_TRUE_VOID(!mShutdown);
  mBuffered = GetBuffered();
}
// Parse the WAVE container headers and fill in the audio stream info,
// duration, and any metadata tags.
nsresult WaveReader::ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags)
{
  MOZ_ASSERT(OnTaskQueue());

  // Validate the RIFF/WAVE header, then walk every chunk up to the PCM data.
  if (!LoadRIFFChunk()) {
    return NS_ERROR_FAILURE;
  }

  nsAutoPtr<dom::HTMLMediaElement::MetadataTags> tags;
  if (!LoadAllChunks(tags)) {
    return NS_ERROR_FAILURE;
  }

  // Publish the stream parameters discovered while parsing the chunks.
  mInfo.mAudio.mRate = mSampleRate;
  mInfo.mAudio.mChannels = mChannels;
  mInfo.mMetadataDuration.emplace(
    TimeUnit::FromSeconds(BytesToTime(GetDataLength())));

  *aInfo = mInfo;
  *aTags = tags.forget();

  return NS_OK;
}
// WAVE files carry audio only; there is never a video frame to decode.
bool WaveReader::DecodeVideoFrame(bool &aKeyframeSkip, int64_t aTimeThreshold)
{
  MOZ_ASSERT(OnTaskQueue());
  return false;
}
// Tear down the reader: reject pending sample requests, release media
// resources, and spin down the task queue if we own it. Returns a promise
// that resolves when shutdown is complete.
nsRefPtr<ShutdownPromise>
MediaDecoderReader::Shutdown()
{
  MOZ_ASSERT(OnTaskQueue());
  mShutdown = true;

  // No more data is coming; fail any outstanding sample requests.
  mBaseAudioPromise.RejectIfExists(END_OF_STREAM, __func__);
  mBaseVideoPromise.RejectIfExists(END_OF_STREAM, __func__);

  ReleaseMediaResources();
  nsRefPtr<ShutdownPromise> p;

  // Spin down the task queue if necessary. We wait until BreakCycles to null
  // out mTaskQueue, since otherwise any remaining tasks could crash when they
  // invoke GetTaskQueue()->IsCurrentThreadIn().
  if (mTaskQueue && !mTaskQueueIsBorrowed) {
    // If we own our task queue, shutdown ends when the task queue is done.
    p = mTaskQueue->BeginShutdown();
  } else {
    // If we don't own our task queue, we resolve immediately (though
    // asynchronously).
    p = ShutdownPromise::CreateAndResolve(true, __func__);
  }

  // Drop the decoder reference only after the shutdown promise is in hand.
  mDecoder = nullptr;

  return p;
}
// Seek the WMF source reader to aTargetUs: flush decode state, then move the
// source reader's current position (in hundred-nanosecond units).
nsresult
WMFReader::SeekInternal(int64_t aTargetUs)
{
  MOZ_ASSERT(OnTaskQueue());
  DECODER_LOG("WMFReader::Seek() %lld", aTargetUs);

#ifdef DEBUG
  bool canSeek = false;
  GetSourceReaderCanSeek(mSourceReader, canSeek);
  NS_ASSERTION(canSeek, "WMFReader::Seek() should only be called if we can seek!");
#endif

  nsresult rv = ResetDecode();
  NS_ENSURE_SUCCESS(rv, rv);

  // Mark that we must recapture the audio frame count from the next sample.
  // WMF doesn't set a discontinuity marker when we seek to time 0, so we
  // must remember to recapture the audio frame offset and reset the frame
  // sum on the next audio packet we decode.
  mMustRecaptureAudioPosition = true;

  AutoPropVar var;
  HRESULT hr = InitPropVariantFromInt64(UsecsToHNs(aTargetUs), &var);
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  hr = mSourceReader->SetCurrentPosition(GUID_NULL, var);
  NS_ENSURE_TRUE(SUCCEEDED(hr), NS_ERROR_FAILURE);

  return NS_OK;
}
int64_t MediaOmxReader::ProcessCachedData(int64_t aOffset) { // Could run on decoder thread or IO thread. RefPtr<AbstractMediaDecoder> decoder = SafeGetDecoder(); if (!decoder) { // reader has shut down return -1; } // We read data in chunks of 32 KiB. We can reduce this // value if media, such as sdcards, is too slow. // Because of SD card's slowness, need to keep sReadSize to small size. // See Bug 914870. static const int64_t sReadSize = 32 * 1024; NS_ASSERTION(!NS_IsMainThread(), "Should not be on main thread."); MOZ_ASSERT(decoder->GetResource()); int64_t resourceLength = decoder->GetResource()->GetCachedDataEnd(0); NS_ENSURE_TRUE(resourceLength >= 0, -1); if (aOffset >= resourceLength) { return 0; // Cache is empty, nothing to do } int64_t bufferLength = std::min<int64_t>(resourceLength-aOffset, sReadSize); RefPtr<NotifyDataArrivedRunnable> runnable( new NotifyDataArrivedRunnable(this, bufferLength, aOffset, resourceLength)); if (OnTaskQueue()) { runnable->Run(); } else { OwnerThread()->Dispatch(runnable.forget()); } return resourceLength - aOffset - bufferLength; }
// Tear down the reader on its task queue: reject pending sample requests,
// disconnect throttled notifications and mirrors, release resources, and
// begin shutting down the task queue. Returns the queue's shutdown promise.
RefPtr<ShutdownPromise>
MediaDecoderReader::Shutdown()
{
  MOZ_ASSERT(OnTaskQueue());
  mShutdown = true;

  // No more data is coming; fail any outstanding sample requests.
  mBaseAudioPromise.RejectIfExists(END_OF_STREAM, __func__);
  mBaseVideoPromise.RejectIfExists(END_OF_STREAM, __func__);

  mThrottledNotify.DisconnectIfExists();

  ReleaseMediaResources();
  mDuration.DisconnectIfConnected();
  mBuffered.DisconnectAll();

  // Shut down the watch manager before shutting down our task queue.
  mWatchManager.Shutdown();

  mTimer = nullptr;
  mDecoder = nullptr;

  // (Removed a dead local |RefPtr<ShutdownPromise> p;| that was never used.)
  return mTaskQueue->BeginShutdown();
}
// Decode one block of PCM audio: read up to BLOCK_SIZE bytes from the
// resource, convert them to AudioDataValue samples, and push an AudioData
// entry onto the audio queue. Returns false if the read fails.
bool WaveReader::DecodeAudioData()
{
  MOZ_ASSERT(OnTaskQueue());

  // Position within the PCM payload and the number of bytes remaining.
  int64_t pos = GetPosition() - mWavePCMOffset;
  int64_t len = GetDataLength();
  int64_t remaining = len - pos;
  NS_ASSERTION(remaining >= 0, "Current wave position is greater than wave file length");

  static const int64_t BLOCK_SIZE = 4096;
  int64_t readSize = std::min(BLOCK_SIZE, remaining);
  int64_t frames = readSize / mFrameSize;

  static_assert(uint64_t(BLOCK_SIZE) < UINT_MAX /
                sizeof(AudioDataValue) / MAX_CHANNELS,
                "bufferSize calculation could overflow.");
  const size_t bufferSize = static_cast<size_t>(frames * mChannels);
  nsAutoArrayPtr<AudioDataValue> sampleBuffer(new AudioDataValue[bufferSize]);

  static_assert(uint64_t(BLOCK_SIZE) < UINT_MAX / sizeof(char),
                "BLOCK_SIZE too large for enumerator.");
  nsAutoArrayPtr<char> dataBuffer(new char[static_cast<size_t>(readSize)]);

  if (!ReadAll(dataBuffer, readSize)) {
    return false;
  }

  // convert data to samples
  const char* d = dataBuffer.get();
  AudioDataValue* s = sampleBuffer.get();
  for (int i = 0; i < frames; ++i) {
    for (unsigned int j = 0; j < mChannels; ++j) {
      if (mSampleFormat == FORMAT_U8) {
        uint8_t v = ReadUint8(&d);
        *s++ = UnsignedByteToAudioSample<AudioDataValue>(v);
      } else if (mSampleFormat == FORMAT_S16) {
        int16_t v = ReadInt16LE(&d);
        *s++ = SignedShortToAudioSample<AudioDataValue>(v);
      }
      // NOTE(review): any other sample format leaves *s unwritten and does
      // not advance d — confirm only U8/S16 can reach this decode path.
    }
  }

  // Times are computed in seconds, then scaled to microseconds for AudioData.
  double posTime = BytesToTime(pos);
  double readSizeTime = BytesToTime(readSize);
  NS_ASSERTION(posTime <= INT64_MAX / USECS_PER_S, "posTime overflow");
  NS_ASSERTION(readSizeTime <= INT64_MAX / USECS_PER_S, "readSizeTime overflow");
  NS_ASSERTION(frames < INT32_MAX, "frames overflow");

  mAudioQueue.Push(new AudioData(pos,
                                 static_cast<int64_t>(posTime * USECS_PER_S),
                                 static_cast<int64_t>(readSizeTime * USECS_PER_S),
                                 static_cast<int32_t>(frames),
                                 sampleBuffer.forget(),
                                 mChannels,
                                 mSampleRate));

  return true;
}
// Return a promise that resolves once data of |aType| becomes available.
// If data is already present, MaybeNotifyHaveData() resolves it right away.
nsRefPtr<MediaDecoderReader::WaitForDataPromise>
MediaSourceReader::WaitForData(MediaData::Type aType)
{
  MOZ_ASSERT(OnTaskQueue());
  ReentrantMonitorAutoEnter autoMon(mDecoder->GetReentrantMonitor());
  nsRefPtr<WaitForDataPromise> promise = WaitPromise(aType).Ensure(__func__);
  MaybeNotifyHaveData();
  return promise;
}
// Request the next decoded video sample. Handles source-buffer switching:
// a new underlying reader must be reset and seeked first; with no suitable
// source we either fall back to the earliest decoder or wait/end the stream.
nsRefPtr<MediaDecoderReader::VideoDataPromise>
MediaSourceReader::RequestVideoData(bool aSkipToNextKeyframe,
                                    int64_t aTimeThreshold,
                                    bool aForceDecodeAhead)
{
  MOZ_ASSERT(OnTaskQueue());
  MOZ_DIAGNOSTIC_ASSERT(mSeekPromise.IsEmpty(),
                        "No sample requests allowed while seeking");
  MOZ_DIAGNOSTIC_ASSERT(mVideoPromise.IsEmpty(), "No duplicate sample requests");
  nsRefPtr<VideoDataPromise> p = mVideoPromise.Ensure(__func__);
  MSE_DEBUGV("RequestVideoData(%d, %lld), mLastVideoTime=%lld",
             aSkipToNextKeyframe, aTimeThreshold, mLastVideoTime);
  if (!mVideoTrack) {
    MSE_DEBUG("called with no video track");
    mVideoPromise.Reject(DECODE_ERROR, __func__);
    return p;
  }
  if (aSkipToNextKeyframe) {
    // Record the threshold so samples decoded before it can be dropped.
    mTimeThreshold = aTimeThreshold;
    mDropAudioBeforeThreshold = true;
    mDropVideoBeforeThreshold = true;
  }
  if (IsSeeking()) {
    MSE_DEBUG("called mid-seek. Rejecting.");
    mVideoPromise.Reject(CANCELED, __func__);
    return p;
  }
  MOZ_DIAGNOSTIC_ASSERT(!mVideoSeekRequest.Exists());
  mForceVideoDecodeAhead = aForceDecodeAhead;

  // Pick the source-buffer decoder covering mLastVideoTime.
  SwitchSourceResult ret = SwitchVideoSource(&mLastVideoTime);
  switch (ret) {
    case SOURCE_NEW:
      // A different underlying reader was selected: reset it and seek it to
      // the current playback time before issuing the sample request.
      GetVideoReader()->ResetDecode();
      mVideoSeekRequest.Begin(GetVideoReader()->Seek(GetReaderVideoTime(mLastVideoTime), 0)
                              ->Then(OwnerThread(), __func__, this,
                                     &MediaSourceReader::CompleteVideoSeekAndDoRequest,
                                     &MediaSourceReader::CompleteVideoSeekAndRejectPromise));
      break;
    case SOURCE_NONE:
      if (!mLastVideoTime) {
        // This is the first call to RequestVideoData.
        // Fallback to using decoder with earliest data.
        mVideoSourceDecoder = FirstDecoder(MediaData::VIDEO_DATA);
      }
      if (mLastVideoTime || !mVideoSourceDecoder) {
        CheckForWaitOrEndOfStream(MediaData::VIDEO_DATA, mLastVideoTime);
        break;
      }
      // Fallback to getting first frame from first decoder.
      // NOTE: deliberate fall-through into the default case below.
    default:
      DoVideoRequest();
      break;
  }
  return p;
}