void
MediaEngineDefaultVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                          SourceMediaStream* aSource,
                                          TrackID aID,
                                          StreamTime aDesiredTime,
                                          TrackTicks& aLastEndTime)
{
  // Note: AppendToTrack takes ownership of the segment's contents
  VideoSegment segment;

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }

  // Note: we're not giving up mImage here
  nsRefPtr<layers::Image> image = mImage;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;

  if (delta > 0) {
    // nullptr images are allowed
    if (image) {
      segment.AppendFrame(image.forget(), delta,
                          IntSize(mOpts.mWidth, mOpts.mHeight));
    } else {
      segment.AppendFrame(nullptr, delta, IntSize(0, 0));
    }
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &segment)) {
      aLastEndTime = target;
    }
  }
}
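// For reference: the shape of the tick conversion used throughout these
// call sites. This is a sketch under the assumption that StreamTime /
// MediaTime is a fixed-point count of seconds with MEDIA_TIME_FRAC_BITS
// fractional bits; it is illustrative, not the shipped definition.
static TrackTicks
SketchTimeToTicksRoundUp(TrackRate aRate, StreamTime aTime)
{
  // Scale the fixed-point seconds by the rate, then round up so a
  // partially-covered tick still counts. With aRate == USECS_PER_S, as in
  // the video sources here, the resulting ticks are microseconds.
  return (aTime * aRate + (1 << MEDIA_TIME_FRAC_BITS) - 1) >> MEDIA_TIME_FRAC_BITS;
}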
void
MediaEngineTabVideoSource::NotifyPull(MediaStreamGraph*,
                                      SourceMediaStream* aSource,
                                      mozilla::TrackID aID,
                                      mozilla::StreamTime aDesiredTime,
                                      mozilla::TrackTicks& aLastEndTime)
{
  VideoSegment segment;

  MonitorAutoLock mon(mMonitor);

  // Note: we're not giving up mImage here
  nsRefPtr<layers::CairoImage> image = mImage;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;

  if (delta > 0) {
    // nullptr images are allowed
    if (image) {
      gfx::IntSize size = image->GetSize();
      segment.AppendFrame(image.forget(), delta, gfx::ThebesIntSize(size));
    } else {
      segment.AppendFrame(nullptr, delta, gfxIntSize(0, 0));
    }
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &segment)) {
      aLastEndTime = target;
    }
  }
}
void
AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex,
                                            MediaStream* aRelativeToStream,
                                            double aStreamTime)
{
  StreamTime streamTime =
    std::max<MediaTime>(0, SecondsToMediaTime(aStreamTime));
  GraphTime graphTime = aRelativeToStream->StreamTimeToGraphTime(streamTime);
  StreamTime thisStreamTime = GraphTimeToStreamTimeOptimistic(graphTime);
  TrackTicks ticks = TimeToTicksRoundUp(IdealAudioRate(), thisStreamTime);
  mEngine->SetStreamTimeParameter(aIndex, ticks);
}
TrackTicks
WebAudioUtils::ConvertDestinationStreamTimeToSourceStreamTime(double aTime,
                                                              AudioNodeStream* aSource,
                                                              MediaStream* aDestination)
{
  StreamTime streamTime = std::max<MediaTime>(0, SecondsToMediaTime(aTime));
  GraphTime graphTime = aDestination->StreamTimeToGraphTime(streamTime);
  StreamTime thisStreamTime = aSource->GraphTimeToStreamTimeOptimistic(graphTime);
  TrackTicks ticks = TimeToTicksRoundUp(aSource->SampleRate(), thisStreamTime);
  return ticks;
}
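// Companion sketch for the two conversions above, under the same fixed-point
// assumption as SketchTimeToTicksRoundUp (rounding of the product elided).
static MediaTime
SketchSecondsToMediaTime(double aSeconds)
{
  return MediaTime(aSeconds * (1 << MEDIA_TIME_FRAC_BITS));
}
// Worked example with illustrative numbers: for aTime = 0.5 s, a source
// sample rate of 48000 Hz, and identity graph/stream mappings (no blocked
// intervals), the chain reduces to
//   TimeToTicksRoundUp(48000, SecondsToMediaTime(0.5)) == 24000 ticks.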
void
MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream* aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks& aLastEndTime)
{
  // Ignore - we push audio data
#ifdef DEBUG
  TrackTicks target = TimeToTicksRoundUp(SAMPLE_FREQUENCY, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  LOG(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld",
       (int64_t) aDesiredTime, (int64_t) target, (int64_t) delta));
  aLastEndTime = target;
#endif
}
size_t
AudioNodeExternalInputStream::GetTrackMapEntry(const StreamBuffer::Track& aTrack,
                                               GraphTime aFrom)
{
  AudioSegment* segment = aTrack.Get<AudioSegment>();

  // Check the map for an existing entry corresponding to the input track.
  for (size_t i = 0; i < mTrackMap.Length(); ++i) {
    TrackMapEntry* map = &mTrackMap[i];
    if (map->mTrackID == aTrack.GetID()) {
      return i;
    }
  }

  // Determine channel count by finding the first entry with non-silent data.
  AudioSegment::ChunkIterator ci(*segment);
  while (!ci.IsEnded() && ci->IsNull()) {
    ci.Next();
  }
  if (ci.IsEnded()) {
    // The track is entirely silence so far, we can ignore it for now.
    return nsTArray<TrackMapEntry>::NoIndex;
  }

  // Create a speex resampler with the same sample rate and number of channels
  // as the track.
  SpeexResamplerState* resampler = nullptr;
  size_t channelCount = std::min((*ci).mChannelData.Length(),
                                 WebAudioUtils::MaxChannelCount);
  if (aTrack.GetRate() != mSampleRate) {
    resampler = speex_resampler_init(channelCount,
                                     aTrack.GetRate(), mSampleRate,
                                     SPEEX_RESAMPLER_QUALITY_DEFAULT, nullptr);
    speex_resampler_skip_zeros(resampler);
  }

  TrackMapEntry* map = mTrackMap.AppendElement();
  map->mEndOfConsumedInputTicks = 0;
  map->mEndOfLastInputIntervalInInputStream = -1;
  map->mEndOfLastInputIntervalInOutputStream = -1;
  map->mSamplesPassedToResampler =
    TimeToTicksRoundUp(aTrack.GetRate(), GraphTimeToStreamTime(aFrom));
  map->mResampler = resampler;
  map->mResamplerChannelCount = channelCount;
  map->mTrackID = aTrack.GetID();
  return mTrackMap.Length() - 1;
}
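// TrackMapEntry::ResampleInputData, which later feeds the resampler created
// above, is not part of this listing. A minimal per-channel sketch of what
// that feeding looks like with the libspeexdsp API (from
// <speex/speex_resampler.h>); buffer sizing and chunk iteration are elided:
static void
SketchResampleChannel(SpeexResamplerState* aResampler, uint32_t aChannel,
                      const float* aInput, uint32_t aInputFrames,
                      float* aOutput, uint32_t aOutputCapacity)
{
  spx_uint32_t inLen = aInputFrames;      // in/out: frames consumed
  spx_uint32_t outLen = aOutputCapacity;  // in/out: frames produced
  speex_resampler_process_float(aResampler, aChannel,
                                aInput, &inLen, aOutput, &outLen);
}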
// Called if the graph thinks it's running out of buffered video; repeat
// the last frame for whatever minimum period it thinks it needs. Note that
// this means that no *real* frame can be inserted during this period.
void
MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
                                         SourceMediaStream* aSource,
                                         TrackID aID,
                                         StreamTime aDesiredTime,
                                         TrackTicks& aLastEndTime)
{
  VideoSegment segment;

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }

  // Note: we're not giving up mImage here
  nsRefPtr<layers::Image> image = mImage;
  TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
  TrackTicks delta = target - aLastEndTime;
  LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s",
            (int64_t) aDesiredTime, (int64_t) target, (int64_t) delta,
            image ? "" : "<null>"));

  // Bug 846188 We may want to limit incoming frames to the requested frame
  // rate mFps - if you want 30FPS, and the camera gives you 60FPS, this
  // could cause issues.
  // We may want to signal if the actual frame rate is below mMinFPS -
  // cameras often don't return the requested frame rate especially in low
  // light; we should consider surfacing this so that we can switch to a
  // lower resolution (which may up the frame rate).

  // Don't append if we've already provided a frame that supposedly goes past
  // the current aDesiredTime. Doing so means a negative delta and thus messes
  // up handling of the graph.
  if (delta > 0) {
    // nullptr images are allowed
    if (image) {
      segment.AppendFrame(image.forget(), delta, gfxIntSize(mWidth, mHeight));
    } else {
      segment.AppendFrame(nullptr, delta, gfxIntSize(0, 0));
    }
    // This can fail if either a) we haven't added the track yet, or b)
    // we've removed or finished the track.
    if (aSource->AppendToTrack(aID, &segment)) {
      aLastEndTime = target;
    }
  }
}
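// Worked example of the delta guard above, with illustrative numbers: at
// rate USECS_PER_S the ticks are microseconds of stream time, so if a
// previous pull already advanced aLastEndTime to 2,000,000 while the current
// target rounds to 1,990,000, delta is -10,000 and nothing is appended;
// appending a negative-duration frame would corrupt the graph's bookkeeping.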
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  mLastChunks.SetLength(1);

  // GC stuff can result in our input stream being destroyed before this
  // stream. Handle that.
  if (mInputs.IsEmpty()) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  nsAutoTArray<bool,1> trackMapEntriesUsed;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    // Create a TrackMapEntry if necessary.
    size_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
    // Maybe there's nothing in this track yet. If so, ignore it. (While the
    // track is only playing silence, we may not be able to determine the
    // correct number of channels to start resampling.)
    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
      continue;
    }

    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
      trackMapEntriesUsed.AppendElement(false);
    }
    trackMapEntriesUsed[trackMapIndex] = true;

    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
    AudioSegment segment;
    GraphTime next;
    TrackRate inputTrackRate = inputTrack.GetRate();
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval =
        mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd) {
        break;
      }
      next = interval.mEnd;

      // Ticks >= startTicks and < endTicks are in the interval
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      TrackTicks startTicks =
        trackMap->mSamplesPassedToResampler + segment.GetDuration();
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                   "Samples missing");
      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
      TrackTicks ticks = endTicks - startTicks;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // See comments in TrackUnionStream::CopyTrackData
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
        TrackTicks inputTrackEndPoint =
          inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;

        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
          // Start of a new series of intervals where neither stream is blocked.
          trackMap->mEndOfConsumedInputTicks =
            TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
        }
        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
        TrackTicks inputEndTicks = inputStartTicks + ticks;
        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;

        if (inputStartTicks < 0) {
          // Data before the start of the track is just null.
          segment.AppendNullData(-inputStartTicks);
          inputStartTicks = 0;
        }
        if (inputEndTicks > inputStartTicks) {
          segment.AppendSlice(*inputTrack.GetSegment(),
                              std::min(inputTrackEndPoint, inputStartTicks),
                              std::min(inputTrackEndPoint, inputEndTicks));
        }
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - segment.GetDuration());
      }
    }

    trackMap->mSamplesPassedToResampler += segment.GetDuration();
    trackMap->ResampleInputData(&segment);

    if (trackMap->mResampledData.GetDuration() <
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
      // We don't have enough data. Delay it.
      trackMap->mResampledData.InsertNullDataAtStart(
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE -
        trackMap->mResampledData.GetDuration());
    }
    audioSegments.AppendElement()->AppendSlice(trackMap->mResampledData,
      mCurrentOutputPosition, mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    trackMap->mResampledData.ForgetUpTo(mCurrentOutputPosition +
                                        WEBAUDIO_BLOCK_SIZE);
    inputChannels = GetAudioChannelsSuperset(inputChannels,
                                             trackMap->mResamplerChannelCount);
  }

  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (i >= int32_t(trackMapEntriesUsed.Length()) || !trackMapEntriesUsed[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0],
                             &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  mCurrentOutputPosition += WEBAUDIO_BLOCK_SIZE;

  // Using AudioNodeStream's AdvanceOutputSegment to push the media stream
  // graph along with null data.
  AdvanceOutputSegment();
}
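// Numeric illustration of the "not enough data" branch above (illustrative
// values; WEBAUDIO_BLOCK_SIZE is 128 frames, one Web Audio render quantum):
// with mCurrentOutputPosition == 256 and only 300 ticks of resampled data,
// the stream is 84 ticks short of the required 384, so 84 ticks of silence
// are inserted at the start (delaying the real data) before the block
// [256, 384) is sliced out and the consumed prefix forgotten.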