void
DecodedStream::InitTracks()
{
  AssertOwnerThread();

  if (mData->mStreamInitialized) {
    return;
  }

  SourceMediaStream* sourceStream = mData->mStream;

  if (mInfo.HasAudio()) {
    TrackID audioTrackId = mInfo.mAudio.mTrackId;
    AudioSegment* audio = new AudioSegment();
    sourceStream->AddAudioTrack(audioTrackId, mInfo.mAudio.mRate, 0, audio,
                                SourceMediaStream::ADDTRACK_QUEUED);
    mData->mNextAudioTime = mStartTime.ref();
  }

  if (mInfo.HasVideo()) {
    TrackID videoTrackId = mInfo.mVideo.mTrackId;
    VideoSegment* video = new VideoSegment();
    sourceStream->AddTrack(videoTrackId, 0, video,
                           SourceMediaStream::ADDTRACK_QUEUED);
    mData->mNextVideoTime = mStartTime.ref();
  }

  sourceStream->FinishAddTracks();
  mData->mStreamInitialized = true;
}
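// A minimal sketch (not part of DecodedStream) of the queued-track pattern used
// above: tracks added with ADDTRACK_QUEUED are not published to the graph until
// FinishAddTracks() runs, so audio and video appear together. The stream
// argument, track IDs and sample rate below are hypothetical placeholders; only
// the calls already shown in InitTracks() are used.
static void
SketchInitQueuedTracks(SourceMediaStream* aStream)
{
  const TrackID kAudioTrack = 1;   // hypothetical track IDs
  const TrackID kVideoTrack = 2;
  const TrackRate kRate = 48000;   // hypothetical audio rate

  // The segments are handed over to the stream when the tracks are added.
  aStream->AddAudioTrack(kAudioTrack, kRate, 0, new AudioSegment(),
                         SourceMediaStream::ADDTRACK_QUEUED);
  aStream->AddTrack(kVideoTrack, 0, new VideoSegment(),
                    SourceMediaStream::ADDTRACK_QUEUED);

  // Publish both queued tracks in one step.
  aStream->FinishAddTracks();
}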
void
MediaEngineWebRTCAudioSource::Process(const int channel,
                                      const webrtc::ProcessingTypes type,
                                      sample* audio10ms, const int length,
                                      const int samplingFreq, const bool isStereo)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mState != kStarted) {
    return;
  }

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    // Copy the 10 ms input block into a refcounted buffer the segment can own.
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);

    SourceMediaStream* source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      source->AppendToTrack(mTrackID, &segment);
    }
  }
}
void
DOMHwMediaStream::Init(MediaStream* stream)
{
  SourceMediaStream* srcStream = stream->AsSourceStream();

  if (srcStream) {
    VideoSegment segment;
#ifdef MOZ_WIDGET_GONK
    // Because MediaStreamGraph will run out of frames in non-autoplay mode,
    // we must give the frame a large enough duration to cover this situation.
    const StreamTime delta = STREAM_TIME_MAX;

    mImageData.mOverlayId = DEFAULT_IMAGE_ID;
    mImageData.mSize.width = DEFAULT_IMAGE_WIDTH;
    mImageData.mSize.height = DEFAULT_IMAGE_HEIGHT;
    mOverlayImage->SetData(mImageData);

    RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
    mozilla::gfx::IntSize size = image->GetSize();
    segment.AppendFrame(image.forget(), delta, size);
#endif
    srcStream->AddTrack(TRACK_VIDEO_PRIMARY, 0, new VideoSegment());
    srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
    srcStream->FinishAddTracks();
    srcStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  }
}
void
RemoteSourceStreamInfo::StartReceiving()
{
  if (mReceiving || mPipelines.empty()) {
    return;
  }

  mReceiving = true;

  SourceMediaStream* source = GetMediaStream()->GetInputStream()->AsSourceStream();
  source->SetPullEnabled(true);
  // AdvanceKnownTracksTicksTime(HEAT_DEATH_OF_UNIVERSE) means that in
  // theory per the API, we can't add more tracks before that
  // time. However, the impl actually allows it, and it avoids a whole
  // bunch of locking that would be required (and potential blocking)
  // if we used smaller values and updated them on each NotifyPull.
  source->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  CSFLogDebug(logTag, "Finished adding tracks to MediaStream %p", source);
}
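// A minimal, hedged sketch of the pull model that SetPullEnabled(true) opts the
// stream into: the graph calls the stream's listeners each iteration asking for
// data up to aDesiredTime, and the listener must keep the track fed (here with
// silence) so the graph never starves while real packets are still being
// decoded. This is not the actual WebRTC receive listener; the class name,
// mTrackId and mTotalAppended members are hypothetical, and the NotifyPull
// signature is assumed to match MediaStreamListener of this era.
class SketchReceivePullListener : public MediaStreamListener
{
public:
  SketchReceivePullListener(SourceMediaStream* aSource, TrackID aTrackId)
    : mSource(aSource), mTrackId(aTrackId), mTotalAppended(0) {}

  void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime) override
  {
    if (aDesiredTime <= mTotalAppended) {
      return; // Enough data already queued for this iteration.
    }
    // Pad the track with silence up to the time the graph asked for.
    AudioSegment segment;
    segment.AppendNullData(aDesiredTime - mTotalAppended);
    mSource->AppendToTrack(mTrackId, &segment);
    mTotalAppended = aDesiredTime;
  }

private:
  SourceMediaStream* mSource;
  TrackID mTrackId;
  StreamTime mTotalAppended;
};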
void
DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin,
                         const PrincipalHandle& aPrincipalHandle)
{
  AssertOwnerThread();

  if (!mInfo.HasAudio()) {
    return;
  }

  AudioSegment output;
  uint32_t rate = mInfo.mAudio.mRate;
  AutoTArray<RefPtr<MediaData>,10> audio;
  TrackID audioTrackId = mInfo.mAudio.mTrackId;
  SourceMediaStream* sourceStream = mData->mStream;

  // It's OK to hold references to the AudioData because AudioData
  // is ref-counted.
  mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
  for (uint32_t i = 0; i < audio.Length(); ++i) {
    SendStreamAudio(mData.get(), mStartTime.ref(), audio[i], &output, rate,
                    aPrincipalHandle);
  }

  output.ApplyVolume(aVolume);

  if (!aIsSameOrigin) {
    output.ReplaceWithDisabled();
  }

  // |mNextAudioTime| is updated as we process each audio sample in
  // SendStreamAudio(). This is consistent with how |mNextVideoTime|
  // is updated for video samples.
  if (output.GetDuration() > 0) {
    sourceStream->AppendToTrack(audioTrackId, &output);
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
    sourceStream->EndTrack(audioTrackId);
    mData->mHaveSentFinishAudio = true;
  }
}
void
MediaEngineWebRTCAudioSource::Process(int channel, webrtc::ProcessingTypes type,
                                      sample* audio10ms, int length,
                                      int samplingFreq, bool isStereo)
{
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);

    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream* source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a
      // given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion,
              LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);
      source->AppendToTrack(mTrackID, &segment);
    }
  }
}
void
DOMHwMediaStream::SetImageSize(uint32_t width, uint32_t height)
{
#ifdef MOZ_WIDGET_GONK
  OverlayImage::Data imgData;
  imgData.mOverlayId = mOverlayImage->GetOverlayId();
  imgData.mSize = IntSize(width, height);
  mOverlayImage->SetData(imgData);
#endif

  SourceMediaStream* srcStream = GetInputStream()->AsSourceStream();
  StreamBuffer::Track* track = srcStream->FindTrack(TRACK_VIDEO_PRIMARY);
  if (!track || !track->GetSegment()) {
    return;
  }

#ifdef MOZ_WIDGET_GONK
  // Clear the old segment.
  // Changing the existing content of a segment is normally a very bad idea,
  // because it will confuse consumers of MediaStreams. It is only acceptable
  // here because DOMHwMediaStream currently has no consumers of TV streams.
  track->GetSegment()->Clear();

  // Change the image size.
  const StreamTime delta = STREAM_TIME_MAX;
  RefPtr<Image> image = static_cast<Image*>(mOverlayImage.get());
  mozilla::gfx::IntSize size = image->GetSize();
  VideoSegment segment;
  segment.AppendFrame(image.forget(), delta, size);
  srcStream->AppendToTrack(TRACK_VIDEO_PRIMARY, &segment);
#endif
}
void
DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle)
{
  AssertOwnerThread();

  if (!mInfo.HasVideo()) {
    return;
  }

  VideoSegment output;
  TrackID videoTrackId = mInfo.mVideo.mTrackId;
  AutoTArray<RefPtr<MediaData>, 10> video;
  SourceMediaStream* sourceStream = mData->mStream;

  // It's OK to hold references to the VideoData because VideoData
  // is ref-counted.
  mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);

  // tracksStartTimeStamp might be null when the SourceMediaStream has not yet
  // been added to the MediaStreamGraph.
  TimeStamp tracksStartTimeStamp = sourceStream->GetStreamTracksStrartTimeStamp();
  if (tracksStartTimeStamp.IsNull()) {
    tracksStartTimeStamp = TimeStamp::Now();
  }

  for (uint32_t i = 0; i < video.Length(); ++i) {
    VideoData* v = video[i]->As<VideoData>();

    if (mData->mNextVideoTime < v->mTime) {
      // Write last video frame to catch up. mLastVideoImage can be null here
      // which is fine, it just means there's no video.

      // TODO: |mLastVideoImage| should come from the last image rendered
      // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
      // video frame). E.g. if we have a video frame that is 30 sec long
      // and capture happens at 15 sec, we'll have to append a black frame
      // that is 15 sec long.
      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
                              mData->mNextVideoTime,
                              mData->mLastVideoImageDisplaySize,
                              tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->mTime),
                              &output, aPrincipalHandle);
      mData->mNextVideoTime = v->mTime;
    }

    if (mData->mNextVideoTime < v->GetEndTime()) {
      WriteVideoToMediaStream(sourceStream, v->mImage, v->GetEndTime(),
                              mData->mNextVideoTime, v->mDisplay,
                              tracksStartTimeStamp + TimeDuration::FromMicroseconds(v->GetEndTime()),
                              &output, aPrincipalHandle);
      mData->mNextVideoTime = v->GetEndTime();
      mData->mLastVideoImage = v->mImage;
      mData->mLastVideoImageDisplaySize = v->mDisplay;
    }
  }

  // Check the output is not empty.
  if (output.GetLastFrame()) {
    mData->mEOSVideoCompensation = ZeroDurationAtLastChunk(output);
  }

  if (!aIsSameOrigin) {
    output.ReplaceWithDisabled();
  }

  if (output.GetDuration() > 0) {
    sourceStream->AppendToTrack(videoTrackId, &output);
  }

  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
    if (mData->mEOSVideoCompensation) {
      VideoSegment endSegment;
      // Calculate the deviation clock time from DecodedStream.
      int64_t deviation_usec = sourceStream->StreamTimeToMicroseconds(1);
      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
                              mData->mNextVideoTime + deviation_usec,
                              mData->mNextVideoTime,
                              mData->mLastVideoImageDisplaySize,
                              tracksStartTimeStamp + TimeDuration::FromMicroseconds(mData->mNextVideoTime + deviation_usec),
                              &endSegment, aPrincipalHandle);
      mData->mNextVideoTime += deviation_usec;
      MOZ_ASSERT(endSegment.GetDuration() > 0);
      if (!aIsSameOrigin) {
        endSegment.ReplaceWithDisabled();
      }
      sourceStream->AppendToTrack(videoTrackId, &endSegment);
    }
    sourceStream->EndTrack(videoTrackId);
    mData->mHaveSentFinishVideo = true;
  }
}
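// A minimal sketch (a hypothetical helper, not part of DecodedStream) of the
// timing split performed in the loop above: when the next decoded frame starts
// after mNextVideoTime, the previous image is first extended over the gap
// [mNextVideoTime, frameStart), and only then is the new frame written for
// [frameStart, frameEnd). All names and numbers here are illustrative only.
#include <cstdint>

struct SketchSpan { int64_t startUs; int64_t endUs; };

// Fills out[] with the spans that would be written and returns their count
// (0, 1 or 2).
static int
SketchSplitCatchUp(int64_t nextVideoTimeUs, int64_t frameStartUs,
                   int64_t frameEndUs, SketchSpan out[2])
{
  int n = 0;
  if (nextVideoTimeUs < frameStartUs) {
    // Catch-up span, rendered with the last image (possibly none, i.e. black).
    out[n++] = { nextVideoTimeUs, frameStartUs };
    nextVideoTimeUs = frameStartUs;
  }
  if (nextVideoTimeUs < frameEndUs) {
    // The decoded frame itself.
    out[n++] = { nextVideoTimeUs, frameEndUs };
  }
  return n;
}

// Example: with nextVideoTime = 15 s and a frame spanning [30 s, 33 s), this
// yields a 15 s catch-up span [15 s, 30 s) followed by the 3 s frame span
// [30 s, 33 s), mirroring the two WriteVideoToMediaStream calls above.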
void
MediaEngineWebRTCAudioSource::Process(int channel, webrtc::ProcessingTypes type,
                                      sample* audio10ms, int length,
                                      int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent
  // sample, since it's already irrelevant and we want to avoid confusing the
  // AEC far-end input code with "old" audio.
  if (!mStarted) {
    mStarted = true;
    while (gFarendObserver->Size() > 1) {
      FarEndAudioChunk* buffer = gFarendObserver->Pop(); // only call if size() > 0
      free(buffer);
    }
  }

  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk* buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
      int length = buffer->mSamples;
      if (mVoERender->ExternalPlayoutData(buffer->mData,
                                          gFarendObserver->PlayoutFrequency(),
                                          gFarendObserver->PlayoutChannels(),
                                          mPlayoutDelay,
                                          length) == -1) {
        return;
      }
    }
    free(buffer);
  }

#ifdef PR_LOGGING
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;
      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl), DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl), DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted) {
    return;
  }

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);

    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream* source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a
      // given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion,
              LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);
      source->AppendToTrack(mTrackID, &segment);
    }
  }
}