void MediaSource::RemoveSourceBuffer(SourceBuffer& aSourceBuffer, ErrorResult& aRv)
{
  MOZ_ASSERT(NS_IsMainThread());
  SourceBuffer* sourceBuffer = &aSourceBuffer;
  MSE_API("MediaSource(%p)::RemoveSourceBuffer(aSourceBuffer=%p)", this, sourceBuffer);
  if (!mSourceBuffers->Contains(sourceBuffer)) {
    aRv.Throw(NS_ERROR_DOM_NOT_FOUND_ERR);
    return;
  }

  if (sourceBuffer->Updating()) {
    // TODO:
    //   abort stream append loop (if running)
    //   set updating to false
    //   fire "abort" at sourceBuffer
    //   fire "updateend" at sourceBuffer
  }

  // TODO:
  // For all sourceBuffer audioTracks, videoTracks, textTracks:
  //   set sourceBuffer to null
  //   remove sourceBuffer video, audio, text Tracks from MediaElement tracks
  //   remove sourceBuffer video, audio, text Tracks and fire "removetrack" at affected lists
  //   fire "removetrack" at modified MediaElement track lists
  // If removed enabled/selected, fire "change" at affected MediaElement list.
  if (mActiveSourceBuffers->Contains(sourceBuffer)) {
    mActiveSourceBuffers->Remove(sourceBuffer);
  }
  mSourceBuffers->Remove(sourceBuffer);
  // TODO: Free all resources associated with sourceBuffer
}
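The first TODO branch corresponds to the spec's "abort if updating" steps. A minimal sketch of how a SourceBuffer-side helper for it might look, assuming hypothetical AbortBufferAppend and QueueAsyncSimpleEvent helpers (the names are illustrative, not taken from the snippet above):

// Sketch only: helper names and the mUpdating member are assumptions.
void SourceBuffer::AbortUpdating()
{
  MOZ_ASSERT(NS_IsMainThread());
  if (!mUpdating) {
    return;
  }
  // Stop any in-flight append, then clear the flag before firing events
  // so that "updating" already reads false inside the event handlers.
  AbortBufferAppend();                 // hypothetical: cancels the stream append loop
  mUpdating = false;
  QueueAsyncSimpleEvent("abort");      // hypothetical event-queueing helper
  QueueAsyncSimpleEvent("updateend");
}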
RefPtr<MediaSource::ActiveCompletionPromise>
MediaSource::SourceBufferIsActive(SourceBuffer* aSourceBuffer)
{
  MOZ_ASSERT(NS_IsMainThread());
  mActiveSourceBuffers->ClearSimple();
  bool initMissing = false;
  bool found = false;
  for (uint32_t i = 0; i < mSourceBuffers->Length(); i++) {
    SourceBuffer* sourceBuffer = mSourceBuffers->IndexedGetter(i, found);
    MOZ_ALWAYS_TRUE(found);
    if (sourceBuffer == aSourceBuffer) {
      mActiveSourceBuffers->Append(aSourceBuffer);
    } else if (sourceBuffer->IsActive()) {
      mActiveSourceBuffers->AppendSimple(sourceBuffer);
    } else {
      // Some source buffers haven't yet received an init segment.
      // There's nothing more we can do at this stage.
      initMissing = true;
    }
  }
  if (initMissing || !mDecoder) {
    return ActiveCompletionPromise::CreateAndResolve(true, __func__);
  }

  mDecoder->NotifyInitDataArrived();

  // Add our promise to the queue.
  // It will be resolved once the HTMLMediaElement modifies its readyState.
  MozPromiseHolder<ActiveCompletionPromise> holder;
  RefPtr<ActiveCompletionPromise> promise = holder.Ensure(__func__);
  mCompletionPromises.AppendElement(std::move(holder));
  return promise;
}
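Each MozPromiseHolder queued here is expected to be resolved once the HTMLMediaElement's readyState update has completed. A minimal sketch of that resolution point, assuming a CompletePendingTransactions-style method on MediaSource (hypothetical in this context):

// Sketch only: a hypothetical resolution point for the holders queued above.
void MediaSource::CompletePendingTransactions()
{
  MOZ_ASSERT(NS_IsMainThread());
  // Resolve every promise handed out by SourceBufferIsActive(); callers that
  // were waiting on the readyState change can now proceed.
  for (auto& holder : mCompletionPromises) {
    holder.Resolve(true, __func__);
  }
  mCompletionPromises.Clear();
}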
size_t MediaElementSession::maximumMediaSourceBufferSize(const SourceBuffer& buffer) const
{
    // A good quality 1080p video uses 8,000 kbps and stereo audio uses 384 kbps,
    // so assume 95% for video and 5% for audio.
    const float bufferBudgetPercentageForVideo = .95;
    const float bufferBudgetPercentageForAudio = .05;

    size_t maximum;
    Settings* settings = buffer.document().settings();
    if (settings)
        maximum = settings->maximumSourceBufferSize();
    else
        maximum = fiveMinutesOf1080PVideo + fiveMinutesStereoAudio;

    // Allow a SourceBuffer to buffer as though it is audio-only even if it doesn't have any active tracks (yet).
    size_t bufferSize = static_cast<size_t>(maximum * bufferBudgetPercentageForAudio);
    if (buffer.hasVideo())
        bufferSize += static_cast<size_t>(maximum * bufferBudgetPercentageForVideo);

    // FIXME: we might want to modify this algorithm to:
    // - decrease the maximum size for background tabs
    // - decrease the maximum size allowed for inactive elements when a process has more than one
    //   element, eg. so a page with many elements which are played one at a time doesn't keep
    //   everything buffered after an element has finished playing.

    return bufferSize;
}
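For a rough sense of the numbers: five minutes at 8,000 kbps is 8,000 kbps x 300 s / 8 ≈ 300 MB of video, and 384 kbps x 300 s / 8 ≈ 14.4 MB of audio. A self-contained sketch of the same budget split, with the two five-minute constants assumed here rather than taken from the snippet above:

#include <cstddef>
#include <cstdio>

// Assumed values: roughly five minutes of 8 Mbps video and of 384 kbps audio.
constexpr std::size_t kFiveMinutes1080p  = 290 * 1024 * 1024;
constexpr std::size_t kFiveMinutesStereo = 14 * 1024 * 1024;

// Mirrors the budget split above: every buffer gets the audio share,
// and buffers with video get the video share on top of it.
std::size_t BufferBudget(bool hasVideo)
{
    const std::size_t maximum = kFiveMinutes1080p + kFiveMinutesStereo;
    std::size_t budget = static_cast<std::size_t>(maximum * 0.05);
    if (hasVideo)
        budget += static_cast<std::size_t>(maximum * 0.95);
    return budget;
}

int main()
{
    std::printf("audio-only budget:  %zu bytes\n", BufferBudget(false)); // ~15 MiB
    std::printf("audio+video budget: %zu bytes\n", BufferBudget(true));  // ~304 MiB
    return 0;
}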
void MediaSource::SourceBufferIsActive(SourceBuffer* aSourceBuffer)
{
  MOZ_ASSERT(NS_IsMainThread());
  mActiveSourceBuffers->ClearSimple();
  bool found = false;
  for (uint32_t i = 0; i < mSourceBuffers->Length(); i++) {
    SourceBuffer* sourceBuffer = mSourceBuffers->IndexedGetter(i, found);
    MOZ_ALWAYS_TRUE(found);
    if (sourceBuffer == aSourceBuffer) {
      mActiveSourceBuffers->Append(aSourceBuffer);
    } else if (sourceBuffer->IsActive()) {
      mActiveSourceBuffers->AppendSimple(sourceBuffer);
    }
  }
}
void MediaSource::GetBuffered(TimeRanges* aBuffered)
{
  MOZ_ASSERT(aBuffered->Length() == 0);
  if (mActiveSourceBuffers->IsEmpty()) {
    return;
  }

  double highestEndTime = 0;

  nsTArray<nsRefPtr<TimeRanges>> activeRanges;
  for (uint32_t i = 0; i < mActiveSourceBuffers->Length(); ++i) {
    bool found;
    SourceBuffer* sourceBuffer = mActiveSourceBuffers->IndexedGetter(i, found);

    ErrorResult dummy;
    *activeRanges.AppendElement() = sourceBuffer->GetBuffered(dummy);

    highestEndTime = std::max(highestEndTime, activeRanges.LastElement()->GetEndTime());
  }

  TimeRanges* intersectionRanges = aBuffered;
  intersectionRanges->Add(0, highestEndTime);

  for (uint32_t i = 0; i < activeRanges.Length(); ++i) {
    TimeRanges* sourceRanges = activeRanges[i];

    if (mReadyState == MediaSourceReadyState::Ended) {
      // Set the end time on the last range to highestEndTime by adding a
      // new range spanning the current end time to highestEndTime, which
      // Normalize() will then merge with the old last range.
      sourceRanges->Add(sourceRanges->GetEndTime(), highestEndTime);
      sourceRanges->Normalize();
    }

    intersectionRanges->Intersection(sourceRanges);
  }

  MSE_DEBUG("MediaSource(%p)::GetBuffered ranges=%s", this, DumpTimeRanges(intersectionRanges).get());
}
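The buffered attribute computed here is the per-spec intersection of every active buffer's ranges, extended to the highest end time when the stream has ended. A self-contained sketch of the intersection step on plain interval lists (not the TimeRanges class used above), to make the algorithm explicit:

#include <algorithm>
#include <utility>
#include <vector>

using Ranges = std::vector<std::pair<double, double>>; // sorted, non-overlapping [start, end)

// Intersect two normalized range lists; this is the role Intersection() plays above.
Ranges Intersect(const Ranges& a, const Ranges& b)
{
  Ranges out;
  size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    double start = std::max(a[i].first, b[j].first);
    double end = std::min(a[i].second, b[j].second);
    if (start < end) {
      out.push_back({start, end});
    }
    // Advance whichever interval finishes first.
    (a[i].second < b[j].second) ? ++i : ++j;
  }
  return out;
}

// Example: Intersect({{0, 10}}, {{2, 4}, {8, 12}}) yields {{2, 4}, {8, 10}}.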
void CLKernel::Compile( const CLDevice* pdev, SourceBuffer& srcbuf, const char* name )
{
    if( 0!=mProgram )
        clReleaseProgram(mProgram);
    if( 0!=mKernel )
        clReleaseKernel(mKernel);

    srcbuf.CreateCLprogram( pdev, mProgram, mKernel, name );

    ////////////////////////////////////////////////////////////
    // query max workgroup size for kernel on the device
    ////////////////////////////////////////////////////////////

    size_t MaxWorkGroupSize = 0;
    int err = clGetKernelWorkGroupInfo( mKernel,
                                        pdev->GetDeviceID(),
                                        CL_KERNEL_WORK_GROUP_SIZE,
                                        sizeof(size_t),
                                        &MaxWorkGroupSize,
                                        NULL );
    OrkAssert( err==CL_SUCCESS );
    miMaxWorkGroupSize = int(MaxWorkGroupSize);
}
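The queried CL_KERNEL_WORK_GROUP_SIZE is typically used as an upper bound on the local work size at enqueue time. A minimal sketch, assuming hypothetical GetKernel() and GetMaxWorkGroupSize() accessors on CLKernel (neither appears in the snippet above):

// Sketch only: accessor names on CLKernel are assumptions.
void EnqueueClamped( cl_command_queue queue, const CLKernel& kernel, size_t globalSize )
{
    // Never request a larger work-group than the device reported for this kernel.
    size_t maxWG = size_t(kernel.GetMaxWorkGroupSize());
    size_t localSize = (globalSize < maxWG) ? globalSize : maxWG;

    // OpenCL 1.x requires the global size to be a multiple of the local size,
    // so round up; the kernel is assumed to guard against the padded work-items.
    size_t paddedGlobal = ((globalSize + localSize - 1) / localSize) * localSize;

    cl_int err = clEnqueueNDRangeKernel( queue, kernel.GetKernel(),
                                         1,            // work_dim
                                         NULL,         // global offset
                                         &paddedGlobal,
                                         &localSize,
                                         0, NULL, NULL );
    OrkAssert( err==CL_SUCCESS );
}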
static NS_METHOD
AppendToSourceBuffer(nsIInputStream*,
                     void* aClosure,
                     const char* aFromRawSegment,
                     uint32_t,
                     uint32_t aCount,
                     uint32_t* aWriteCount)
{
  SourceBuffer* sourceBuffer = static_cast<SourceBuffer*>(aClosure);

  // Copy the source data. Unless we hit OOM, we squelch the return value here,
  // because returning an error means that ReadSegments stops reading data, and
  // we want to ensure that we read everything we get. If we hit OOM then we
  // return a failed status to the caller.
  nsresult rv = sourceBuffer->Append(aFromRawSegment, aCount);
  if (rv == NS_ERROR_OUT_OF_MEMORY) {
    return rv;
  }

  // Report that we wrote everything we got.
  *aWriteCount = aCount;

  return NS_OK;
}
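This callback matches the nsWriteSegmentFun shape that nsIInputStream::ReadSegments expects, so a caller would drive it roughly as below (a sketch; the surrounding function and parameter names are assumptions):

// Sketch only: FeedStreamIntoSourceBuffer and its parameters are illustrative.
nsresult FeedStreamIntoSourceBuffer(nsIInputStream* aInputStream,
                                    SourceBuffer* aSourceBuffer,
                                    uint32_t aCount)
{
  uint32_t bytesRead = 0;
  // ReadSegments invokes AppendToSourceBuffer once per contiguous segment,
  // passing aSourceBuffer through as the opaque closure pointer.
  return aInputStream->ReadSegments(AppendToSourceBuffer, aSourceBuffer, aCount, &bytesRead);
}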
void MediaSource::onTrackChanged(TrackBase* track) {
  DCHECK(RuntimeEnabledFeatures::audioVideoTracksEnabled());
  SourceBuffer* sourceBuffer =
      SourceBufferTrackBaseSupplement::sourceBuffer(*track);
  if (!sourceBuffer)
    return;

  DCHECK(m_sourceBuffers->contains(sourceBuffer));
  if (track->type() == WebMediaPlayer::AudioTrack) {
    sourceBuffer->audioTracks().scheduleChangeEvent();
  } else if (track->type() == WebMediaPlayer::VideoTrack) {
    if (static_cast<VideoTrack*>(track)->selected())
      sourceBuffer->videoTracks().trackSelected(track->id());
    sourceBuffer->videoTracks().scheduleChangeEvent();
  }

  bool isActive = (sourceBuffer->videoTracks().selectedIndex() != -1) ||
                  sourceBuffer->audioTracks().hasEnabledTrack();
  setSourceBufferActive(sourceBuffer, isActive);
}
void MediaSource::removeSourceBuffer(SourceBuffer& buffer, ExceptionCode& ec)
{
    LOG(MediaSource, "MediaSource::removeSourceBuffer() %p", this);
    Ref<SourceBuffer> protect(buffer);

    // 2. If sourceBuffer specifies an object that is not in sourceBuffers then
    // throw a NOT_FOUND_ERR exception and abort these steps.
    if (!m_sourceBuffers->length() || !m_sourceBuffers->contains(buffer)) {
        ec = NOT_FOUND_ERR;
        return;
    }

    // 3. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
    buffer.abortIfUpdating();

    // 4. Let SourceBuffer audioTracks list equal the AudioTrackList object returned by sourceBuffer.audioTracks.
    RefPtr<AudioTrackList> audioTracks = buffer.audioTracks();

    // 5. If the SourceBuffer audioTracks list is not empty, then run the following steps:
    if (audioTracks->length()) {
        // 5.1 Let HTMLMediaElement audioTracks list equal the AudioTrackList object returned by the audioTracks
        // attribute on the HTMLMediaElement.
        // 5.2 Let the removed enabled audio track flag equal false.
        bool removedEnabledAudioTrack = false;

        // 5.3 For each AudioTrack object in the SourceBuffer audioTracks list, run the following steps:
        while (audioTracks->length()) {
            auto& track = *audioTracks->lastItem();

            // 5.3.1 Set the sourceBuffer attribute on the AudioTrack object to null.
            track.setSourceBuffer(nullptr);

            // 5.3.2 If the enabled attribute on the AudioTrack object is true, then set the removed enabled
            // audio track flag to true.
            if (track.enabled())
                removedEnabledAudioTrack = true;

            // 5.3.3 Remove the AudioTrack object from the HTMLMediaElement audioTracks list.
            // 5.3.4 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the HTMLMediaElement audioTracks list.
            if (mediaElement())
                mediaElement()->removeAudioTrack(track);

            // 5.3.5 Remove the AudioTrack object from the SourceBuffer audioTracks list.
            // 5.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the SourceBuffer audioTracks list.
            audioTracks->remove(track);
        }

        // 5.4 If the removed enabled audio track flag equals true, then queue a task to fire a simple event
        // named change at the HTMLMediaElement audioTracks list.
        if (removedEnabledAudioTrack)
            mediaElement()->audioTracks().scheduleChangeEvent();
    }

    // 6. Let SourceBuffer videoTracks list equal the VideoTrackList object returned by sourceBuffer.videoTracks.
    RefPtr<VideoTrackList> videoTracks = buffer.videoTracks();

    // 7. If the SourceBuffer videoTracks list is not empty, then run the following steps:
    if (videoTracks->length()) {
        // 7.1 Let HTMLMediaElement videoTracks list equal the VideoTrackList object returned by the videoTracks
        // attribute on the HTMLMediaElement.
        // 7.2 Let the removed selected video track flag equal false.
        bool removedSelectedVideoTrack = false;

        // 7.3 For each VideoTrack object in the SourceBuffer videoTracks list, run the following steps:
        while (videoTracks->length()) {
            auto& track = *videoTracks->lastItem();

            // 7.3.1 Set the sourceBuffer attribute on the VideoTrack object to null.
            track.setSourceBuffer(nullptr);

            // 7.3.2 If the selected attribute on the VideoTrack object is true, then set the removed selected
            // video track flag to true.
            if (track.selected())
                removedSelectedVideoTrack = true;

            // 7.3.3 Remove the VideoTrack object from the HTMLMediaElement videoTracks list.
            // 7.3.4 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the HTMLMediaElement videoTracks list.
            if (mediaElement())
                mediaElement()->removeVideoTrack(track);

            // 7.3.5 Remove the VideoTrack object from the SourceBuffer videoTracks list.
            // 7.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the SourceBuffer videoTracks list.
            videoTracks->remove(track);
        }

        // 7.4 If the removed selected video track flag equals true, then queue a task to fire a simple event
        // named change at the HTMLMediaElement videoTracks list.
        if (removedSelectedVideoTrack)
            mediaElement()->videoTracks().scheduleChangeEvent();
    }

    // 8. Let SourceBuffer textTracks list equal the TextTrackList object returned by sourceBuffer.textTracks.
    RefPtr<TextTrackList> textTracks = buffer.textTracks();

    // 9. If the SourceBuffer textTracks list is not empty, then run the following steps:
    if (textTracks->length()) {
        // 9.1 Let HTMLMediaElement textTracks list equal the TextTrackList object returned by the textTracks
        // attribute on the HTMLMediaElement.
        // 9.2 Let the removed enabled text track flag equal false.
        bool removedEnabledTextTrack = false;

        // 9.3 For each TextTrack object in the SourceBuffer textTracks list, run the following steps:
        while (textTracks->length()) {
            auto& track = *textTracks->lastItem();

            // 9.3.1 Set the sourceBuffer attribute on the TextTrack object to null.
            track.setSourceBuffer(nullptr);

            // 9.3.2 If the mode attribute on the TextTrack object is set to "showing" or "hidden", then
            // set the removed enabled text track flag to true.
            if (track.mode() == TextTrack::Mode::Showing || track.mode() == TextTrack::Mode::Hidden)
                removedEnabledTextTrack = true;

            // 9.3.3 Remove the TextTrack object from the HTMLMediaElement textTracks list.
            // 9.3.4 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the HTMLMediaElement textTracks list.
            if (mediaElement())
                mediaElement()->removeTextTrack(track);

            // 9.3.5 Remove the TextTrack object from the SourceBuffer textTracks list.
            // 9.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the SourceBuffer textTracks list.
            textTracks->remove(track);
        }

        // 9.4 If the removed enabled text track flag equals true, then queue a task to fire a simple event
        // named change at the HTMLMediaElement textTracks list.
        if (removedEnabledTextTrack)
            mediaElement()->textTracks().scheduleChangeEvent();
    }

    // 10. If sourceBuffer is in activeSourceBuffers, then remove sourceBuffer from activeSourceBuffers ...
    m_activeSourceBuffers->remove(buffer);

    // 11. Remove sourceBuffer from sourceBuffers and fire a removesourcebuffer event
    // on that object.
    m_sourceBuffers->remove(buffer);

    // 12. Destroy all resources for sourceBuffer.
    buffer.removedFromMediaSource();
}
TEST_F(SourceBufferTest, Lf) {
  const char16 text[] = L"line1\nline2\nline3\n";
  SourceBuffer buffer;
  buffer.Add(text, ::lstrlen(text));
  EXPECT_EQ(String("line2"), buffer.GetLine(1));
}
SourceBuffer* SourceBuffer::create(PassOwnPtr<WebSourceBuffer> webSourceBuffer,
                                   MediaSource* source,
                                   GenericEventQueue* asyncEventQueue) {
  SourceBuffer* sourceBuffer =
      new SourceBuffer(webSourceBuffer, source, asyncEventQueue);
  sourceBuffer->suspendIfNeeded();
  return sourceBuffer;
}