void VideoCodecStatistics::EndOfCallStats() {
#ifdef MOZILLA_INTERNAL_API
  if (!mFirstDecodeTime.IsNull()) {
    TimeDuration callDelta = TimeStamp::Now() - mFirstDecodeTime;
    if (callDelta.ToSeconds() != 0) {
      uint32_t recovered_per_min = mRecoveredBeforeLoss/(callDelta.ToSeconds()/60);
      CSFLogError(logTag, "Video recovery before error per min %u", recovered_per_min);
#if !defined(MOZILLA_XPCOMRT_API)
      Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_RECOVERY_BEFORE_ERROR_PER_MIN,
                            recovered_per_min);
#endif // !defined(MOZILLA_XPCOMRT_API)
      uint32_t err_per_min = mRecoveredLosses/(callDelta.ToSeconds()/60);
      CSFLogError(logTag, "Video recovery after error per min %u", err_per_min);
#if !defined(MOZILLA_XPCOMRT_API)
      Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_RECOVERY_AFTER_ERROR_PER_MIN,
                            err_per_min);
#endif // !defined(MOZILLA_XPCOMRT_API)
      float percent = (mTotalLossTime.ToSeconds()*100)/callDelta.ToSeconds();
      CSFLogError(logTag, "Video error time percentage %f%%", percent);
#if !defined(MOZILLA_XPCOMRT_API)
      Telemetry::Accumulate(Telemetry::WEBRTC_VIDEO_DECODE_ERROR_TIME_PERMILLE,
                            static_cast<uint32_t>(percent*10));
#endif // !defined(MOZILLA_XPCOMRT_API)
    }
  }
#endif
}
// Iterate the same way we do in GetFPS()
int FPSCounter::BuildHistogram(std::map<int, int>& aFpsData) {
  TimeStamp currentIntervalStart = GetLatestTimeStamp();
  TimeStamp currentTimeStamp = GetLatestTimeStamp();
  TimeStamp startTimeStamp = GetLatestTimeStamp();

  int frameCount = 0;
  int totalFrameCount = 0;

  ResetReverseIterator();
  while (HasNext(startTimeStamp)) {
    currentTimeStamp = GetNextTimeStamp();
    TimeDuration interval = currentIntervalStart - currentTimeStamp;
    if (interval.ToSeconds() >= 1.0) {
      currentIntervalStart = currentTimeStamp;
      aFpsData[frameCount]++;
      frameCount = 0;
    }

    frameCount++;
    totalFrameCount++;
  }

  TimeDuration totalTime = currentIntervalStart - currentTimeStamp;
  printf_stderr("Discarded %d frames over %f ms in histogram for %s\n",
                frameCount, totalTime.ToMilliseconds(), mFPSName);
  return totalFrameCount;
}
double ElementPropertyTransition::ValuePortionFor(TimeStamp aRefreshTime) const {
  // Set |timePortion| to the portion of the way we are through the time
  // input to the transition's timing function (always within the range
  // 0-1).
  double duration = mDuration.ToSeconds();
  NS_ABORT_IF_FALSE(duration >= 0.0, "negative duration forbidden");
  double timePortion;
  if (duration == 0.0) {
    // When duration is zero, we can still have a transition when delay
    // is nonzero.  mStartTime already incorporates delay.
    if (aRefreshTime >= mStartTime) {
      timePortion = 1.0;
    } else {
      timePortion = 0.0;
    }
  } else {
    timePortion = (aRefreshTime - mStartTime).ToSeconds() / duration;
    if (timePortion < 0.0)
      timePortion = 0.0; // use start value during transition-delay
    if (timePortion > 1.0)
      timePortion = 1.0; // we might be behind on flushing
  }

  return mTimingFunction.GetValue(timePortion);
}
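As a worked example with purely illustrative numbers: for a 2 s transition sampled 0.5 s after mStartTime, timePortion is 0.5 / 2.0 = 0.25, which is already inside [0, 1], so the function returns mTimingFunction.GetValue(0.25).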
void AxisPhysicsModel::Simulate(const TimeDuration& aDeltaTime) {
  for (mProgress += aDeltaTime.ToSeconds() / kFixedTimestep;
       mProgress > 1.0; mProgress -= 1.0) {
    Integrate(kFixedTimestep);
  }
}
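The accumulator in that for-loop is terse, so here is a minimal, stand-alone sketch of the same fixed-timestep accumulation pattern. The kStepSeconds constant and SimulateSketch helper are illustrative stand-ins, not the actual AxisPhysicsModel names or step size.

#include <cstdio>

// Hypothetical fixed step for illustration only; AxisPhysicsModel defines its
// own kFixedTimestep elsewhere.
static const double kStepSeconds = 1.0 / 120.0;

// Accumulate elapsed wall-clock time in units of fixed steps, then consume
// whole steps so the integration advances deterministically regardless of
// how long each frame took.
static void SimulateSketch(double aDeltaSeconds, double& aProgress) {
  for (aProgress += aDeltaSeconds / kStepSeconds; aProgress > 1.0; aProgress -= 1.0) {
    // The real code calls Integrate(kFixedTimestep) here.
    std::printf("integrate one step of %f s\n", kStepSeconds);
  }
}

int main() {
  double progress = 0.0;
  // A ~16 ms frame is ~1.9 fixed steps: one step runs, ~0.9 steps carry over.
  SimulateSketch(0.016, progress);
  std::printf("leftover progress: %f\n", progress);
  return 0;
}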
// Returns true if we iterated over a full interval of data
bool FPSCounter::IteratedFullInterval(TimeStamp aTimestamp, double aDuration) {
  MOZ_ASSERT(mIteratorIndex >= 0, "Cannot be negative");
  MOZ_ASSERT(mIteratorIndex < kMaxFrames,
             "Iterator index cannot be greater than kMaxFrames");

  TimeStamp currentStamp = mFrameTimestamps[mIteratorIndex];
  TimeDuration duration = aTimestamp - currentStamp;
  return duration.ToSeconds() >= aDuration;
}
void FunctionTimerLog::LogString(const char *str) {
  if (mFile) {
    mLatest = TimeStamp::Now();
    TimeDuration elapsed = mLatest - sAppStart;
    fprintf((FILE*)mFile, "[% 9.2f] %s\n", elapsed.ToSeconds() * 1000.0, str);
  }
}
void BenchmarkPlayback::DrainComplete() {
  RefPtr<Benchmark> ref(mMainThreadState);
  Dispatch(NS_NewRunnableFunction([this, ref]() {
    int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
    TimeDuration elapsedTime = TimeStamp::Now() - mDecodeStartTime;
    uint32_t decodeFps = frames / elapsedTime.ToSeconds();
    MainThreadShutdown();
    ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
      ref->ReturnResult(decodeFps);
    }));
  }));
}
void TestLatencyParent::Main() {
  TimeDuration resolution = TimeDuration::Resolution();
  if (resolution.ToSeconds() > kTimingResolutionCutoff) {
    puts(" (skipping TestLatency, timing resolution is too poor)");
    Close();
    return;
  }

  printf(" timing resolution: %g seconds\n",
         resolution.ToSecondsSigDigits());

  if (mozilla::ipc::LoggingEnabled())
    NS_RUNTIMEABORT("you really don't want to log all IPC messages during this test, trust me");

  PingPongTrial();
}
void Compositor::NotifyNotUsedAfterComposition(TextureHost* aTextureHost) {
  MOZ_ASSERT(!mIsDestroyed);

  mNotifyNotUsedAfterComposition.AppendElement(aTextureHost);

  // If Compositor holds many TextureHosts without compositing,
  // the TextureHosts should be flushed to reduce memory consumption.
  const int thresholdCount = 5;
  const double thresholdSec = 2.0f;
  if (mNotifyNotUsedAfterComposition.Length() > thresholdCount) {
    TimeDuration duration = TimeStamp::Now() - mLastCompositionEndTime;
    // Check if we could flush
    if (duration.ToSeconds() > thresholdSec) {
      FlushPendingNotifyNotUsed();
    }
  }
}
bool TextureSourceProvider::NotifyNotUsedAfterComposition(TextureHost* aTextureHost) {
  mNotifyNotUsedAfterComposition.AppendElement(aTextureHost);

  // If Compositor holds many TextureHosts without compositing,
  // the TextureHosts should be flushed to reduce memory consumption.
  const int thresholdCount = 5;
  const double thresholdSec = 2.0f;
  if (mNotifyNotUsedAfterComposition.Length() > thresholdCount) {
    TimeStamp lastCompositionEndTime = GetLastCompositionEndTime();
    TimeDuration duration = lastCompositionEndTime
                              ? TimeStamp::Now() - lastCompositionEndTime
                              : TimeDuration();
    // Check if we could flush
    if (duration.ToSeconds() > thresholdSec) {
      FlushPendingNotifyNotUsed();
    }
  }
  return true;
}
void VRDisplayOpenVR::StopPresentation() {
  if (!mIsPresenting) {
    return;
  }

  mVRCompositor->ClearLastSubmittedFrame();

  mIsPresenting = false;
  const TimeDuration duration = TimeStamp::Now() - mTelemetry.mPresentationStart;
  Telemetry::Accumulate(Telemetry::WEBVR_USERS_VIEW_IN, 2);
  Telemetry::Accumulate(Telemetry::WEBVR_TIME_SPENT_VIEWING_IN_OPENVR,
                        duration.ToMilliseconds());

  ::vr::Compositor_CumulativeStats stats;
  mVRCompositor->GetCumulativeStats(&stats, sizeof(::vr::Compositor_CumulativeStats));
  const uint32_t droppedFramesPerSec =
    (stats.m_nNumReprojectedFrames - mTelemetry.mLastDroppedFrameCount) /
    duration.ToSeconds();
  Telemetry::Accumulate(Telemetry::WEBVR_DROPPED_FRAMES_IN_OPENVR, droppedFramesPerSec);
}
static void HostDB_ClearEntry(PLDHashTable *table, PLDHashEntryHdr *entry) {
  nsHostDBEnt *he = static_cast<nsHostDBEnt*>(entry);
  MOZ_ASSERT(he, "nsHostDBEnt is null!");

  nsHostRecord *hr = he->rec;
  MOZ_ASSERT(hr, "nsHostDBEnt has null host record!");

  LOG(("Clearing cache db entry for host [%s].\n", hr->host));
#if defined(DEBUG) && defined(PR_LOGGING)
  {
    MutexAutoLock lock(hr->addr_info_lock);
    if (!hr->addr_info) {
      LOG(("No address info for host [%s].\n", hr->host));
    } else {
      TimeDuration diff = hr->expiration - TimeStamp::NowLoRes();
      LOG(("Record for [%s] expires in %f seconds.\n", hr->host, diff.ToSeconds()));

      NetAddrElement *addrElement = nullptr;
      char buf[kIPv6CStrBufSize];
      do {
        if (!addrElement) {
          addrElement = hr->addr_info->mAddresses.getFirst();
        } else {
          addrElement = addrElement->getNext();
        }

        if (addrElement) {
          NetAddrToString(&addrElement->mAddress, buf, sizeof(buf));
          LOG((" [%s]\n", buf));
        }
      } while (addrElement);
    }
  }
#endif
  NS_RELEASE(he->rec);
}
void BenchmarkPlayback::Output(MediaData* aData) {
  RefPtr<Benchmark> ref(mMainThreadState);
  Dispatch(NS_NewRunnableFunction([this, ref]() {
    mFrameCount++;
    if (mFrameCount == ref->mParameters.mStartupFrame) {
      mDecodeStartTime = TimeStamp::Now();
    }
    int32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
    TimeDuration elapsedTime = TimeStamp::Now() - mDecodeStartTime;
    if (!mFinished &&
        (frames == ref->mParameters.mFramesToMeasure ||
         elapsedTime >= ref->mParameters.mTimeout)) {
      uint32_t decodeFps = frames / elapsedTime.ToSeconds();
      MainThreadShutdown();
      ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() {
        ref->ReturnResult(decodeFps);
      }));
    }
  }));
}
void BenchmarkPlayback::Output(MediaDataDecoder::DecodedData&& aResults) {
  MOZ_ASSERT(OnThread());
  MOZ_ASSERT(!mFinished);
  RefPtr<Benchmark> ref(mGlobalState);
  mFrameCount += aResults.Length();
  if (!mDecodeStartTime && mFrameCount >= ref->mParameters.mStartupFrame) {
    mDecodeStartTime = Some(TimeStamp::Now());
  }
  TimeStamp now = TimeStamp::Now();
  uint32_t frames = mFrameCount - ref->mParameters.mStartupFrame;
  TimeDuration elapsedTime = now - mDecodeStartTime.refOr(now);
  if (((frames == ref->mParameters.mFramesToMeasure) &&
       mFrameCount > ref->mParameters.mStartupFrame && frames > 0) ||
      elapsedTime >= ref->mParameters.mTimeout || mDrained) {
    uint32_t decodeFps = frames / elapsedTime.ToSeconds();
    GlobalShutdown();
    ref->Dispatch(NS_NewRunnableFunction(
      "BenchmarkPlayback::Output",
      [ref, decodeFps]() { ref->ReturnResult(decodeFps); }));
  }
}
// Function to compulsively shut down the system with a given mode when timeout.
static void* ForceQuitWatchdog(void* aParamPtr) {
  watchdogParam_t* paramPtr = reinterpret_cast<watchdogParam_t*>(aParamPtr);
  if (paramPtr->timeoutSecs > 0 && paramPtr->timeoutSecs <= 30) {
    // If we shut down normally before the timeout, this thread will
    // be harmlessly reaped by the OS.
    TimeStamp deadline =
      (TimeStamp::Now() + TimeDuration::FromSeconds(paramPtr->timeoutSecs));
    while (true) {
      TimeDuration remaining = (deadline - TimeStamp::Now());
      int sleepSeconds = int(remaining.ToSeconds());
      if (sleepSeconds <= 0) {
        break;
      }
      sleep(sleepSeconds);
    }
  }
  hal::ShutdownMode mode = paramPtr->mode;
  delete paramPtr;
  QuitHard(mode);
  return nullptr;
}
static void GetChromeHangReport(Telemetry::ProcessedStack& aStack,
                                int32_t& aSystemUptime,
                                int32_t& aFirefoxUptime) {
  MOZ_ASSERT(winMainThreadHandle);

  // The thread we're about to suspend might have the alloc lock
  // so allocate ahead of time
  std::vector<uintptr_t> rawStack;
  rawStack.reserve(MAX_CALL_STACK_PCS);
  DWORD ret = ::SuspendThread(winMainThreadHandle);
  if (ret == -1) {
    return;
  }
  NS_StackWalk(ChromeStackWalker, /* skipFrames */ 0, /* maxFrames */ 0,
               reinterpret_cast<void*>(&rawStack),
               reinterpret_cast<uintptr_t>(winMainThreadHandle), nullptr);
  ret = ::ResumeThread(winMainThreadHandle);
  if (ret == -1) {
    return;
  }
  aStack = Telemetry::GetStackAndModules(rawStack);

  // Record system uptime (in minutes) at the time of the hang
  aSystemUptime = ((GetTickCount() / 1000) - (gTimeout * 2)) / 60;

  // Record Firefox uptime (in minutes) at the time of the hang
  bool error;
  TimeStamp processCreation = TimeStamp::ProcessCreation(error);
  if (!error) {
    TimeDuration td = TimeStamp::Now() - processCreation;
    aFirefoxUptime = (static_cast<int32_t>(td.ToSeconds()) - (gTimeout * 2)) / 60;
  } else {
    aFirefoxUptime = -1;
  }
}
void nsHostResolver::OnLookupComplete(nsHostRecord *rec, nsresult status, AddrInfo *result) {
  // get the list of pending callbacks for this lookup, and notify
  // them that the lookup is complete.
  PRCList cbs;
  PR_INIT_CLIST(&cbs);
  {
    MutexAutoLock lock(mLock);

    // grab list of callbacks to notify
    MoveCList(rec->callbacks, cbs);

    // update record fields.  We might have a rec->addr_info already if a
    // previous lookup result expired and we're reresolving it..
    AddrInfo *old_addr_info;
    {
      MutexAutoLock lock(rec->addr_info_lock);
      old_addr_info = rec->addr_info;
      rec->addr_info = result;
      rec->addr_info_gencnt++;
    }
    delete old_addr_info;

    rec->expiration = TimeStamp::NowLoRes();
    if (result) {
      rec->expiration += mMaxCacheLifetime;
      rec->negative = false;
    } else {
      rec->expiration += TimeDuration::FromSeconds(60); /* one minute for negative cache */
      rec->negative = true;
    }
    rec->resolving = false;

    if (rec->usingAnyThread) {
      mActiveAnyThreadCount--;
      rec->usingAnyThread = false;
    }

    if (rec->addr_info && !mShutdown) {
      // add to mEvictionQ
      PR_APPEND_LINK(rec, &mEvictionQ);
      NS_ADDREF(rec);
      if (mEvictionQSize < mMaxCacheEntries)
        mEvictionQSize++;
      else {
        // remove first element on mEvictionQ
        nsHostRecord *head =
          static_cast<nsHostRecord *>(PR_LIST_HEAD(&mEvictionQ));
        PR_REMOVE_AND_INIT_LINK(head);
        PL_DHashTableOperate(&mDB, (nsHostKey *) head, PL_DHASH_REMOVE);

        if (!head->negative) {
          // record the age of the entry upon eviction.
          TimeDuration age = TimeStamp::NowLoRes() -
                             (head->expiration - mMaxCacheLifetime);
          Telemetry::Accumulate(Telemetry::DNS_CLEANUP_AGE,
                                static_cast<uint32_t>(age.ToSeconds() / 60));
        }

        // release reference to rec owned by mEvictionQ
        NS_RELEASE(head);
      }
    }
  }

  MOZ_EVENT_TRACER_DONE(rec, "net::dns::resolve");

  if (!PR_CLIST_IS_EMPTY(&cbs)) {
    PRCList *node = cbs.next;
    while (node != &cbs) {
      nsResolveHostCallback *callback =
        static_cast<nsResolveHostCallback *>(node);
      node = node->next;
      callback->OnLookupComplete(this, rec, status);
      // NOTE: callback must not be dereferenced after this point!!
    }
  }

  NS_RELEASE(rec);
}
/* This function tries to stick to portable C89 as much as possible
 * so that it can be easily copied into other applications */
void LayerManagerOGL::FPSState::DrawFPS(GLContext* context, CopyProgram* copyprog) {
  fcount++;

  int rate = 30;
  if (fcount >= rate) {
    TimeStamp now = TimeStamp::Now();
    TimeDuration duration = now - last;
    last = now;
    fps = rate / duration.ToSeconds() + .5;
    fcount = 0;
  }

  GLint viewport[4];
  context->fGetIntegerv(LOCAL_GL_VIEWPORT, viewport);

  static GLuint texture;
  if (!initialized) {
    // Bind the number of textures we need, in this case one.
    context->fGenTextures(1, &texture);
    context->fBindTexture(LOCAL_GL_TEXTURE_2D, texture);
    context->fTexParameteri(LOCAL_GL_TEXTURE_2D, LOCAL_GL_TEXTURE_MIN_FILTER, LOCAL_GL_NEAREST);
    context->fTexParameteri(LOCAL_GL_TEXTURE_2D, LOCAL_GL_TEXTURE_MAG_FILTER, LOCAL_GL_NEAREST);

    unsigned char text[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 255, 255, 0, 0, 255, 255, 255, 0, 255, 255, 255, 0, 255, 0, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 0, 255, 0, 255, 0, 0, 255, 0, 0, 0, 0, 255, 0, 0, 0, 255, 0, 255, 0, 255, 0, 255, 0, 0, 0, 255, 0, 0, 0, 0, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 0, 255, 0, 255, 0, 0, 255, 0, 0, 255, 255, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 0, 0, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 0, 255, 0, 255, 0, 0, 255, 0, 0, 255, 0, 0, 0, 0, 0, 255, 0, 0, 0, 255, 0, 0, 0, 255, 0, 255, 0, 255, 0, 0, 0, 255, 0, 255, 0, 255, 0, 0, 0, 255, 0, 0, 255, 255, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 0, 0, 255, 0, 255, 255, 255, 0, 255, 255, 255, 0, 0, 0, 255, 0, 255, 255, 255, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };

    // convert from 8 bit to 32 bit so that we don't have to write the text above out in 32 bit format
    // we rely on int being 32 bits
    unsigned int* buf = (unsigned int*)malloc(64 * 8 * 4);
    for (int i = 0; i < 7; i++) {
      for (int j = 0; j < 41; j++) {
        unsigned int purple = 0xfff000ff;
        unsigned int white = 0xffffffff;
        buf[i * 64 + j] = (text[i * 41 + j] == 0) ? purple : white;
      }
    }
    context->fTexImage2D(LOCAL_GL_TEXTURE_2D, 0, LOCAL_GL_RGBA,
                         64, 8, 0, LOCAL_GL_RGBA, LOCAL_GL_UNSIGNED_BYTE, buf);
    free(buf);

    initialized = true;
  }

  struct Vertex2D {
    float x, y;
  };
  const Vertex2D vertices[] = {
    { -1.0f, 1.0f - 42.f / viewport[3] },
    { -1.0f, 1.0f },
    { -1.0f + 22.f / viewport[2], 1.0f - 42.f / viewport[3] },
    { -1.0f + 22.f / viewport[2], 1.0f },

    { -1.0f + 22.f / viewport[2], 1.0f - 42.f / viewport[3] },
    { -1.0f + 22.f / viewport[2], 1.0f },
    { -1.0f + 44.f / viewport[2], 1.0f - 42.f / viewport[3] },
    { -1.0f + 44.f / viewport[2], 1.0f },

    { -1.0f + 44.f / viewport[2], 1.0f - 42.f / viewport[3] },
    { -1.0f + 44.f / viewport[2], 1.0f },
    { -1.0f + 66.f / viewport[2], 1.0f - 42.f / viewport[3] },
    { -1.0f + 66.f / viewport[2], 1.0f }
  };

  int v1   = fps % 10;
  int v10  = (fps % 100) / 10;
  int v100 = (fps % 1000) / 100;

  // Feel free to comment these texture coordinates out and use one
  // of the ones below instead, or play around with your own values.
  const GLfloat texCoords[] = {
    (v100 * 4.f) / 64, 7.f / 8,
    (v100 * 4.f) / 64, 0.0f,
    (v100 * 4.f + 4) / 64, 7.f / 8,
    (v100 * 4.f + 4) / 64, 0.0f,

    (v10 * 4.f) / 64, 7.f / 8,
    (v10 * 4.f) / 64, 0.0f,
    (v10 * 4.f + 4) / 64, 7.f / 8,
    (v10 * 4.f + 4) / 64, 0.0f,

    (v1 * 4.f) / 64, 7.f / 8,
    (v1 * 4.f) / 64, 0.0f,
    (v1 * 4.f + 4) / 64, 7.f / 8,
    (v1 * 4.f + 4) / 64, 0.0f,
  };

  // Turn necessary features on
  context->fEnable(LOCAL_GL_BLEND);
  context->fBlendFunc(LOCAL_GL_ONE, LOCAL_GL_SRC_COLOR);

  context->fActiveTexture(LOCAL_GL_TEXTURE0);
  context->fBindTexture(LOCAL_GL_TEXTURE_2D, texture);

  copyprog->Activate();
  copyprog->SetTextureUnit(0);

  // we're going to use client-side vertex arrays for this.
  context->fBindBuffer(LOCAL_GL_ARRAY_BUFFER, 0);

  // "COPY"
  context->fBlendFuncSeparate(LOCAL_GL_ONE, LOCAL_GL_ZERO,
                              LOCAL_GL_ONE, LOCAL_GL_ZERO);

  // enable our vertex attribs; we'll call glVertexPointer below
  // to fill with the correct data.
  GLint vcattr = copyprog->AttribLocation(CopyProgram::VertexCoordAttrib);
  GLint tcattr = copyprog->AttribLocation(CopyProgram::TexCoordAttrib);

  context->fEnableVertexAttribArray(vcattr);
  context->fEnableVertexAttribArray(tcattr);

  context->fVertexAttribPointer(vcattr, 2, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, vertices);
  context->fVertexAttribPointer(tcattr, 2, LOCAL_GL_FLOAT, LOCAL_GL_FALSE, 0, texCoords);

  context->fDrawArrays(LOCAL_GL_TRIANGLE_STRIP, 0, 12);
}
// Returns true if we captured a full interval of data
bool FPSCounter::CapturedFullInterval(TimeStamp aTimestamp) {
  TimeDuration duration = aTimestamp - mLastInterval;
  return duration.ToSeconds() >= kFpsDumpInterval;
}
NS_IMETHODIMP nsWaveStateMachine::Run() {
  // Monitor is held by this thread almost permanently, but must be manually
  // dropped during long operations to prevent the main thread from blocking
  // when calling methods on the state machine object.
  nsAutoMonitor monitor(mMonitor);

  for (;;) {
    switch (mState) {
    case STATE_LOADING_METADATA:
      {
        monitor.Exit();
        PRBool loaded = LoadRIFFChunk() && LoadFormatChunk() && FindDataOffset();
        monitor.Enter();

        if (!loaded) {
          ChangeState(STATE_ERROR);
        }

        if (mState == STATE_LOADING_METADATA) {
          mMetadataValid = PR_TRUE;
          if (mNextState != STATE_SEEKING) {
            nsCOMPtr<nsIRunnable> event =
              NS_NewRunnableMethod(mDecoder, &nsWaveDecoder::MetadataLoaded);
            NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
          }
          ChangeState(mNextState);
        }
      }
      break;

    case STATE_BUFFERING: {
      TimeStamp now = TimeStamp::Now();
      if (now - mBufferingStart < mBufferingWait &&
          mStream->GetCachedDataEnd(mPlaybackPosition) < mBufferingEndOffset &&
          !mStream->IsDataCachedToEndOfStream(mPlaybackPosition) &&
          !mStream->IsSuspendedByCache()) {
        LOG(PR_LOG_DEBUG,
            ("In buffering: buffering data until %d bytes available or %f seconds\n",
             PRUint32(mBufferingEndOffset - mStream->GetCachedDataEnd(mPlaybackPosition)),
             (mBufferingWait - (now - mBufferingStart)).ToSeconds()));
        monitor.Wait(PR_MillisecondsToInterval(1000));
      } else {
        ChangeState(mNextState);
        UpdateReadyState();
      }

      break;
    }

    case STATE_PLAYING: {
      if (!mAudioStream) {
        OpenAudioStream();
        if (!mAudioStream) {
          ChangeState(STATE_ERROR);
          break;
        }
      }

      TimeStamp now = TimeStamp::Now();
      TimeStamp lastWakeup = now -
        TimeDuration::FromMilliseconds(AUDIO_BUFFER_LENGTH);

      do {
        TimeDuration sleepTime = now - lastWakeup;
        lastWakeup = now;

        // We aim to have AUDIO_BUFFER_LENGTH milliseconds of audio
        // buffered, but only sleep for AUDIO_BUFFER_WAKEUP milliseconds
        // (waking early to refill before the backend underruns).  Since we
        // wake early, we only buffer sleepTime milliseconds of audio since
        // there is still AUDIO_BUFFER_LENGTH - sleepTime milliseconds of
        // audio buffered.
        TimeDuration targetTime =
          TimeDuration::FromMilliseconds(AUDIO_BUFFER_LENGTH);
        if (sleepTime < targetTime) {
          targetTime = sleepTime;
        }

        PRInt64 len = TimeToBytes(double(targetTime.ToSeconds()));

        PRInt64 leftToPlay =
          GetDataLength() - (mPlaybackPosition - mWavePCMOffset);
        if (leftToPlay <= len) {
          len = leftToPlay;
          ChangeState(STATE_ENDED);
        }

        PRInt64 availableOffset = mStream->GetCachedDataEnd(mPlaybackPosition);

        // Don't buffer if we're at the end of the stream, or if the
        // load has been suspended by the cache (in the latter case
        // we need to advance playback to free up cache space).
        if (mState != STATE_ENDED &&
            availableOffset < mPlaybackPosition + len &&
            !mStream->IsSuspendedByCache()) {
          mBufferingStart = now;
          mBufferingEndOffset = mPlaybackPosition +
            TimeToBytes(double(mBufferingWait.ToSeconds()));
          mBufferingEndOffset = PR_MAX(mPlaybackPosition + len,
                                       mBufferingEndOffset);
          mNextState = mState;
          ChangeState(STATE_BUFFERING);

          UpdateReadyState();
          break;
        }

        if (len > 0) {
          nsAutoArrayPtr<char> buf(new char[size_t(len)]);
          PRInt64 got = 0;

          monitor.Exit();
          PRBool ok = ReadAll(buf.get(), len, &got);
          monitor.Enter();

          // Reached EOF.
          if (!ok) {
            ChangeState(STATE_ENDED);
            if (got == 0) {
              break;
            }
          }

          // Calculate difference between the current media stream position
          // and the expected end of the PCM data.
          PRInt64 endDelta = mWavePCMOffset + mWaveLength - mPlaybackPosition;
          if (endDelta < 0) {
            // Read past the end of PCM data.  Adjust got to avoid playing
            // back trailing data.
            got -= -endDelta;
            ChangeState(STATE_ENDED);
          }

          if (mState == STATE_ENDED) {
            got = RoundDownToSample(got);
          }

          PRUint32 sampleSize = mSampleFormat == nsAudioStream::FORMAT_U8 ? 1 : 2;
          NS_ABORT_IF_FALSE(got % sampleSize == 0, "Must write complete samples");
          PRUint32 lengthInSamples = PRUint32(got / sampleSize);

          monitor.Exit();
          mAudioStream->Write(buf.get(), lengthInSamples, PR_FALSE);
          monitor.Enter();

          FirePositionChanged(PR_FALSE);
        }

        if (mState == STATE_PLAYING) {
          monitor.Wait(PR_MillisecondsToInterval(AUDIO_BUFFER_WAKEUP));
          now = TimeStamp::Now();
        }
      } while (mState == STATE_PLAYING);
      break;
    }

    case STATE_SEEKING:
      {
        CloseAudioStream();

        mSeekTime = NS_MIN(mSeekTime, GetDuration());
        double seekTime = mSeekTime;

        // Calculate relative offset within PCM data.
        PRInt64 position = RoundDownToSample(TimeToBytes(seekTime));
        NS_ABORT_IF_FALSE(position >= 0 && position <= GetDataLength(),
                          "Invalid seek position");

        // Convert to absolute offset within stream.
        position += mWavePCMOffset;

        // If in the midst of a seek, report the requested seek time
        // as the current time as required by step 8 of 4.8.10.9 'Seeking'
        // in the WHATWG spec.
        PRInt64 oldPosition = mPlaybackPosition;
        mPlaybackPosition = position;
        FirePositionChanged(PR_TRUE);

        monitor.Exit();
        nsCOMPtr<nsIRunnable> startEvent =
          NS_NewRunnableMethod(mDecoder, &nsWaveDecoder::SeekingStarted);
        NS_DispatchToMainThread(startEvent, NS_DISPATCH_SYNC);
        monitor.Enter();

        if (mState == STATE_SHUTDOWN) {
          break;
        }

        monitor.Exit();
        nsresult rv;
        rv = mStream->Seek(nsISeekableStream::NS_SEEK_SET, position);
        monitor.Enter();
        if (NS_FAILED(rv)) {
          NS_WARNING("Seek failed");
          mPlaybackPosition = oldPosition;
          FirePositionChanged(PR_TRUE);
        }

        if (mState == STATE_SHUTDOWN) {
          break;
        }

        if (mState == STATE_SEEKING && mSeekTime == seekTime) {
          // Special case #1: if a seek was requested during metadata load,
          // mNextState will have been clobbered.  This can only happen when
          // we're instantiating a decoder to service a seek request after
          // playback has ended, so we know that the clobbered mNextState
          // was PAUSED.
          // Special case #2: if a seek is requested after the state machine
          // entered STATE_ENDED but before the user has seen the ended
          // event, playback has not ended as far as the user's
          // concerned--the state machine needs to return to the last
          // playback state.
          // Special case #3: if seeking to the end of the media, transition
          // directly into STATE_ENDED.
          State nextState = mNextState;
          if (nextState == STATE_SEEKING) {
            nextState = STATE_PAUSED;
          } else if (nextState == STATE_ENDED) {
            nextState = mPaused ? STATE_PAUSED : STATE_PLAYING;
          } else if (GetDuration() == seekTime) {
            nextState = STATE_ENDED;
          }
          ChangeState(nextState);
        }

        if (mState != STATE_SEEKING) {
          monitor.Exit();
          nsCOMPtr<nsIRunnable> stopEvent =
            NS_NewRunnableMethod(mDecoder, &nsWaveDecoder::SeekingStopped);
          NS_DispatchToMainThread(stopEvent, NS_DISPATCH_SYNC);
          monitor.Enter();
        }
      }
      break;

    case STATE_PAUSED:
      monitor.Wait();
      break;

    case STATE_ENDED:
      FirePositionChanged(PR_TRUE);

      if (mAudioStream) {
        monitor.Exit();
        mAudioStream->Drain();
        monitor.Enter();

        // After the drain call the audio stream is unusable. Close it so that
        // next time audio is used a new stream is created.
        CloseAudioStream();
      }

      mPlaybackEnded = PR_TRUE;

      if (mState == STATE_ENDED) {
        nsCOMPtr<nsIRunnable> event =
          NS_NewRunnableMethod(mDecoder, &nsWaveDecoder::PlaybackEnded);
        NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);

        // We've finished playback.  Shutdown the state machine thread,
        // in order to save memory on thread stacks, particularly on Linux.
        event = new ShutdownThreadEvent(mDecoder->mPlaybackThread);
        NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
        mDecoder->mPlaybackThread = nsnull;
        return NS_OK;
      }
      break;

    case STATE_ERROR:
      {
        nsCOMPtr<nsIRunnable> event =
          NS_NewRunnableMethod(mDecoder, &nsWaveDecoder::DecodeError);
        NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);

        monitor.Wait();

        if (mState != STATE_SHUTDOWN) {
          NS_WARNING("Invalid state transition");
          ChangeState(STATE_ERROR);
        }
      }
      break;

    case STATE_SHUTDOWN:
      mPlaybackEnded = PR_TRUE;
      CloseAudioStream();
      return NS_OK;
    }
  }

  return NS_OK;
}
static bool SampleAnimations(Layer* aLayer, TimeStamp aPoint) {
  AnimationArray& animations = aLayer->GetAnimations();
  InfallibleTArray<AnimData>& animationData = aLayer->GetAnimationData();

  bool activeAnimations = false;

  for (uint32_t i = animations.Length(); i-- != 0; ) {
    Animation& animation = animations[i];
    AnimData& animData = animationData[i];

    activeAnimations = true;

    TimeDuration elapsedDuration = aPoint - animation.startTime();
    // Skip animations that are yet to start.
    //
    // Currently, this should only happen when the refresh driver is under test
    // control and is made to produce a time in the past or is restored from
    // test control causing it to jump backwards in time.
    //
    // Since activeAnimations is true, this could mean we keep compositing
    // unnecessarily during the delay, but so long as this only happens while
    // the refresh driver is under test control that should be ok.
    if (elapsedDuration.ToSeconds() < 0) {
      continue;
    }

    AnimationTiming timing;
    timing.mIterationDuration = animation.duration();
    // Currently animations run on the compositor have their delay factored
    // into their start time, hence the delay is effectively zero.
    timing.mDelay = TimeDuration(0);
    timing.mIterationCount = animation.iterationCount();
    timing.mDirection = animation.direction();
    // Animations typically only run on the compositor during their active
    // interval but if we end up sampling them outside that range (for
    // example, while they are waiting to be removed) we currently just
    // assume that we should fill.
    timing.mFillMode = NS_STYLE_ANIMATION_FILL_MODE_BOTH;

    ComputedTiming computedTiming =
      dom::Animation::GetComputedTimingAt(
        Nullable<TimeDuration>(elapsedDuration), timing);

    NS_ABORT_IF_FALSE(0.0 <= computedTiming.mTimeFraction &&
                      computedTiming.mTimeFraction <= 1.0,
                      "time fraction should be in [0-1]");

    int segmentIndex = 0;
    AnimationSegment* segment = animation.segments().Elements();
    while (segment->endPortion() < computedTiming.mTimeFraction) {
      ++segment;
      ++segmentIndex;
    }

    double positionInSegment =
      (computedTiming.mTimeFraction - segment->startPortion()) /
      (segment->endPortion() - segment->startPortion());

    double portion =
      animData.mFunctions[segmentIndex]->GetValue(positionInSegment);

    // interpolate the property
    Animatable interpolatedValue;
    SampleValue(portion, animation, animData.mStartValues[segmentIndex],
                animData.mEndValues[segmentIndex], &interpolatedValue);
    LayerComposite* layerComposite = aLayer->AsLayerComposite();
    switch (animation.property()) {
    case eCSSProperty_opacity:
    {
      layerComposite->SetShadowOpacity(interpolatedValue.get_float());
      break;
    }
    case eCSSProperty_transform:
    {
      Matrix4x4 matrix =
        interpolatedValue.get_ArrayOfTransformFunction()[0].get_TransformMatrix().value();
      if (ContainerLayer* c = aLayer->AsContainerLayer()) {
        matrix = matrix * Matrix4x4().Scale(c->GetInheritedXScale(),
                                            c->GetInheritedYScale(),
                                            1);
      }
      layerComposite->SetShadowTransform(matrix);
      layerComposite->SetShadowTransformSetByAnimation(true);
      break;
    }
    default:
      NS_WARNING("Unhandled animated property");
    }
  }

  for (Layer* child = aLayer->GetFirstChild(); child;
       child = child->GetNextSibling()) {
    activeAnimations |= SampleAnimations(child, aPoint);
  }

  return activeAnimations;
}
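The segment lookup in SampleAnimations (walking segments until the computed time fraction falls inside one, then normalizing within that segment) can be hard to see amid the compositor plumbing. Below is a minimal, self-contained sketch of just that lookup; the Segment struct and FindSegment helper are illustrative stand-ins, not the actual AnimationSegment API.

#include <cstdio>
#include <vector>

// Hypothetical stand-in: the real AnimationSegment also carries start/end
// values and a per-segment timing function.
struct Segment { double start; double end; };

// Given a time fraction in [0, 1], find the segment containing it and the
// normalized position within that segment.
static void FindSegment(const std::vector<Segment>& aSegments, double aFraction,
                        size_t& aIndex, double& aPositionInSegment) {
  aIndex = 0;
  while (aSegments[aIndex].end < aFraction) {
    ++aIndex;
  }
  const Segment& s = aSegments[aIndex];
  aPositionInSegment = (aFraction - s.start) / (s.end - s.start);
}

int main() {
  std::vector<Segment> segments = { { 0.0, 0.25 }, { 0.25, 1.0 } };
  size_t index;
  double pos;
  FindSegment(segments, 0.5, index, pos);  // lands in segment 1 at position 1/3
  std::printf("segment %zu, position %f\n", index, pos);
  return 0;
}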