long AudioStream::GetUnprocessed(void* aBuffer, long aFrames, int64_t &aTimeMs) { mMonitor.AssertCurrentThreadOwns(); uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer); // Flush the timestretcher pipeline, if we were playing using a playback rate // other than 1.0. uint32_t flushedFrames = 0; if (mTimeStretcher && mTimeStretcher->numSamples()) { flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames); wpos += FramesToBytes(flushedFrames); } uint32_t toPopBytes = FramesToBytes(aFrames - flushedFrames); uint32_t available = std::min(toPopBytes, mBuffer.Length()); void* input[2]; uint32_t input_size[2]; mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]); memcpy(wpos, input[0], input_size[0]); wpos += input_size[0]; memcpy(wpos, input[1], input_size[1]); // First time block now has our first returned sample mReadPoint += BytesToFrames(available); GetBufferInsertTime(aTimeMs); return BytesToFrames(available) + flushedFrames; }
// Produce aFrames of output through the time stretcher, resampling from
// mInRate to mOutRate. Feeds the stretcher from mBuffer as needed and drains
// its output into aBuffer. Returns the number of frames written (possibly
// fewer than aFrames when the ring buffer runs dry); aTimeMs receives the
// insertion-time estimate for latency logging. Caller must hold mMonitor.
long AudioStream::GetTimeStretched(void* aBuffer, long aFrames, int64_t &aTimeMs)
{
  mMonitor.AssertCurrentThreadOwns();
  long processedFrames = 0;
  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return 0;
  }
  uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
  // Input frames needed per output frame; >1 when playing faster than 1.0x.
  double playbackRate = static_cast<double>(mInRate) / mOutRate;
  // ceil() so we never feed the stretcher less input than the request needs.
  uint32_t toPopBytes = FramesToBytes(ceil(aFrames * playbackRate));
  uint32_t available = 0;
  bool lowOnBufferedData = false;
  do {
    // Check if we already have enough data in the time stretcher pipeline.
    if (mTimeStretcher->numSamples() <= static_cast<uint32_t>(aFrames)) {
      void* input[2];
      uint32_t input_size[2];
      available = std::min(mBuffer.Length(), toPopBytes);
      if (available != toPopBytes) {
        // Ring buffer can't cover the whole request — exit after this pass.
        lowOnBufferedData = true;
      }
      // Pop may return two segments when the region wraps the ring buffer.
      mBuffer.PopElements(available, &input[0], &input_size[0],
                                     &input[1], &input_size[1]);
      mReadPoint += BytesToFrames(available);
      for(uint32_t i = 0; i < 2; i++) {
        mTimeStretcher->putSamples(reinterpret_cast<AudioDataValue*>(input[i]),
                                   BytesToFrames(input_size[i]));
      }
    }
    // Drain whatever output the stretcher has ready into the caller's buffer.
    uint32_t receivedFrames =
      mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos),
                                     aFrames - processedFrames);
    wpos += FramesToBytes(receivedFrames);
    processedFrames += receivedFrames;
  } while (processedFrames < aFrames && !lowOnBufferedData);

  GetBufferInsertTime(aTimeMs);
  return processedFrames;
}
// Get unprocessed samples, and pad the beginning of the buffer with silence if // there is not enough data. long AudioStream::GetUnprocessedWithSilencePadding(void* aBuffer, long aFrames, int64_t& aTimeMs) { uint32_t toPopBytes = FramesToBytes(aFrames); uint32_t available = std::min(toPopBytes, mBuffer.Length()); uint32_t silenceOffset = toPopBytes - available; uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer); memset(wpos, 0, silenceOffset); wpos += silenceOffset; void* input[2]; uint32_t input_size[2]; mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]); memcpy(wpos, input[0], input_size[0]); wpos += input_size[0]; memcpy(wpos, input[1], input_size[1]); GetBufferInsertTime(aTimeMs); return aFrames; }
// cubeb data callback: fill aBuffer with aFrames frames of audio. Chooses
// between the unprocessed and time-stretched paths, applies volume, pads any
// underrun with silence (except while draining), and logs latency. Returns
// the number of frames "serviced" (including the silent padding).
long AudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  MOZ_ASSERT(mState != SHUTDOWN, "No data callback after shutdown");
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;
  int64_t insertTime;

  // NOTE: wasapi (others?) can call us back *after* stop()/Shutdown() (mState == SHUTDOWN)
  // Bug 996162

  // callback tells us cubeb succeeded initializing
  if (mState == STARTED) {
    // For low-latency streams, we want to minimize any built-up data when
    // we start getting callbacks.
    // Simple version - contract on first callback only.
    if (mLatencyRequest == LowLatency) {
#ifdef PR_LOGGING
      uint32_t old_len = mBuffer.Length();
#endif
      // Drop everything beyond one callback's worth of data.
      available = mBuffer.ContractTo(FramesToBytes(aFrames));
#ifdef PR_LOGGING
      TimeStamp now = TimeStamp::Now();
      if (!mStartTime.IsNull()) {
        int64_t timeMs = (now - mStartTime).ToMilliseconds();
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("Stream took %lldms to start after first Write() @ %u", timeMs, mOutRate));
      } else {
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("Stream started before Write() @ %u", mOutRate));
      }
      if (old_len != available) {
        // Note that we may have dropped samples in Write() as well!
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("AudioStream %p dropped %u + %u initial frames @ %u", this,
                mReadPoint, BytesToFrames(old_len - available), mOutRate));
        mReadPoint += BytesToFrames(old_len - available);
      }
#endif
    }
    mState = RUNNING;
  }

  if (available) {
    // When we are playing a low latency stream, and it is the first time we are
    // getting data from the buffer, we prefer to add the silence for an
    // underrun at the beginning of the buffer, so the first buffer is not cut
    // in half by the silence inserted to compensate for the underrun.
    if (mInRate == mOutRate) {
      if (mLatencyRequest == LowLatency && !mWritten) {
        servicedFrames = GetUnprocessedWithSilencePadding(output, aFrames, insertTime);
      } else {
        servicedFrames = GetUnprocessed(output, aFrames, insertTime);
      }
    } else {
      // Rates differ: run through the time stretcher.
      servicedFrames = GetTimeStretched(output, aFrames, insertTime);
    }

    float scaled_volume = float(GetVolumeScale() * mVolume);
    ScaleAudioSamples(output, aFrames * mOutChannels, scaled_volume);

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  } else {
    GetBufferInsertTime(insertTime);
  }

  underrunFrames = aFrames - servicedFrames;

  // Always send audible frames first, and silent frames later.
  // Otherwise it will break the assumption of FrameHistory.
  if (mState != DRAINING) {
    mAudioClock.UpdateFrameHistory(servicedFrames, underrunFrames);
    // Zero-fill the tail of the buffer for the frames we couldn't service.
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
    if (underrunFrames) {
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("AudioStream %p lost %d frames", this, underrunFrames));
    }
    servicedFrames += underrunFrames;
  } else {
    // While draining, running out of data is expected — don't count underruns.
    mAudioClock.UpdateFrameHistory(servicedFrames, 0);
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
  // Don't log if we're not interested or if the stream is inactive
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG) &&
      mState != SHUTDOWN &&
      insertTime != INT64_MAX && servicedFrames > underrunFrames) {
    uint32_t latency = UINT32_MAX;
    if (cubeb_stream_get_latency(mCubebStream, &latency)) {
      NS_WARNING("Could not get latency from cubeb.");
    }
    TimeStamp now = TimeStamp::Now();
    mLatencyLog->Log(AsyncLatencyLogger::AudioStream, reinterpret_cast<uint64_t>(this),
                     insertTime, now);
    mLatencyLog->Log(AsyncLatencyLogger::Cubeb, reinterpret_cast<uint64_t>(mCubebStream.get()),
                     (latency * 1000) / mOutRate, now);
  }

  return servicedFrames;
}
// NOTE(review): this is a second definition of AudioStream::DataCallback in
// this chunk — it looks like an older revision (uses mLostFrames and
// mAudioClock.UpdateWritePosition instead of UpdateFrameHistory, and lacks
// the STARTED-state low-latency contraction). Two definitions of the same
// symbol cannot coexist in one translation unit; confirm which one is live.
//
// cubeb data callback: fill aBuffer with aFrames frames of audio, pad any
// underrun with silence (except while draining), and log latency. Returns
// the number of frames "serviced" (including the silent padding).
long AudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;
  int64_t insertTime;

  if (available) {
    // When we are playing a low latency stream, and it is the first time we are
    // getting data from the buffer, we prefer to add the silence for an
    // underrun at the beginning of the buffer, so the first buffer is not cut
    // in half by the silence inserted to compensate for the underrun.
    if (mInRate == mOutRate) {
      if (mLatencyRequest == LowLatency && !mWritten) {
        servicedFrames = GetUnprocessedWithSilencePadding(output, aFrames, insertTime);
      } else {
        servicedFrames = GetUnprocessed(output, aFrames, insertTime);
      }
    } else {
      // Rates differ: run through the time stretcher.
      servicedFrames = GetTimeStretched(output, aFrames, insertTime);
    }
    float scaled_volume = float(GetVolumeScale() * mVolume);
    ScaleAudioSamples(output, aFrames * mOutChannels, scaled_volume);
    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");
    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  } else {
    GetBufferInsertTime(insertTime);
  }

  underrunFrames = aFrames - servicedFrames;

  if (mState != DRAINING) {
    // Zero-fill the tail of the buffer for the frames we couldn't service.
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
    if (underrunFrames) {
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("AudioStream %p lost %d frames", this, underrunFrames));
    }
    mLostFrames += underrunFrames;
    servicedFrames += underrunFrames;
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
  // Don't log if we're not interested or if the stream is inactive
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG) &&
      insertTime != INT64_MAX && servicedFrames > underrunFrames) {
    uint32_t latency = UINT32_MAX;
    if (cubeb_stream_get_latency(mCubebStream, &latency)) {
      NS_WARNING("Could not get latency from cubeb.");
    }
    TimeStamp now = TimeStamp::Now();
    mLatencyLog->Log(AsyncLatencyLogger::AudioStream, reinterpret_cast<uint64_t>(this),
                     insertTime, now);
    mLatencyLog->Log(AsyncLatencyLogger::Cubeb, reinterpret_cast<uint64_t>(mCubebStream.get()),
                     (latency * 1000) / mOutRate, now);
  }
  mAudioClock.UpdateWritePosition(servicedFrames);
  return servicedFrames;
}