// Copies up to aFrames frames of already-buffered audio into aBuffer with no
// time stretching applied.  Any samples still queued inside the time
// stretcher (left over from playback at a rate other than 1.0) are drained
// first; the remainder of the request is popped straight out of mBuffer.
// aTimeMs is filled in via GetBufferInsertTime().  Returns the number of
// frames actually written, which may be less than aFrames; the caller is
// responsible for padding any shortfall.
// The caller must hold mMonitor (asserted below).
long AudioStream::GetUnprocessed(void* aBuffer, long aFrames, int64_t &aTimeMs)
{
  mMonitor.AssertCurrentThreadOwns();
  uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);

  // Flush the timestretcher pipeline, if we were playing using a playback rate
  // other than 1.0.
  uint32_t flushedFrames = 0;
  if (mTimeStretcher && mTimeStretcher->numSamples()) {
    flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
    wpos += FramesToBytes(flushedFrames);
  }

  // Satisfy whatever the stretcher did not cover from the ring buffer,
  // limited by how much data is actually buffered.
  uint32_t toPopBytes = FramesToBytes(aFrames - flushedFrames);
  uint32_t available = std::min(toPopBytes, mBuffer.Length());

  // The circular buffer can return the popped data as up to two disjoint
  // contiguous segments, hence the two memcpy calls.
  void* input[2];
  uint32_t input_size[2];
  mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]);
  memcpy(wpos, input[0], input_size[0]);
  wpos += input_size[0];
  memcpy(wpos, input[1], input_size[1]);

  // First time block now has our first returned sample
  mReadPoint += BytesToFrames(available);
  GetBufferInsertTime(aTimeMs);

  return BytesToFrames(available) + flushedFrames;
}
// Returns how many whole frames can currently be appended to the internal
// buffer without blocking.  Takes the monitor, so it is safe to call from
// any thread.
uint32_t
AudioStream::Available()
{
  MonitorAutoLock lock(mMonitor);
  // The buffer must always hold an integral number of frames.
  MOZ_ASSERT(mBuffer.Length() % mBytesPerFrame == 0, "Buffer invariant violated.");
  return BytesToFrames(mBuffer.Available());
}
// Reports the free space in the internal buffer, expressed in frames.
// Locks the monitor for the duration of the query.
uint32_t
BufferedAudioStream::Available()
{
  MonitorAutoLock lock(mMonitor);
  // Buffered byte count must stay frame-aligned at all times.
  NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Buffer invariant violated.");
  return BytesToFrames(mBuffer.Available());
}
/* Flushes the output pipeline.  With wait == true this blocks until the
 * render thread has consumed everything still queued in the circular
 * buffer; otherwise the pending data is discarded immediately. */
void ca_Flush(audio_output_t *p_aout, bool wait)
{
    struct aout_sys_common *p_sys = (struct aout_sys_common *) p_aout->sys;

    if (!wait) {
        /* flush circular buffer if data is left */
        TPCircularBufferClear(&p_sys->circular_buffer);
        return;
    }

    int32_t i_bytes;
    while (TPCircularBufferTail(&p_sys->circular_buffer, &i_bytes) != NULL) {
        /* Calculate the duration of the circular buffer, in order to wait
         * for the render thread to play it all */
        const mtime_t i_frame_us =
            FramesToUs(p_sys, BytesToFrames(p_sys, i_bytes)) + 10000;
        /* Sleep roughly half of that estimate, then poll again. */
        msleep(i_frame_us / 2);
    }
}
/* Services one update tick of a streaming player: recycles every OpenAL
 * buffer the source has finished playing (refilling it from the decoder
 * and re-queueing it), and restarts the source if it underran while data
 * is still queued.  Returns 1 while playback should continue, 0 on error
 * or when the stream has ended. */
static int UpdatePlayer(StreamPlayer *player)
{
    ALint state, processed;

    /* Query playback state and the number of fully-consumed buffers. */
    alGetSourcei(player->source, AL_SOURCE_STATE, &state);
    alGetSourcei(player->source, AL_BUFFERS_PROCESSED, &processed);
    if(alGetError() != AL_NO_ERROR)
    {
        fprintf(stderr, "Error checking source state\n");
        return 0;
    }

    /* Recycle each processed buffer: unqueue it, refill it with the next
     * chunk of decoded audio, and put it back on the source's queue. */
    for(;processed > 0;processed--)
    {
        ALuint bufid;
        size_t got;

        alSourceUnqueueBuffers(player->source, 1, &bufid);

        got = readAVAudioData(player->stream, player->data, player->datasize);
        if(got > 0)
        {
            alBufferSamplesSOFT(bufid, player->rate, player->format,
                                BytesToFrames(got, player->channels, player->type),
                                player->channels, player->type, player->data);
            alSourceQueueBuffers(player->source, 1, &bufid);
        }
        if(alGetError() != AL_NO_ERROR)
        {
            fprintf(stderr, "Error buffering data\n");
            return 0;
        }
    }

    /* If the source stopped (underrun), restart it — unless the queue is
     * empty, in which case playback is finished. */
    if(state != AL_PLAYING && state != AL_PAUSED)
    {
        ALint queued;

        alGetSourcei(player->source, AL_BUFFERS_QUEUED, &queued);
        if(queued == 0)
            return 0;

        alSourcePlay(player->source);
        if(alGetError() != AL_NO_ERROR)
        {
            fprintf(stderr, "Error restarting playback\n");
            return 0;
        }
    }

    return 1;
}
// Fills aBuffer with up to aFrames frames of audio run through the
// time stretcher (used when the effective playback rate mInRate/mOutRate
// is not 1.0).  Buffered input is fed into the stretcher in chunks and
// processed output is pulled out until either the request is satisfied or
// mBuffer runs out of data.  aTimeMs is filled in via GetBufferInsertTime().
// Returns the number of frames actually produced (may be < aFrames).
// The caller must hold mMonitor (asserted below).
long AudioStream::GetTimeStretched(void* aBuffer, long aFrames, int64_t &aTimeMs)
{
  mMonitor.AssertCurrentThreadOwns();
  long processedFrames = 0;

  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return 0;
  }

  uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
  // Input frames needed per output frame; >1 when playing faster than 1.0.
  double playbackRate = static_cast<double>(mInRate) / mOutRate;
  // Upper bound on the input bytes we are willing to feed the stretcher
  // for this request.
  uint32_t toPopBytes = FramesToBytes(ceil(aFrames * playbackRate));
  uint32_t available = 0;
  bool lowOnBufferedData = false;
  do {
    // Check if we already have enough data in the time stretcher pipeline.
    if (mTimeStretcher->numSamples() <= static_cast<uint32_t>(aFrames)) {
      void* input[2];
      uint32_t input_size[2];
      available = std::min(mBuffer.Length(), toPopBytes);
      if (available != toPopBytes) {
        // mBuffer could not supply everything we asked for; after this
        // pass we must stop rather than spin waiting for data.
        lowOnBufferedData = true;
      }
      // PopElements may return the data as two disjoint segments.
      mBuffer.PopElements(available, &input[0], &input_size[0],
                          &input[1], &input_size[1]);
      mReadPoint += BytesToFrames(available);
      for(uint32_t i = 0; i < 2; i++) {
        mTimeStretcher->putSamples(reinterpret_cast<AudioDataValue*>(input[i]), BytesToFrames(input_size[i]));
      }
    }
    // Pull as many stretched frames as are ready, up to what is still owed.
    uint32_t receivedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames - processedFrames);
    wpos += FramesToBytes(receivedFrames);
    processedFrames += receivedFrames;
  } while (processedFrames < aFrames && !lowOnBufferedData);

  GetBufferInsertTime(aTimeMs);

  return processedFrames;
}
/* Reports the current output delay in microseconds: the time the data
 * still queued in the circular buffer will take to play, plus the fixed
 * device latency.  Always succeeds and returns 0. */
int ca_TimeGet(audio_output_t *p_aout, mtime_t *delay)
{
    struct aout_sys_common *p_sys = (struct aout_sys_common *) p_aout->sys;
    int32_t i_bytes;

    /* Peek (without consuming) at how much data the render thread still
     * has to play. */
    TPCircularBufferTail(&p_sys->circular_buffer, &i_bytes);

    const int64_t i_frames = BytesToFrames(p_sys, i_bytes);
    *delay = FramesToUs(p_sys, i_frames) + p_sys->i_dev_latency_us;

    return 0;
}
/* Queues one decoded audio block for the render thread.  The block is
 * (optionally) channel-reordered in place, then pushed into the shared
 * circular buffer.  When the buffer is full we push partial chunks and
 * sleep until the render thread frees space — unless the output is
 * paused, in which case the rest of the block is dropped to avoid
 * blocking forever.  Always releases p_block before returning. */
void ca_Play(audio_output_t * p_aout, block_t * p_block)
{
    struct aout_sys_common *p_sys = (struct aout_sys_common *) p_aout->sys;

    /* Do the channel reordering */
    if (p_sys->chans_to_reorder)
        aout_ChannelReorder(p_block->p_buffer, p_block->i_buffer,
                            p_sys->chans_to_reorder, p_sys->chan_table,
                            VLC_CODEC_FL32);

    /* move data to buffer */
    while (!TPCircularBufferProduceBytes(&p_sys->circular_buffer,
                                         p_block->p_buffer, p_block->i_buffer))
    {
        /* Relaxed is enough here: we only need an eventually-visible flag,
         * not ordering with other memory operations. */
        if (atomic_load_explicit(&p_sys->b_paused, memory_order_relaxed))
        {
            /* The render thread is not consuming while paused, so waiting
             * would deadlock; drop the remainder instead. */
            msg_Warn(p_aout, "dropping block because the circular buffer is "
                     "full and paused");
            break;
        }

        /* Try to play what we can */
        /* NOTE(review): "i_avalaible_bytes" is a long-standing typo for
         * "available"; kept as-is since this is a documentation-only pass. */
        int32_t i_avalaible_bytes;
        TPCircularBufferHead(&p_sys->circular_buffer, &i_avalaible_bytes);
        assert(i_avalaible_bytes >= 0);
        /* If the whole remaining block suddenly fits, retry the full push. */
        if (unlikely((size_t) i_avalaible_bytes >= p_block->i_buffer))
            continue;

        /* Push exactly the free space; this cannot fail since we just
         * measured it. */
        bool ret = TPCircularBufferProduceBytes(&p_sys->circular_buffer,
                                                p_block->p_buffer,
                                                i_avalaible_bytes);
        assert(ret == true);
        p_block->p_buffer += i_avalaible_bytes;
        p_block->i_buffer -= i_avalaible_bytes;

        /* Wait for the render buffer to play the remaining data */
        const mtime_t i_frame_us =
            FramesToUs(p_sys, BytesToFrames(p_sys, p_block->i_buffer));
        /* Sleep about half the estimated playback time, then retry. */
        msleep(i_frame_us / 2);
    }

    /* Report (and reset) any underrun the render thread recorded. */
    unsigned i_underrun_size = atomic_exchange(&p_sys->i_underrun_size, 0);
    if (i_underrun_size > 0)
        msg_Warn(p_aout, "underrun of %u bytes", i_underrun_size);

    block_Release(p_block);
}
long BufferedAudioStream::GetUnprocessed(void* aBuffer, long aFrames) { uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer); // Flush the timestretcher pipeline, if we were playing using a playback rate // other than 1.0. uint32_t flushedFrames = 0; if (mTimeStretcher && mTimeStretcher->numSamples()) { flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames); wpos += FramesToBytes(flushedFrames); } uint32_t toPopBytes = FramesToBytes(aFrames - flushedFrames); uint32_t available = std::min(toPopBytes, mBuffer.Length()); void* input[2]; uint32_t input_size[2]; mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]); memcpy(wpos, input[0], input_size[0]); wpos += input_size[0]; memcpy(wpos, input[1], input_size[1]); return BytesToFrames(available) + flushedFrames; }
/* Prebuffers some audio from the file, and starts playing the source.
 * Returns 1 on success, 0 on an OpenAL error. */
static int StartPlayer(StreamPlayer *player)
{
    size_t filled, got;

    /* Rewind the source position and clear the buffer queue */
    alSourceRewind(player->source);
    alSourcei(player->source, AL_BUFFER, 0);

    /* Prime the buffer queue with decoded audio; stop early if the
     * stream runs out of data. */
    for(filled = 0;filled < NUM_BUFFERS;filled++)
    {
        got = readAVAudioData(player->stream, player->data, player->datasize);
        if(got == 0)
            break;

        alBufferSamplesSOFT(player->buffers[filled], player->rate, player->format,
                            BytesToFrames(got, player->channels, player->type),
                            player->channels, player->type, player->data);
    }
    if(alGetError() != AL_NO_ERROR)
    {
        fprintf(stderr, "Error buffering for playback\n");
        return 0;
    }

    /* Queue only the buffers that were actually filled, then play. */
    alSourceQueueBuffers(player->source, filled, player->buffers);
    alSourcePlay(player->source);
    if(alGetError() != AL_NO_ERROR)
    {
        fprintf(stderr, "Error starting playback\n");
        return 0;
    }

    return 1;
}
// Appends aFrames frames of audio to the stream's internal buffer,
// downmixing >2-channel input to stereo first.  Blocks (via mon.Wait())
// until everything fits, EXCEPT for low-latency streams that have not yet
// reached RUNNING, where old buffered data is dropped instead so the
// MediaStreamGraph thread is never stalled.  Returns NS_ERROR_FAILURE if
// the stream is (or becomes) ERRORED or has more than 8 channels.
// aTime is the time in ms the samples were inserted into MediaStreamGraph
nsresult AudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames, TimeStamp *aTime)
{
  MonitorAutoLock mon(mMonitor);
  if (mState == ERRORED) {
    return NS_ERROR_FAILURE;
  }
  NS_ASSERTION(mState == INITIALIZED || mState == STARTED || mState == RUNNING,
    "Stream write in unexpected state.");

  // See if we need to start() the stream, since we must do that from this thread
  CheckForStart();

  // Downmix to Stereo.
  if (mChannels > 2 && mChannels <= 8) {
    // Note: mutates the caller's buffer in place.
    DownmixAudioToStereo(const_cast<AudioDataValue*> (aBuf), mChannels, aFrames);
  } else if (mChannels > 8) {
    return NS_ERROR_FAILURE;
  }

  const uint8_t* src = reinterpret_cast<const uint8_t*>(aBuf);
  uint32_t bytesToCopy = FramesToBytes(aFrames);

  // XXX this will need to change if we want to enable this on-the-fly!
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
    // Record the position and time this data was inserted
    int64_t timeMs;
    if (aTime && !aTime->IsNull()) {
      if (mStartTime.IsNull()) {
        AsyncLatencyLogger::Get(true)->GetStartTime(mStartTime);
      }
      timeMs = (*aTime - mStartTime).ToMilliseconds();
    } else {
      timeMs = 0;
    }
    struct Inserts insert = { timeMs, aFrames};
    mInserts.AppendElement(insert);
  }

  // Copy in chunks, waiting (or dropping) whenever the buffer fills up.
  while (bytesToCopy > 0) {
    uint32_t available = std::min(bytesToCopy, mBuffer.Available());
    NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0,
        "Must copy complete frames.");

    mBuffer.AppendElements(src, available);
    src += available;
    bytesToCopy -= available;

    if (bytesToCopy > 0) {
      // Careful - the CubebInit thread may not have gotten to STARTED yet
      if ((mState == INITIALIZED || mState == STARTED) && mLatencyRequest == LowLatency) {
        // don't ever block MediaStreamGraph low-latency streams
        uint32_t remains = 0; // we presume the buffer is full
        if (mBuffer.Length() > bytesToCopy) {
          remains = mBuffer.Length() - bytesToCopy; // Free up just enough space
        }
        // account for dropping samples
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING, ("Stream %p dropping %u bytes (%u frames)in Write()",
            this, mBuffer.Length() - remains, BytesToFrames(mBuffer.Length() - remains)));
        mReadPoint += BytesToFrames(mBuffer.Length() - remains);
        mBuffer.ContractTo(remains);
      } else { // RUNNING or high latency
        // If we are not playing, but our buffer is full, start playing to make
        // room for soon-to-be-decoded data.
        if (mState != STARTED && mState != RUNNING) {
          PR_LOG(gAudioStreamLog, PR_LOG_WARNING, ("Starting stream %p in Write (%u waiting)",
                                                   this, bytesToCopy));
          StartUnlocked();
          if (mState == ERRORED) {
            return NS_ERROR_FAILURE;
          }
        }
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("Stream %p waiting in Write() (%u waiting)",
                 this, bytesToCopy));
        // Wait for DataCallback to consume data and NotifyAll() us.
        mon.Wait();
      }
    }
  }

  mWritten += aFrames;
  return NS_OK;
}
// cubeb data callback: fills aBuffer with aFrames frames of audio pulled
// from mBuffer (optionally time-stretched and volume-scaled), zero-padding
// any underrun unless we are DRAINING.  Also handles the STARTED->RUNNING
// transition (contracting low-latency buffers on the first callback) and
// emits latency log entries.  Returns the number of frames written, which
// cubeb uses to detect drain.
long AudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  MOZ_ASSERT(mState != SHUTDOWN, "No data callback after shutdown");
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;
  int64_t insertTime;

  // NOTE: wasapi (others?) can call us back *after* stop()/Shutdown() (mState == SHUTDOWN)
  // Bug 996162

  // callback tells us cubeb succeeded initializing
  if (mState == STARTED) {
    // For low-latency streams, we want to minimize any built-up data when
    // we start getting callbacks.
    // Simple version - contract on first callback only.
    if (mLatencyRequest == LowLatency) {
#ifdef PR_LOGGING
      uint32_t old_len = mBuffer.Length();
#endif
      available = mBuffer.ContractTo(FramesToBytes(aFrames));
#ifdef PR_LOGGING
      TimeStamp now = TimeStamp::Now();
      if (!mStartTime.IsNull()) {
        int64_t timeMs = (now - mStartTime).ToMilliseconds();
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("Stream took %lldms to start after first Write() @ %u", timeMs, mOutRate));
      } else {
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("Stream started before Write() @ %u", mOutRate));
      }

      if (old_len != available) {
        // Note that we may have dropped samples in Write() as well!
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("AudioStream %p dropped %u + %u initial frames @ %u", this,
                 mReadPoint, BytesToFrames(old_len - available), mOutRate));
        mReadPoint += BytesToFrames(old_len - available);
      }
#endif
    }
    mState = RUNNING;
  }

  if (available) {
    // When we are playing a low latency stream, and it is the first time we are
    // getting data from the buffer, we prefer to add the silence for an
    // underrun at the beginning of the buffer, so the first buffer is not cut
    // in half by the silence inserted to compensate for the underrun.
    if (mInRate == mOutRate) {
      if (mLatencyRequest == LowLatency && !mWritten) {
        servicedFrames = GetUnprocessedWithSilencePadding(output, aFrames, insertTime);
      } else {
        servicedFrames = GetUnprocessed(output, aFrames, insertTime);
      }
    } else {
      // Playback rate != 1.0: run the data through the time stretcher.
      servicedFrames = GetTimeStretched(output, aFrames, insertTime);
    }

    float scaled_volume = float(GetVolumeScale() * mVolume);
    ScaleAudioSamples(output, aFrames * mOutChannels, scaled_volume);

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  } else {
    GetBufferInsertTime(insertTime);
  }

  underrunFrames = aFrames - servicedFrames;

  // Always send audible frames first, and silent frames later.
  // Otherwise it will break the assumption of FrameHistory.
  if (mState != DRAINING) {
    mAudioClock.UpdateFrameHistory(servicedFrames, underrunFrames);
    // Zero-fill the tail of the buffer that we could not service.
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
    if (underrunFrames) {
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("AudioStream %p lost %d frames", this, underrunFrames));
    }
    // Report the padded frames as serviced so cubeb keeps the stream alive.
    servicedFrames += underrunFrames;
  } else {
    // While draining, report only real frames so cubeb can detect the end.
    mAudioClock.UpdateFrameHistory(servicedFrames, 0);
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);

  // Don't log if we're not interested or if the stream is inactive
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG) &&
      mState != SHUTDOWN &&
      insertTime != INT64_MAX && servicedFrames > underrunFrames) {
    uint32_t latency = UINT32_MAX;
    if (cubeb_stream_get_latency(mCubebStream, &latency)) {
      NS_WARNING("Could not get latency from cubeb.");
    }
    TimeStamp now = TimeStamp::Now();

    mLatencyLog->Log(AsyncLatencyLogger::AudioStream, reinterpret_cast<uint64_t>(this),
                     insertTime, now);
    mLatencyLog->Log(AsyncLatencyLogger::Cubeb, reinterpret_cast<uint64_t>(mCubebStream.get()),
                     (latency * 1000) / mOutRate, now);
  }

  return servicedFrames;
}
/* LoadBuffer loads the named audio file into an OpenAL buffer object, and * returns the new buffer ID. */ static ALuint LoadSound(const char *filename) { ALenum err, format, type, channels; ALuint rate, buffer; size_t datalen; void *data; FilePtr sound; /* Open the audio file */ sound = openAudioFile(filename, 1000); if(!sound) { fprintf(stderr, "Could not open audio in %s\n", filename); closeAudioFile(sound); return 0; } /* Get the sound format, and figure out the OpenAL format */ if(getAudioInfo(sound, &rate, &channels, &type) != 0) { fprintf(stderr, "Error getting audio info for %s\n", filename); closeAudioFile(sound); return 0; } format = GetFormat(channels, type, alIsBufferFormatSupportedSOFT); if(format == AL_NONE) { fprintf(stderr, "Unsupported format (%s, %s) for %s\n", ChannelsName(channels), TypeName(type), filename); closeAudioFile(sound); return 0; } /* Decode the whole audio stream to a buffer. */ data = decodeAudioStream(sound, &datalen); if(!data) { fprintf(stderr, "Failed to read audio from %s\n", filename); closeAudioFile(sound); return 0; } /* Buffer the audio data into a new buffer object, then free the data and * close the file. */ buffer = 0; alGenBuffers(1, &buffer); alBufferSamplesSOFT(buffer, rate, format, BytesToFrames(datalen, channels, type), channels, type, data); free(data); closeAudioFile(sound); /* Check if an error occured, and clean up if so. */ err = alGetError(); if(err != AL_NO_ERROR) { fprintf(stderr, "OpenAL Error: %s\n", alGetString(err)); if(alIsBuffer(buffer)) alDeleteBuffers(1, &buffer); return 0; } return buffer; }