long AudioStream::GetUnprocessed(void* aBuffer, long aFrames) { mMonitor.AssertCurrentThreadOwns(); uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer); // Flush the timestretcher pipeline, if we were playing using a playback rate // other than 1.0. uint32_t flushedFrames = 0; if (mTimeStretcher && mTimeStretcher->numSamples()) { flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames); wpos += FramesToBytes(flushedFrames); } uint32_t toPopBytes = FramesToBytes(aFrames - flushedFrames); uint32_t available = std::min(toPopBytes, mBuffer.Length()); void* input[2]; uint32_t input_size[2]; mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]); memcpy(wpos, input[0], input_size[0]); wpos += input_size[0]; memcpy(wpos, input[1], input_size[1]); return BytesToFrames(available) + flushedFrames; }
// cubeb data callback: fill aBuffer with up to aFrames frames of audio.
// Returns the number of frames reported to cubeb as written (padded frames
// included unless we are draining).
long AudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  MOZ_ASSERT(mState != SHUTDOWN, "No data callback after shutdown");
  // Clamp the request to the whole frames currently buffered.
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  MOZ_ASSERT(available % mBytesPerFrame == 0, "Must copy complete frames");
  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;

  // NOTE: wasapi (others?) can call us back *after* stop()/Shutdown() (mState == SHUTDOWN)
  // Bug 996162

  // callback tells us cubeb succeeded initializing
  if (mState == STARTED) {
    mState = RUNNING;
  }

  if (available) {
    // Use the time stretcher only when playing at a rate other than 1.0.
    if (mInRate == mOutRate) {
      servicedFrames = GetUnprocessed(output, aFrames);
    } else {
      servicedFrames = GetTimeStretched(output, aFrames);
    }
    MOZ_ASSERT(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  }

  underrunFrames = aFrames - servicedFrames;

  // Always send audible frames first, and silent frames later.
  // Otherwise it will break the assumption of FrameHistory.
  if (mState != DRAINING) {
    mAudioClock.UpdateFrameHistory(servicedFrames, underrunFrames);
    // Zero-fill the unserviced tail so cubeb plays silence, not stale memory.
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
    if (underrunFrames) {
      MOZ_LOG(gAudioStreamLog, LogLevel::Warning,
              ("AudioStream %p lost %d frames", this, underrunFrames));
    }
    // Count the silence as serviced so cubeb keeps calling us back.
    servicedFrames += underrunFrames;
  } else {
    // Draining: report only real frames so playback can end.
    mAudioClock.UpdateFrameHistory(servicedFrames, 0);
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);

  return servicedFrames;
}
// Initializes the cubeb output stream for this BufferedAudioStream:
// validates arguments, fills cubeb_stream_params, creates the stream, and
// sizes mBuffer for one second of audio.
// Returns NS_ERROR_FAILURE if cubeb is unavailable or stream creation fails,
// NS_ERROR_INVALID_ARG for an unmapped Android stream type.
nsresult BufferedAudioStream::Init(int32_t aNumChannels, int32_t aRate, const dom::AudioChannelType aAudioChannelType)
{
  cubeb* cubebContext = GetCubebContext();

  if (!cubebContext || aNumChannels < 0 || aRate < 0) {
    return NS_ERROR_FAILURE;
  }

  mInRate = mOutRate = aRate;
  mChannels = aNumChannels;

  mDumpFile = OpenDumpFile(this);

  cubeb_stream_params params;
  params.rate = aRate;
  params.channels = aNumChannels;
#if defined(__ANDROID__)
#if defined(MOZ_B2G)
  // B2G maps the DOM audio channel onto a platform stream type.
  params.stream_type = ConvertChannelToCubebType(aAudioChannelType);
#else
  params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
#endif
  if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
    return NS_ERROR_INVALID_ARG;
  }
#endif
  // Sample format is fixed at build time by AUDIO_OUTPUT_FORMAT.
  if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
    params.format = CUBEB_SAMPLE_S16NE;
  } else {
    params.format = CUBEB_SAMPLE_FLOAT32NE;
  }
  mBytesPerFrame = sizeof(AudioDataValue) * aNumChannels;

  mAudioClock.Init();

  {
    cubeb_stream* stream;
    if (cubeb_stream_init(cubebContext, &stream, "BufferedAudioStream", params, GetCubebLatency(), DataCallback_S, StateCallback_S, this) == CUBEB_OK) {
      // Transfer ownership of the raw cubeb stream to our RAII holder.
      mCubebStream.own(stream);
    }
  }
  if (!mCubebStream) {
    return NS_ERROR_FAILURE;
  }

  // Size mBuffer for one second of audio. This value is arbitrary, and was
  // selected based on the observed behaviour of the existing AudioStream
  // implementations.
  uint32_t bufferLimit = FramesToBytes(aRate);
  NS_ABORT_IF_FALSE(bufferLimit % mBytesPerFrame == 0, "Must buffer complete frames");
  mBuffer.SetCapacity(bufferLimit);

  return NS_OK;
}
// cubeb data callback: fill aBuffer with up to aFrames frames, apply volume
// scaling, and pad any underrun with silence (unless draining). Returns the
// number of frames reported as written.
long BufferedAudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  // Clamp the request to the whole frames currently buffered.
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;
  if (available) {
    AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
    // Use the time stretcher only when playing at a rate other than 1.0.
    if (mInRate == mOutRate) {
      servicedFrames = GetUnprocessed(output, aFrames);
    } else {
      servicedFrames = GetTimeStretched(output, aFrames);
    }
    // Apply the combined system and stream volume in place.
    float scaled_volume = float(GetVolumeScale() * mVolume);
    ScaleAudioSamples(output, aFrames * mChannels, scaled_volume);

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  }

  underrunFrames = aFrames - servicedFrames;

  if (mState != DRAINING) {
    // Zero-fill the unserviced tail so cubeb plays silence, not stale memory.
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
#ifdef PR_LOGGING
    if (underrunFrames) {
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("AudioStream %p lost %d frames", this, underrunFrames));
    }
#endif
    mLostFrames += underrunFrames;
    // Count the silence as serviced so cubeb keeps calling us back.
    servicedFrames += underrunFrames;
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);

  mAudioClock.UpdateWritePosition(servicedFrames);
  return servicedFrames;
}
// Fallback for AL_SOFT_buffer_samples: forwards a frame-based upload to the
// core alBufferData call, converting the frame count to a byte size.
void AL_APIENTRY wrap_BufferSamples(ALuint buffer, ALuint samplerate,
                                    ALenum internalformat, ALsizei samples,
                                    ALenum channels, ALenum type,
                                    const ALvoid *data)
{
    const ALsizei byteSize = FramesToBytes(samples, channels, type);
    alBufferData(buffer, internalformat, data, byteSize, samplerate);
}
// aTime is the time in ms the samples were inserted into MediaStreamGraph nsresult AudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames) { MonitorAutoLock mon(mMonitor); if (mState == ERRORED) { return NS_ERROR_FAILURE; } NS_ASSERTION(mState == INITIALIZED || mState == STARTED || mState == RUNNING, "Stream write in unexpected state."); // Downmix to Stereo. if (mChannels > 2 && mChannels <= 8) { DownmixAudioToStereo(const_cast<AudioDataValue*> (aBuf), mChannels, aFrames); } else if (mChannels > 8) { return NS_ERROR_FAILURE; } if (mChannels >= 2 && mIsMonoAudioEnabled) { DownmixStereoToMono(const_cast<AudioDataValue*> (aBuf), aFrames); } const uint8_t* src = reinterpret_cast<const uint8_t*>(aBuf); uint32_t bytesToCopy = FramesToBytes(aFrames); while (bytesToCopy > 0) { uint32_t available = std::min(bytesToCopy, mBuffer.Available()); MOZ_ASSERT(available % mBytesPerFrame == 0, "Must copy complete frames."); mBuffer.AppendElements(src, available); src += available; bytesToCopy -= available; if (bytesToCopy > 0) { // If we are not playing, but our buffer is full, start playing to make // room for soon-to-be-decoded data. if (mState != STARTED && mState != RUNNING) { MOZ_LOG(gAudioStreamLog, LogLevel::Warning, ("Starting stream %p in Write (%u waiting)", this, bytesToCopy)); StartUnlocked(); if (mState == ERRORED) { return NS_ERROR_FAILURE; } } MOZ_LOG(gAudioStreamLog, LogLevel::Warning, ("Stream %p waiting in Write() (%u waiting)", this, bytesToCopy)); mon.Wait(); } } mWritten += aFrames; return NS_OK; }
// Fills aBuffer with up to aFrames frames resampled through the SoundTouch
// time stretcher (playback rate != 1.0). Pops input from mBuffer, feeds it to
// the stretcher, and pulls processed output until the request is satisfied or
// buffered data runs low. Returns the number of frames produced.
// Caller must hold mMonitor.
long AudioStream::GetTimeStretched(void* aBuffer, long aFrames, int64_t &aTimeMs)
{
  mMonitor.AssertCurrentThreadOwns();
  long processedFrames = 0;

  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return 0;
  }

  uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
  double playbackRate = static_cast<double>(mInRate) / mOutRate;
  // Input frames needed scales with the playback rate (round up).
  uint32_t toPopBytes = FramesToBytes(ceil(aFrames * playbackRate));
  uint32_t available = 0;
  bool lowOnBufferedData = false;
  do {
    // Check if we already have enough data in the time stretcher pipeline.
    if (mTimeStretcher->numSamples() <= static_cast<uint32_t>(aFrames)) {
      void* input[2];
      uint32_t input_size[2];
      available = std::min(mBuffer.Length(), toPopBytes);
      if (available != toPopBytes) {
        // Less input than requested: stop looping after this pass.
        lowOnBufferedData = true;
      }
      mBuffer.PopElements(available, &input[0], &input_size[0],
                                     &input[1], &input_size[1]);
      mReadPoint += BytesToFrames(available);
      // The ring buffer may return the data in two segments; feed both.
      for(uint32_t i = 0; i < 2; i++) {
        mTimeStretcher->putSamples(reinterpret_cast<AudioDataValue*>(input[i]), BytesToFrames(input_size[i]));
      }
    }
    uint32_t receivedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames - processedFrames);
    wpos += FramesToBytes(receivedFrames);
    processedFrames += receivedFrames;
  } while (processedFrames < aFrames && !lowOnBufferedData);

  GetBufferInsertTime(aTimeMs);

  return processedFrames;
}
// Initializes this AudioStream: validates arguments, derives stream
// parameters (downmixing >2 channels to stereo), sizes mBuffer for one
// second of audio, then opens the cubeb stream via OpenCubeb().
nsresult AudioStream::Init(int32_t aNumChannels, int32_t aRate, const dom::AudioChannel aAudioChannel)
{
  mStartTime = TimeStamp::Now();
  mIsFirst = CubebUtils::GetFirstStream();

  if (!CubebUtils::GetCubebContext() || aNumChannels < 0 || aRate < 0) {
    return NS_ERROR_FAILURE;
  }

  MOZ_LOG(gAudioStreamLog, LogLevel::Debug,
    ("%s  channels: %d, rate: %d for %p", __FUNCTION__, aNumChannels, aRate, this));
  mInRate = mOutRate = aRate;
  mChannels = aNumChannels;
  // Output is capped at stereo; Write() downmixes higher channel counts.
  mOutChannels = (aNumChannels > 2) ? 2 : aNumChannels;

  mDumpFile = OpenDumpFile(this);

  cubeb_stream_params params;
  params.rate = aRate;
  params.channels = mOutChannels;
#if defined(__ANDROID__)
#if defined(MOZ_B2G)
  // B2G maps the DOM audio channel onto a platform stream type.
  mAudioChannel = aAudioChannel;
  params.stream_type = CubebUtils::ConvertChannelToCubebType(aAudioChannel);
#else
  mAudioChannel = dom::AudioChannel::Content;
  params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
#endif

  if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
    return NS_ERROR_INVALID_ARG;
  }
#endif
  // Sample format is fixed at build time by AUDIO_OUTPUT_FORMAT.
  if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
    params.format = CUBEB_SAMPLE_S16NE;
  } else {
    params.format = CUBEB_SAMPLE_FLOAT32NE;
  }
  mBytesPerFrame = sizeof(AudioDataValue) * mOutChannels;

  mAudioClock.Init();

  // Size mBuffer for one second of audio.  This value is arbitrary, and was
  // selected based on the observed behaviour of the existing AudioStream
  // implementations.
  uint32_t bufferLimit = FramesToBytes(aRate);
  MOZ_ASSERT(bufferLimit % mBytesPerFrame == 0, "Must buffer complete frames");
  mBuffer.SetCapacity(bufferLimit);

  return OpenCubeb(params);
}
/* Opens the first audio stream of the named file. If a file is already open,
 * it will be closed first. Returns 1 on success, 0 on failure (with the
 * player's file/stream state cleared). */
static int OpenPlayerFile(StreamPlayer *player, const char *filename)
{
    ClosePlayerFile(player);

    /* Open the file and get the first stream from it */
    player->file = openAVFile(filename);
    player->stream = getAVAudioStream(player->file, 0);
    if(!player->stream)
    {
        fprintf(stderr, "Could not open audio in %s\n", filename);
        goto error;
    }

    /* Get the stream format, and figure out the OpenAL format */
    if(getAVAudioInfo(player->stream, &player->rate, &player->channels,
                      &player->type) != 0)
    {
        fprintf(stderr, "Error getting audio info for %s\n", filename);
        goto error;
    }

    /* GetFormat returns 0 when the channel/type combination is unsupported. */
    player->format = GetFormat(player->channels, player->type, alIsBufferFormatSupportedSOFT);
    if(player->format == 0)
    {
        fprintf(stderr, "Unsupported format (%s, %s) for %s\n",
                ChannelsName(player->channels), TypeName(player->type),
                filename);
        goto error;
    }

    /* Allocate enough space for the temp buffer, given the format */
    player->datasize = FramesToBytes(BUFFER_SIZE, player->channels, player->type);
    player->data = malloc(player->datasize);
    if(player->data == NULL)
    {
        fprintf(stderr, "Error allocating %d bytes\n", player->datasize);
        goto error;
    }

    return 1;

error:
    /* Shared cleanup path: release the file handle and reset state. */
    closeAVFile(player->file);
    player->file = NULL;
    player->stream = NULL;
    player->datasize = 0;

    return 0;
}
// Re-creates the underlying cubeb stream for a low-latency AudioStream:
// rebuilds stream params from current members, resets mBuffer, and
// dispatches an asynchronous AudioInitTask to reopen cubeb off-thread.
void AudioStream::Reset()
{
  MOZ_ASSERT(mLatencyRequest == LowLatency, "We should only be reseting low latency streams");

  // Drop pending frames and request a (re)start once cubeb is ready.
  mShouldDropFrames = true;
  mNeedsStart = true;

  cubeb_stream_params params;
  params.rate = mInRate;
  params.channels = mOutChannels;
#if defined(__ANDROID__)
#if defined(MOZ_B2G)
  params.stream_type = CubebUtils::ConvertChannelToCubebType(mAudioChannel);
#else
  params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
#endif

  if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
    return;
  }
#endif
  // Sample format is fixed at build time by AUDIO_OUTPUT_FORMAT.
  if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
    params.format = CUBEB_SAMPLE_S16NE;
  } else {
    params.format = CUBEB_SAMPLE_FLOAT32NE;
  }
  mBytesPerFrame = sizeof(AudioDataValue) * mOutChannels;

  // Size mBuffer for one second of audio.  This value is arbitrary, and was
  // selected based on the observed behaviour of the existing AudioStream
  // implementations.
  uint32_t bufferLimit = FramesToBytes(mInRate);
  MOZ_ASSERT(bufferLimit % mBytesPerFrame == 0, "Must buffer complete frames");
  mBuffer.Reset();
  mBuffer.SetCapacity(bufferLimit);

  // Don't block this thread to initialize a cubeb stream.
  // When this is done, it will start callbacks from Cubeb.  Those will
  // cause us to move from INITIALIZED to RUNNING.  Until then, we
  // can't access any cubeb functions.
  // Use a RefPtr to avoid leaks if Dispatch fails
  RefPtr<AudioInitTask> init = new AudioInitTask(this, mLatencyRequest, params);
  init->Dispatch();
}
int psm_record::start(int gap, int rate){ alGetError(); int channels = AL_MONO_SOFT; int type = AL_SHORT_SOFT; context_ = &rc_; context_->gap = gap; context_->channels = channels; context_->type = type; context_->rate = rate; int SSIZE = rate * FramesToBytes(1, channels, type) * 1/*1000ms, samples bytes*/; context_->format = GetFormat(channels, type, alIsBufferFormatSupportedSOFT); ALCdevice *device = alcCaptureOpenDevice(NULL, context_->rate, context_->format, SSIZE); if (alGetError() != AL_NO_ERROR) { return -1; } alcCaptureStart(device); context_->device = device; return 0; }
// Appends aFrames frames from aBuf to mBuffer. Blocks (mon.Wait) when the
// buffer is full until the data callback frees space, starting the stream
// if necessary. Returns NS_ERROR_FAILURE on an errored or missing stream.
nsresult BufferedAudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames)
{
  MonitorAutoLock mon(mMonitor);
  if (!mCubebStream || mState == ERRORED) {
    return NS_ERROR_FAILURE;
  }
  NS_ASSERTION(mState == INITIALIZED || mState == STARTED,
    "Stream write in unexpected state.");

  const uint8_t* src = reinterpret_cast<const uint8_t*>(aBuf);
  uint32_t bytesToCopy = FramesToBytes(aFrames);

  while (bytesToCopy > 0) {
    // Copy as much as fits; wait for the callback to drain if it doesn't.
    uint32_t available = std::min(bytesToCopy, mBuffer.Available());
    NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0,
        "Must copy complete frames.");

    mBuffer.AppendElements(src, available);
    src += available;
    bytesToCopy -= available;

    if (bytesToCopy > 0) {
      // If we are not playing, but our buffer is full, start playing to make
      // room for soon-to-be-decoded data.
      if (mState != STARTED) {
        StartUnlocked();
        if (mState != STARTED) {
          return NS_ERROR_FAILURE;
        }
      }
      // Block until DataCallback notifies that space is available.
      mon.Wait();
    }
  }

  mWritten += aFrames;
  return NS_OK;
}
// Get unprocessed samples, and pad the beginning of the buffer with silence if // there is not enough data. long AudioStream::GetUnprocessedWithSilencePadding(void* aBuffer, long aFrames, int64_t& aTimeMs) { uint32_t toPopBytes = FramesToBytes(aFrames); uint32_t available = std::min(toPopBytes, mBuffer.Length()); uint32_t silenceOffset = toPopBytes - available; uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer); memset(wpos, 0, silenceOffset); wpos += silenceOffset; void* input[2]; uint32_t input_size[2]; mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]); memcpy(wpos, input[0], input_size[0]); wpos += input_size[0]; memcpy(wpos, input[1], input_size[1]); GetBufferInsertTime(aTimeMs); return aFrames; }
/* Demonstrates ALC_SOFT_loopback: renders OpenAL output into a buffer that
 * an SDL audio callback plays back. Returns 0 on success, 1 on any setup
 * failure. */
int main()
{
    PlaybackInfo playback = { NULL, NULL, 0 };
    SDL_AudioSpec desired, obtained;
    ALuint source, buffer;
    ALCint attrs[16];
    ALenum state;

    /* Print out error if extension is missing. */
    if(!alcIsExtensionPresent(NULL, "ALC_SOFT_loopback"))
    {
        fprintf(stderr, "Error: ALC_SOFT_loopback not supported!\n");
        return 1;
    }

    /* Define a macro to help load the function pointers. */
#define LOAD_PROC(x)  ((x) = alcGetProcAddress(NULL, #x))
    LOAD_PROC(alcLoopbackOpenDeviceSOFT);
    LOAD_PROC(alcIsRenderFormatSupportedSOFT);
    LOAD_PROC(alcRenderSamplesSOFT);
#undef LOAD_PROC

    if(SDL_Init(SDL_INIT_AUDIO) == -1)
    {
        fprintf(stderr, "Failed to init SDL audio: %s\n", SDL_GetError());
        return 1;
    }

    /* Set up SDL audio with our requested format and callback. */
    desired.channels = 2;
    desired.format = AUDIO_S16SYS;
    desired.freq = 44100;
    desired.padding = 0;
    desired.samples = 4096;
    desired.callback = RenderSDLSamples;
    desired.userdata = &playback;
    if(SDL_OpenAudio(&desired, &obtained) != 0)
    {
        SDL_Quit();
        fprintf(stderr, "Failed to open SDL audio: %s\n", SDL_GetError());
        return 1;
    }

    /* Set up our OpenAL attributes based on what we got from SDL. */
    attrs[0] = ALC_FORMAT_CHANNELS_SOFT;
    if(obtained.channels == 1)
        attrs[1] = ALC_MONO_SOFT;
    else if(obtained.channels == 2)
        attrs[1] = ALC_STEREO_SOFT;
    else
    {
        fprintf(stderr, "Unhandled SDL channel count: %d\n", obtained.channels);
        goto error;
    }

    attrs[2] = ALC_FORMAT_TYPE_SOFT;
    if(obtained.format == AUDIO_U8)
        attrs[3] = ALC_UNSIGNED_BYTE_SOFT;
    else if(obtained.format == AUDIO_S8)
        attrs[3] = ALC_BYTE_SOFT;
    else if(obtained.format == AUDIO_U16SYS)
        attrs[3] = ALC_UNSIGNED_SHORT_SOFT;
    else if(obtained.format == AUDIO_S16SYS)
        attrs[3] = ALC_SHORT_SOFT;
    else
    {
        fprintf(stderr, "Unhandled SDL format: 0x%04x\n", obtained.format);
        goto error;
    }

    attrs[4] = ALC_FREQUENCY;
    attrs[5] = obtained.freq;

    attrs[6] = 0; /* end of list */

    /* Initialize OpenAL loopback device, using our format attributes. */
    playback.Device = alcLoopbackOpenDeviceSOFT(NULL);
    if(!playback.Device)
    {
        fprintf(stderr, "Failed to open loopback device!\n");
        goto error;
    }
    /* Make sure the format is supported before setting them on the device. */
    if(alcIsRenderFormatSupportedSOFT(playback.Device, attrs[5], attrs[1],
                                      attrs[3]) == ALC_FALSE)
    {
        fprintf(stderr, "Render format not supported: %s, %s, %dhz\n",
                ChannelsName(attrs[1]), TypeName(attrs[3]), attrs[5]);
        goto error;
    }
    playback.Context = alcCreateContext(playback.Device, attrs);
    if(!playback.Context || alcMakeContextCurrent(playback.Context) == ALC_FALSE)
    {
        fprintf(stderr, "Failed to set an OpenAL audio context\n");
        goto error;
    }
    /* Cache the per-frame byte size for the SDL render callback. */
    playback.FrameSize = FramesToBytes(1, attrs[1], attrs[3]);

    /* Start SDL playing. Our callback (thus alcRenderSamplesSOFT) will now
     * start being called regularly to update the AL playback state. */
    SDL_PauseAudio(0);

    /* Load the sound into a buffer. */
    buffer = CreateSineWave();
    if(!buffer)
    {
        SDL_CloseAudio();
        alcDestroyContext(playback.Context);
        alcCloseDevice(playback.Device);
        SDL_Quit();
        return 1;
    }

    /* Create the source to play the sound with. */
    source = 0;
    alGenSources(1, &source);
    alSourcei(source, AL_BUFFER, buffer);
    assert(alGetError()==AL_NO_ERROR && "Failed to setup sound source");

    /* Play the sound until it finishes. */
    alSourcePlay(source);
    do {
        Sleep(10);
        alGetSourcei(source, AL_SOURCE_STATE, &state);
    } while(alGetError() == AL_NO_ERROR && state == AL_PLAYING);

    /* All done. Delete resources, and close OpenAL. */
    alDeleteSources(1, &source);
    alDeleteBuffers(1, &buffer);

    /* Stop SDL playing. */
    SDL_PauseAudio(1);

    /* Close up OpenAL and SDL. */
    SDL_CloseAudio();
    alcDestroyContext(playback.Context);
    alcCloseDevice(playback.Device);
    SDL_Quit();

    return 0;

error:
    /* Shared failure path: tear down whatever was created, in reverse order. */
    SDL_CloseAudio();
    if(playback.Context)
        alcDestroyContext(playback.Context);
    if(playback.Device)
        alcCloseDevice(playback.Device);
    SDL_Quit();

    return 1;
}
// Converts a duration in milliseconds to the equivalent number of capture
// bytes for this context's rate, channel layout and sample type.
size_t psm_record::calc_bytes_with_ms(size_t ms) const{
  const float frames = context_->rate * (ms / 1000.0f);
  return FramesToBytes(frames, context_->channels, context_->type);
}
// Converts a byte count to a frame count for the given channel layout and
// sample type. Integer division discards any trailing partial frame.
ALsizei BytesToFrames(ALsizei size, ALenum channels, ALenum type)
{
    const ALsizei frameSize = FramesToBytes(1, channels, type);
    return size / frameSize;
}
// Initializes this AudioStream synchronously: validates arguments, derives
// stream parameters (downmixing >2 channels to stereo), picks a latency based
// on aLatencyRequest, creates the cubeb stream, and sizes mBuffer for one
// second of audio. Low-latency streams are started immediately.
nsresult AudioStream::Init(int32_t aNumChannels, int32_t aRate, const dom::AudioChannelType aAudioChannelType, LatencyRequest aLatencyRequest)
{
  cubeb* cubebContext = GetCubebContext();

  if (!cubebContext || aNumChannels < 0 || aRate < 0) {
    return NS_ERROR_FAILURE;
  }

  PR_LOG(gAudioStreamLog, PR_LOG_DEBUG,
    ("%s  channels: %d, rate: %d", __FUNCTION__, aNumChannels, aRate));
  mInRate = mOutRate = aRate;
  mChannels = aNumChannels;
  // Output is capped at stereo; Write() downmixes higher channel counts.
  mOutChannels = (aNumChannels > 2) ? 2 : aNumChannels;
  mLatencyRequest = aLatencyRequest;

  mDumpFile = OpenDumpFile(this);

  cubeb_stream_params params;
  params.rate = aRate;
  params.channels = mOutChannels;
#if defined(__ANDROID__)
#if defined(MOZ_B2G)
  // B2G maps the DOM audio channel onto a platform stream type.
  params.stream_type = ConvertChannelToCubebType(aAudioChannelType);
#else
  params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
#endif

  if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
    return NS_ERROR_INVALID_ARG;
  }
#endif
  // Sample format is fixed at build time by AUDIO_OUTPUT_FORMAT.
  if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
    params.format = CUBEB_SAMPLE_S16NE;
  } else {
    params.format = CUBEB_SAMPLE_FLOAT32NE;
  }
  mBytesPerFrame = sizeof(AudioDataValue) * mOutChannels;

  mAudioClock.Init();

  // If the latency pref is set, use it. Otherwise, if this stream is intended
  // for low latency playback, try to get the lowest latency possible.
  // Otherwise, for normal streams, use 100ms.
  uint32_t latency;
  if (aLatencyRequest == LowLatency && !CubebLatencyPrefSet()) {
    if (cubeb_get_min_latency(cubebContext, params, &latency) != CUBEB_OK) {
      latency = GetCubebLatency();
    }
  } else {
    latency = GetCubebLatency();
  }

  {
    cubeb_stream* stream;
    if (cubeb_stream_init(cubebContext, &stream, "AudioStream", params, latency, DataCallback_S, StateCallback_S, this) == CUBEB_OK) {
      // Transfer ownership of the raw cubeb stream to our RAII holder.
      mCubebStream.own(stream);
    }
  }
  if (!mCubebStream) {
    return NS_ERROR_FAILURE;
  }

  // Size mBuffer for one second of audio.  This value is arbitrary, and was
  // selected based on the observed behaviour of the existing AudioStream
  // implementations.
  uint32_t bufferLimit = FramesToBytes(aRate);
  NS_ABORT_IF_FALSE(bufferLimit % mBytesPerFrame == 0, "Must buffer complete frames");
  mBuffer.SetCapacity(bufferLimit);

  // Start the stream right away when low latency has been requested. This means
  // that the DataCallback will feed silence to cubeb, until the first frames
  // are writtent to this AudioStream.
  if (mLatencyRequest == LowLatency) {
    Start();
  }

  return NS_OK;
}
// aTime is the time in ms the samples were inserted into MediaStreamGraph nsresult AudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames, TimeStamp *aTime) { MonitorAutoLock mon(mMonitor); if (!mCubebStream || mState == ERRORED) { return NS_ERROR_FAILURE; } NS_ASSERTION(mState == INITIALIZED || mState == STARTED, "Stream write in unexpected state."); // Downmix to Stereo. if (mChannels > 2 && mChannels <= 8) { DownmixAudioToStereo(const_cast<AudioDataValue*> (aBuf), mChannels, aFrames); } else if (mChannels > 8) { return NS_ERROR_FAILURE; } const uint8_t* src = reinterpret_cast<const uint8_t*>(aBuf); uint32_t bytesToCopy = FramesToBytes(aFrames); // XXX this will need to change if we want to enable this on-the-fly! if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) { // Record the position and time this data was inserted int64_t timeMs; if (aTime && !aTime->IsNull()) { if (mStartTime.IsNull()) { AsyncLatencyLogger::Get(true)->GetStartTime(mStartTime); } timeMs = (*aTime - mStartTime).ToMilliseconds(); } else { timeMs = 0; } struct Inserts insert = { timeMs, aFrames}; mInserts.AppendElement(insert); } while (bytesToCopy > 0) { uint32_t available = std::min(bytesToCopy, mBuffer.Available()); NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames."); mBuffer.AppendElements(src, available); src += available; bytesToCopy -= available; if (bytesToCopy > 0) { // If we are not playing, but our buffer is full, start playing to make // room for soon-to-be-decoded data. if (mState != STARTED) { StartUnlocked(); if (mState != STARTED) { return NS_ERROR_FAILURE; } } mon.Wait(); } } mWritten += aFrames; return NS_OK; }
// cubeb data callback: fill aBuffer with up to aFrames frames, apply volume
// scaling, pad underruns with silence (unless draining), and log latency
// measurements when the latency log is enabled. Returns the number of frames
// reported to cubeb as written.
long AudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  // Clamp the request to the whole frames currently buffered.
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;
  int64_t insertTime;

  if (available) {
    // When we are playing a low latency stream, and it is the first time we are
    // getting data from the buffer, we prefer to add the silence for an
    // underrun at the beginning of the buffer, so the first buffer is not cut
    // in half by the silence inserted to compensate for the underrun.
    if (mInRate == mOutRate) {
      if (mLatencyRequest == LowLatency && !mWritten) {
        servicedFrames = GetUnprocessedWithSilencePadding(output, aFrames, insertTime);
      } else {
        servicedFrames = GetUnprocessed(output, aFrames, insertTime);
      }
    } else {
      servicedFrames = GetTimeStretched(output, aFrames, insertTime);
    }
    // Apply the combined system and stream volume in place.
    float scaled_volume = float(GetVolumeScale() * mVolume);
    ScaleAudioSamples(output, aFrames * mOutChannels, scaled_volume);

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  } else {
    // Nothing popped this round; still fetch the insertion time for logging.
    GetBufferInsertTime(insertTime);
  }

  underrunFrames = aFrames - servicedFrames;

  if (mState != DRAINING) {
    // Zero-fill the unserviced tail so cubeb plays silence, not stale memory.
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
    if (underrunFrames) {
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("AudioStream %p lost %d frames", this, underrunFrames));
    }
    mLostFrames += underrunFrames;
    // Count the silence as serviced so cubeb keeps calling us back.
    servicedFrames += underrunFrames;
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
  // Don't log if we're not interested or if the stream is inactive
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG) &&
      insertTime != INT64_MAX && servicedFrames > underrunFrames) {
    uint32_t latency = UINT32_MAX;
    if (cubeb_stream_get_latency(mCubebStream, &latency)) {
      NS_WARNING("Could not get latency from cubeb.");
    }
    TimeStamp now = TimeStamp::Now();

    mLatencyLog->Log(AsyncLatencyLogger::AudioStream, reinterpret_cast<uint64_t>(this),
                     insertTime, now);
    // Convert the cubeb latency from frames to milliseconds.
    mLatencyLog->Log(AsyncLatencyLogger::Cubeb, reinterpret_cast<uint64_t>(mCubebStream.get()),
                     (latency * 1000) / mOutRate, now);
  }

  mAudioClock.UpdateWritePosition(servicedFrames);
  return servicedFrames;
}
// aTime is the time in ms the samples were inserted into MediaStreamGraph
// Appends aFrames frames from aBuf to mBuffer (downmixing >2 channels to
// stereo in place first). Low-latency streams never block here: when the
// buffer is full, the oldest samples are dropped instead. High-latency
// streams block (mon.Wait) until the data callback frees space.
nsresult AudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames, TimeStamp *aTime)
{
  MonitorAutoLock mon(mMonitor);
  if (mState == ERRORED) {
    return NS_ERROR_FAILURE;
  }
  NS_ASSERTION(mState == INITIALIZED || mState == STARTED || mState == RUNNING,
    "Stream write in unexpected state.");

  // See if we need to start() the stream, since we must do that from this thread
  CheckForStart();

  // Downmix to Stereo.
  // NOTE(review): downmix mutates the caller's buffer in place via const_cast.
  if (mChannels > 2 && mChannels <= 8) {
    DownmixAudioToStereo(const_cast<AudioDataValue*> (aBuf), mChannels, aFrames);
  }
  else if (mChannels > 8) {
    return NS_ERROR_FAILURE;
  }

  const uint8_t* src = reinterpret_cast<const uint8_t*>(aBuf);
  uint32_t bytesToCopy = FramesToBytes(aFrames);

  // XXX this will need to change if we want to enable this on-the-fly!
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
    // Record the position and time this data was inserted
    int64_t timeMs;
    if (aTime && !aTime->IsNull()) {
      if (mStartTime.IsNull()) {
        AsyncLatencyLogger::Get(true)->GetStartTime(mStartTime);
      }
      timeMs = (*aTime - mStartTime).ToMilliseconds();
    } else {
      timeMs = 0;
    }
    struct Inserts insert = { timeMs, aFrames};
    mInserts.AppendElement(insert);
  }

  while (bytesToCopy > 0) {
    uint32_t available = std::min(bytesToCopy, mBuffer.Available());
    NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0,
        "Must copy complete frames.");

    mBuffer.AppendElements(src, available);
    src += available;
    bytesToCopy -= available;

    if (bytesToCopy > 0) {
      // Careful - the CubebInit thread may not have gotten to STARTED yet
      if ((mState == INITIALIZED || mState == STARTED) && mLatencyRequest == LowLatency) {
        // don't ever block MediaStreamGraph low-latency streams
        uint32_t remains = 0; // we presume the buffer is full
        if (mBuffer.Length() > bytesToCopy) {
          remains = mBuffer.Length() - bytesToCopy; // Free up just enough space
        }
        // account for dropping samples
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING, ("Stream %p dropping %u bytes (%u frames)in Write()",
            this, mBuffer.Length() - remains, BytesToFrames(mBuffer.Length() - remains)));
        mReadPoint += BytesToFrames(mBuffer.Length() - remains);
        mBuffer.ContractTo(remains);
      } else { // RUNNING or high latency
        // If we are not playing, but our buffer is full, start playing to make
        // room for soon-to-be-decoded data.
        if (mState != STARTED && mState != RUNNING) {
          PR_LOG(gAudioStreamLog, PR_LOG_WARNING, ("Starting stream %p in Write (%u waiting)",
                                                 this, bytesToCopy));
          StartUnlocked();
          if (mState == ERRORED) {
            return NS_ERROR_FAILURE;
          }
        }
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING, ("Stream %p waiting in Write() (%u waiting)",
                                                 this, bytesToCopy));
        // Block until DataCallback notifies that space is available.
        mon.Wait();
      }
    }
  }

  mWritten += aFrames;
  return NS_OK;
}
// NOTE: this must not block a LowLatency stream for any significant amount // of time, or it will block the entirety of MSG nsresult AudioStream::Init(int32_t aNumChannels, int32_t aRate, const dom::AudioChannel aAudioChannel, LatencyRequest aLatencyRequest) { mStartTime = TimeStamp::Now(); mIsFirst = GetFirstStream(); if (!GetCubebContext() || aNumChannels < 0 || aRate < 0) { return NS_ERROR_FAILURE; } PR_LOG(gAudioStreamLog, PR_LOG_DEBUG, ("%s channels: %d, rate: %d for %p", __FUNCTION__, aNumChannels, aRate, this)); mInRate = mOutRate = aRate; mChannels = aNumChannels; mOutChannels = (aNumChannels > 2) ? 2 : aNumChannels; mLatencyRequest = aLatencyRequest; mDumpFile = OpenDumpFile(this); cubeb_stream_params params; params.rate = aRate; params.channels = mOutChannels; #if defined(__ANDROID__) #if defined(MOZ_B2G) params.stream_type = ConvertChannelToCubebType(aAudioChannel); #else params.stream_type = CUBEB_STREAM_TYPE_MUSIC; #endif if (params.stream_type == CUBEB_STREAM_TYPE_MAX) { return NS_ERROR_INVALID_ARG; } #endif if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) { params.format = CUBEB_SAMPLE_S16NE; } else { params.format = CUBEB_SAMPLE_FLOAT32NE; } mBytesPerFrame = sizeof(AudioDataValue) * mOutChannels; mAudioClock.Init(); // Size mBuffer for one second of audio. This value is arbitrary, and was // selected based on the observed behaviour of the existing AudioStream // implementations. uint32_t bufferLimit = FramesToBytes(aRate); NS_ABORT_IF_FALSE(bufferLimit % mBytesPerFrame == 0, "Must buffer complete frames"); mBuffer.SetCapacity(bufferLimit); if (aLatencyRequest == LowLatency) { // Don't block this thread to initialize a cubeb stream. // When this is done, it will start callbacks from Cubeb. Those will // cause us to move from INITIALIZED to RUNNING. Until then, we // can't access any cubeb functions. 
// Use a RefPtr to avoid leaks if Dispatch fails RefPtr<AudioInitTask> init = new AudioInitTask(this, aLatencyRequest, params); init->Dispatch(); return NS_OK; } // High latency - open synchronously nsresult rv = OpenCubeb(params, aLatencyRequest); // See if we need to start() the stream, since we must do that from this // thread for now (cubeb API issue) { MonitorAutoLock mon(mMonitor); CheckForStart(); } return rv; }
// Real-time callback invoked by cubeb to pull up to aFrames frames of audio
// into aBuffer.  Pops buffered data (optionally time-stretched or padded with
// leading silence for the first low-latency buffer), applies volume, pads any
// shortfall with silence, updates the playback clock, and records latency.
// Returns the number of frames written into aBuffer (always aFrames unless
// DRAINING and the buffer ran dry).
// Runs with mMonitor held for the whole callback; wakes blocked Write() calls.
long AudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  MOZ_ASSERT(mState != SHUTDOWN, "No data callback after shutdown");
  // Bytes we can service from the ring buffer, capped at the request size.
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;
  int64_t insertTime;

  // NOTE: wasapi (others?) can call us back *after* stop()/Shutdown() (mState == SHUTDOWN)
  // Bug 996162

  // callback tells us cubeb succeeded initializing
  if (mState == STARTED) {
    // For low-latency streams, we want to minimize any built-up data when
    // we start getting callbacks.
    // Simple version - contract on first callback only.
    if (mLatencyRequest == LowLatency) {
#ifdef PR_LOGGING
      uint32_t old_len = mBuffer.Length();
#endif
      // Drop everything beyond one callback's worth of data.
      available = mBuffer.ContractTo(FramesToBytes(aFrames));
#ifdef PR_LOGGING
      TimeStamp now = TimeStamp::Now();
      if (!mStartTime.IsNull()) {
        int64_t timeMs = (now - mStartTime).ToMilliseconds();
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("Stream took %lldms to start after first Write() @ %u", timeMs, mOutRate));
      } else {
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("Stream started before Write() @ %u", mOutRate));
      }
      if (old_len != available) {
        // Note that we may have dropped samples in Write() as well!
        PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
               ("AudioStream %p dropped %u + %u initial frames @ %u", this,
                mReadPoint, BytesToFrames(old_len - available), mOutRate));
        mReadPoint += BytesToFrames(old_len - available);
      }
#endif
    }
    mState = RUNNING;
  }

  if (available) {
    // When we are playing a low latency stream, and it is the first time we are
    // getting data from the buffer, we prefer to add the silence for an
    // underrun at the beginning of the buffer, so the first buffer is not cut
    // in half by the silence inserted to compensate for the underrun.
    if (mInRate == mOutRate) {
      if (mLatencyRequest == LowLatency && !mWritten) {
        servicedFrames = GetUnprocessedWithSilencePadding(output, aFrames, insertTime);
      } else {
        servicedFrames = GetUnprocessed(output, aFrames, insertTime);
      }
    } else {
      // Playback rate != 1.0: pull through the time stretcher.
      servicedFrames = GetTimeStretched(output, aFrames, insertTime);
    }
    // Apply the combined system and per-stream volume in place.
    float scaled_volume = float(GetVolumeScale() * mVolume);
    ScaleAudioSamples(output, aFrames * mOutChannels, scaled_volume);

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  } else {
    GetBufferInsertTime(insertTime);
  }

  underrunFrames = aFrames - servicedFrames;

  // Always send audible frames first, and silent frames later.
  // Otherwise it will break the assumption of FrameHistory.
  if (mState != DRAINING) {
    mAudioClock.UpdateFrameHistory(servicedFrames, underrunFrames);
    // Zero-fill the unserviced tail so the hardware plays silence, then claim
    // the whole request as serviced (non-draining streams never short-return).
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
    if (underrunFrames) {
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("AudioStream %p lost %d frames", this, underrunFrames));
    }
    servicedFrames += underrunFrames;
  } else {
    mAudioClock.UpdateFrameHistory(servicedFrames, 0);
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
  // Don't log if we're not interested or if the stream is inactive
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG) &&
      mState != SHUTDOWN &&
      insertTime != INT64_MAX && servicedFrames > underrunFrames) {
    uint32_t latency = UINT32_MAX;
    // cubeb_stream_get_latency returns non-zero on failure; latency stays
    // UINT32_MAX in that case.
    if (cubeb_stream_get_latency(mCubebStream, &latency)) {
      NS_WARNING("Could not get latency from cubeb.");
    }
    TimeStamp now = TimeStamp::Now();
    mLatencyLog->Log(AsyncLatencyLogger::AudioStream, reinterpret_cast<uint64_t>(this),
                     insertTime, now);
    mLatencyLog->Log(AsyncLatencyLogger::Cubeb, reinterpret_cast<uint64_t>(mCubebStream.get()),
                     (latency * 1000) / mOutRate, now);
  }

  return servicedFrames;
}
// Reads up to `count` sample frames from the WAV data chunk into `ptr`,
// clamped to the end of the data chunk (mEnd).  On big-endian hosts the
// little-endian file data is byte-swapped per sample (4-byte swap for float32,
// 2-byte swap for int16); 8-bit and muLaw data need no swapping.
// Returns the number of whole frames actually read.
ALuint WaveDecoder::read(ALvoid *ptr, ALuint count)
{
    // Clear any sticky error/eof flags from a previous short read.
    mFile->clear();

    auto pos = mFile->tellg();
    size_t len = count * mFrameSize;
    ALuint total = 0;

    if(pos < mEnd)
    {
        // Never read past the end of the data chunk.
        len = std::min<std::istream::pos_type>(len, mEnd-pos);

#ifdef __BIG_ENDIAN__
        switch(mSampleType)
        {
            case SampleType_Float32:
                // Swap each 4-byte sample via a small bounce buffer.
                // NOTE(review): `total` counts bytes inside this loop and is
                // converted to frames afterwards; a final gcount() that is not
                // a multiple of 4 would index temp[i^3] past the bytes read —
                // presumably chunk sizes keep reads frame-aligned; verify.
                while(total < len && mFile->good() && !mFile->eof())
                {
                    char temp[256];
                    size_t todo = std::min(len-total, sizeof(temp));
                    mFile->read(temp, todo);
                    std::streamsize got = mFile->gcount();
                    for(std::streamsize i = 0;i < got;++i)
                        reinterpret_cast<char*>(ptr)[total+i] = temp[i^3];
                    total += got;
                }
                total /= mFrameSize;
                break;

            case SampleType_Int16:
                // Swap each 2-byte sample via a small bounce buffer.
                while(total < len && mFile->good() && !mFile->eof())
                {
                    char temp[256];
                    size_t todo = std::min(len-total, sizeof(temp));
                    mFile->read(temp, todo);
                    std::streamsize got = mFile->gcount();
                    for(std::streamsize i = 0;i < got;++i)
                        reinterpret_cast<char*>(ptr)[total+i] = temp[i^1];
                    total += got;
                }
                total /= mFrameSize;
                break;

            case SampleType_UInt8:
            case SampleType_Mulaw:
#else
        {
#endif
                // Byte-oriented (or little-endian host) data: bulk read.
                mFile->read(reinterpret_cast<char*>(ptr), len);
                total = mFile->gcount() / mFrameSize;
        }
    }

    return total;
}

// Parses the RIFF/WAVE chunk structure of `file` and, if a supported format
// is found, returns a WaveDecoder positioned at the data chunk; otherwise
// returns a null SharedPtr.  Handles plain PCM (0x0001), float PCM (0x0003),
// muLaw (0x0007) and WAVE_FORMAT_EXTENSIBLE (0xFFFE, including B-Format and
// multi-channel layouts), plus optional 'smpl' loop points.
SharedPtr<Decoder> WaveDecoderFactory::createDecoder(SharedPtr<std::istream> file)
{
    ChannelConfig channels = ChannelConfig_Mono;
    SampleType type = SampleType_UInt8;
    ALuint frequency = 0;
    ALuint framesize = 0;
    ALuint loop_pts[2]{0, 0};
    ALuint blockalign = 0;
    ALuint framealign = 0;

    // Validate the RIFF/WAVE header.
    char tag[4]{};
    if(!file->read(tag, 4) || file->gcount() != 4 || memcmp(tag, "RIFF", 4) != 0)
        return SharedPtr<Decoder>(nullptr);
    // RIFF sizes are word-aligned; mask off a trailing odd byte.
    ALuint totalsize = read_le32(*file) & ~1u;
    if(!file->read(tag, 4) || file->gcount() != 4 || memcmp(tag, "WAVE", 4) != 0)
        return SharedPtr<Decoder>(nullptr);

    // Walk the chunk list; each iteration consumes one chunk header (8 bytes)
    // plus its word-padded payload, bounded by the declared RIFF size.
    while(file->good() && !file->eof() && totalsize > 8)
    {
        if(!file->read(tag, 4) || file->gcount() != 4)
            return SharedPtr<Decoder>(nullptr);
        ALuint size = read_le32(*file);
        if(size < 2)
            return SharedPtr<Decoder>(nullptr);
        totalsize -= 8;
        size = std::min((size+1) & ~1u, totalsize);
        totalsize -= size;

        if(memcmp(tag, "fmt ", 4) == 0)
        {
            /* 'fmt ' tag needs at least 16 bytes. */
            if(size < 16) goto next_chunk;

            /* format type */
            ALushort fmttype = read_le16(*file); size -= 2;

            /* mono or stereo data */
            int chancount = read_le16(*file); size -= 2;

            /* sample frequency */
            frequency = read_le32(*file); size -= 4;

            /* skip average bytes per second */
            read_le32(*file); size -= 4;

            /* bytes per block */
            blockalign = read_le16(*file); size -= 2;

            /* bits per sample */
            int bitdepth = read_le16(*file); size -= 2;

            /* Look for any extra data and try to find the format */
            ALuint extrabytes = 0;
            if(size >= 2)
            {
                extrabytes = read_le16(*file);
                size -= 2;
            }
            extrabytes = std::min<ALuint>(extrabytes, size);

            /* Format type should be 0x0001 for integer PCM data, 0x0003 for
             * float PCM data, 0x0007 for muLaw, and 0xFFFE extensible data.
             */
            if(fmttype == 0x0001)
            {
                if(chancount == 1)
                    channels = ChannelConfig_Mono;
                else if(chancount == 2)
                    channels = ChannelConfig_Stereo;
                else
                    goto next_chunk;

                if(bitdepth == 8)
                    type = SampleType_UInt8;
                else if(bitdepth == 16)
                    type = SampleType_Int16;
                else
                    goto next_chunk;
            }
            else if(fmttype == 0x0003)
            {
                if(chancount == 1)
                    channels = ChannelConfig_Mono;
                else if(chancount == 2)
                    channels = ChannelConfig_Stereo;
                else
                    goto next_chunk;

                if(bitdepth == 32)
                    type = SampleType_Float32;
                else
                    goto next_chunk;
            }
            else if(fmttype == 0x0007)
            {
                if(chancount == 1)
                    channels = ChannelConfig_Mono;
                else if(chancount == 2)
                    channels = ChannelConfig_Stereo;
                else
                    goto next_chunk;

                if(bitdepth == 8)
                    type = SampleType_Mulaw;
                else
                    goto next_chunk;
            }
            else if(fmttype == 0xFFFE)
            {
                // WAVE_FORMAT_EXTENSIBLE: channel mask + GUID subtype follow.
                if(size < 22)
                    goto next_chunk;

                char subtype[16];
                ALushort validbits = read_le16(*file); size -= 2;
                ALuint chanmask = read_le32(*file); size -= 4;
                file->read(subtype, 16); size -= file->gcount();

                /* Padded bit depths not supported */
                if(validbits != bitdepth)
                    goto next_chunk;

                if(memcmp(subtype, SUBTYPE_BFORMAT_PCM, 16) == 0 || memcmp(subtype, SUBTYPE_BFORMAT_FLOAT, 16) == 0)
                {
                    // Ambisonic B-Format must not declare a speaker mask.
                    if(chanmask != 0)
                        goto next_chunk;
                    if(chancount == 3)
                        channels = ChannelConfig_BFmt_WXY;
                    else if(chancount == 4)
                        channels = ChannelConfig_BFmt_WXYZ;
                    else
                        goto next_chunk;
                }
                else if(memcmp(subtype, SUBTYPE_PCM, 16) == 0 || memcmp(subtype, SUBTYPE_FLOAT, 16) == 0)
                {
                    // Channel count must match the declared speaker mask.
                    if(chancount == 1 && chanmask == CHANNELS_MONO)
                        channels = ChannelConfig_Mono;
                    else if(chancount == 2 && chanmask == CHANNELS_STEREO)
                        channels = ChannelConfig_Stereo;
                    else if(chancount == 4 && chanmask == CHANNELS_QUAD)
                        channels = ChannelConfig_Quad;
                    else if(chancount == 6 && (chanmask == CHANNELS_5DOT1 || chanmask == CHANNELS_5DOT1_REAR))
                        channels = ChannelConfig_X51;
                    else if(chancount == 7 && chanmask == CHANNELS_6DOT1)
                        channels = ChannelConfig_X61;
                    else if(chancount == 8 && chanmask == CHANNELS_7DOT1)
                        channels = ChannelConfig_X71;
                    else
                        goto next_chunk;
                }

                if(memcmp(subtype, SUBTYPE_PCM, 16) == 0 || memcmp(subtype, SUBTYPE_BFORMAT_PCM, 16) == 0)
                {
                    if(bitdepth == 8)
                        type = SampleType_UInt8;
                    else if(bitdepth == 16)
                        type = SampleType_Int16;
                    else
                        goto next_chunk;
                }
                else if(memcmp(subtype, SUBTYPE_FLOAT, 16) == 0 || memcmp(subtype, SUBTYPE_BFORMAT_FLOAT, 16) == 0)
                {
                    if(bitdepth == 32)
                        type = SampleType_Float32;
                    else
                        goto next_chunk;
                }
                else
                    goto next_chunk;
            }
            else
                goto next_chunk;

            framesize = FramesToBytes(1, channels, type);

            /* Calculate the number of frames per block (ADPCM will need extra
             * consideration). */
            framealign = blockalign / framesize;
        }
        else if(memcmp(tag, "smpl", 4) == 0)
        {
            /* sampler data needs at least 36 bytes */
            if(size < 36) goto next_chunk;

            /* Most of this only affects MIDI sampling, but we only care about
             * the loop definitions at the end. */
            /*ALuint manufacturer =*/ read_le32(*file);
            /*ALuint product =*/ read_le32(*file);
            /*ALuint smpperiod =*/ read_le32(*file);
            /*ALuint unitynote =*/ read_le32(*file);
            /*ALuint pitchfrac =*/ read_le32(*file);
            /*ALuint smptefmt =*/ read_le32(*file);
            /*ALuint smpteoffset =*/ read_le32(*file);
            ALuint loopcount = read_le32(*file);
            /*ALuint extrabytes =*/ read_le32(*file);
            size -= 36;

            for(ALuint i = 0;i < loopcount && size >= 24;++i)
            {
                /*ALuint id =*/ read_le32(*file);
                ALuint type = read_le32(*file);
                ALuint loopstart = read_le32(*file);
                ALuint loopend = read_le32(*file);
                /*ALuint frac =*/ read_le32(*file);
                ALuint numloops = read_le32(*file);
                size -= 24;

                /* Only handle indefinite forward loops. */
                if(type == 0 || numloops == 0)
                {
                    loop_pts[0] = loopstart;
                    loop_pts[1] = loopend;
                    break;
                }
            }
        }
        else if(memcmp(tag, "data", 4) == 0)
        {
            // A 'data' chunk before a usable 'fmt ' chunk cannot be decoded.
            if(framesize == 0) goto next_chunk;

            /* Make sure there's at least one sample frame of audio data. */
            std::istream::pos_type start = file->tellg();
            std::istream::pos_type end = start + std::istream::pos_type(size - (size%framesize));
            if(end-start >= framesize)
            {
                /* Loop points are byte offsets relative to the data start.
                 * Convert to sample frame offsets. */
                // NOTE(review): a malformed 'fmt ' chunk can leave
                // blockalign == 0, making these divisions undefined —
                // verify upstream rejects such files.
                return SharedPtr<Decoder>(new WaveDecoder(file,
                    channels, type, frequency, framesize, start, end,
                    loop_pts[0] / blockalign * framealign,
                    loop_pts[1] / blockalign * framealign
                ));
            }
        }

    next_chunk:
        // Skip any unconsumed remainder of the current chunk.
        if(size > 0)
            file->ignore(size);
    }

    return SharedPtr<Decoder>(nullptr);
}
size_t psm_record::calc_frame_bytes() const{ return FramesToBytes(1, context_->channels, context_->type); }