// Pulls rendered audio for this input and returns the bus holding the result.
// With exactly one rendering connection (and "max" channel-count mode) the
// connected output is pulled directly so it can render in place into
// inPlaceBus. Otherwise every connection is mixed into the internal summing bus.
AudioBus* AudioNodeInput::pull(AudioBus* inPlaceBus, size_t framesToProcess)
{
    ASSERT(context()->isAudioThread());

    const bool singleConnection = numberOfRenderingConnections() == 1
        && node()->internalChannelCountMode() == AudioNode::Max;
    if (singleConnection) {
        // The output will optimize processing using inPlaceBus if it's able.
        return renderingOutput(0)->pull(inPlaceBus, framesToProcess);
    }

    AudioBus* summingBus = internalSummingBus();

    if (numberOfRenderingConnections() == 0) {
        // Not connected to anything: at least produce silence.
        // FIXME: if we wanted to get fancy, we could propagate a 'silent hint'
        // here to optimize the downstream graph processing.
        summingBus->zero();
        return summingBus;
    }

    // Multiple connections: mix them all into the summing bus.
    sumAllConnections(summingBus, framesToProcess);
    return summingBus;
}
//------------------------------------------------------------------------ tresult PLUGIN_API AGainSimple::setBusArrangements (SpeakerArrangement* inputs, int32 numIns, SpeakerArrangement* outputs, int32 numOuts) { if (numIns == 1 && numOuts == 1) { if (inputs[0] == SpeakerArr::kMono && outputs[0] == SpeakerArr::kMono) { AudioBus* bus = FCast<AudioBus> (audioInputs.at (0)); if (bus) { if (bus->getArrangement () != SpeakerArr::kMono) { removeAudioBusses (); addAudioInput (USTRING ("Mono In"), SpeakerArr::kMono); addAudioOutput (USTRING ("Mono Out"), SpeakerArr::kMono); } return kResultOk; } } else { AudioBus* bus = FCast<AudioBus> (audioInputs.at (0)); if (bus) { if (bus->getArrangement () != SpeakerArr::kStereo) { removeAudioBusses (); addAudioInput (USTRING ("Stereo In"), SpeakerArr::kStereo); addAudioOutput (USTRING ("Stereo Out"), SpeakerArr::kStereo); } return kResultOk; } } } return kResultFalse; }
void MediaStreamAudioSourceNode::process(size_t numberOfFrames) { AudioBus* outputBus = output(0)->bus(); if (!audioSourceProvider()) { outputBus->zero(); return; } if (!mediaStream() || m_sourceNumberOfChannels != outputBus->numberOfChannels()) { outputBus->zero(); return; } // Use a tryLock() to avoid contention in the real-time audio thread. // If we fail to acquire the lock then the MediaStream must be in the middle of // a format change, so we output silence in this case. MutexTryLocker tryLocker(m_processLock); if (tryLocker.locked()) audioSourceProvider()->provideInput(outputBus, numberOfFrames); else { // We failed to acquire the lock. outputBus->zero(); } }
void StereoPannerHandler::process(size_t framesToProcess) { AudioBus* outputBus = output(0).bus(); if (!isInitialized() || !input(0).isConnected() || !m_stereoPanner.get()) { outputBus->zero(); return; } AudioBus* inputBus = input(0).bus(); if (!inputBus) { outputBus->zero(); return; } if (m_pan->hasSampleAccurateValues()) { // Apply sample-accurate panning specified by AudioParam automation. ASSERT(framesToProcess <= m_sampleAccuratePanValues.size()); if (framesToProcess <= m_sampleAccuratePanValues.size()) { float* panValues = m_sampleAccuratePanValues.data(); m_pan->calculateSampleAccurateValues(panValues, framesToProcess); m_stereoPanner->panWithSampleAccurateValues(inputBus, outputBus, panValues, framesToProcess); } } else { m_stereoPanner->panToTargetValue(inputBus, outputBus, m_pan->value(), framesToProcess); } }
void MediaStreamAudioSourceNode::process(size_t numberOfFrames) { AudioBus* outputBus = output(0)->bus(); if (!audioSourceProvider()) { outputBus->zero(); return; } if (!mediaStream() || m_sourceNumberOfChannels != outputBus->numberOfChannels()) { outputBus->zero(); return; } // Use std::try_to_lock to avoid contention in the real-time audio thread. // If we fail to acquire the lock then the MediaStream must be in the middle of // a format change, so we output silence in this case. std::unique_lock<Lock> lock(m_processMutex, std::try_to_lock); if (!lock.owns_lock()) { // We failed to acquire the lock. outputBus->zero(); return; } audioSourceProvider()->provideInput(outputBus, numberOfFrames); }
void PannerNode::process(size_t framesToProcess) { AudioBus* destination = output(0)->bus(); if (!isInitialized() || !input(0)->isConnected() || !m_panner.get()) { destination->zero(); return; } AudioBus* source = input(0)->bus(); if (!source) { destination->zero(); return; } // Apply the panning effect. double azimuth; double elevation; getAzimuthElevation(&azimuth, &elevation); m_panner->pan(azimuth, elevation, source, destination, framesToProcess); // Get the distance and cone gain. double totalGain = distanceConeGain(); // Snap to desired gain at the beginning. if (m_lastGain == -1.0) m_lastGain = totalGain; // Apply gain in-place with de-zippering. destination->copyWithGainFrom(*destination, &m_lastGain, totalGain); }
// Sums 'sourceBus' into this bus using speaker-aware mixing for the common
// mono/stereo/5.1 combinations; any other combination falls back to
// discrete channel-wise summing.
void AudioBus::speakersSumFrom(const AudioBus& sourceBus)
{
    // FIXME: Implement down mixing 5.1 to stereo.
    // https://bugs.webkit.org/show_bug.cgi?id=79192
    unsigned numberOfSourceChannels = sourceBus.numberOfChannels();
    unsigned numberOfDestinationChannels = numberOfChannels();

    if (numberOfDestinationChannels == 2 && numberOfSourceChannels == 1) {
        // Handle mono -> stereo case (summing mono channel into both left and right).
        const AudioChannel* sourceChannel = sourceBus.channel(0);
        channel(0)->sumFrom(sourceChannel);
        channel(1)->sumFrom(sourceChannel);
    } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 2) {
        // Handle stereo -> mono case. output += 0.5 * (input.L + input.R).
        // const_cast only because channelByType() is non-const; sourceBus is not modified.
        AudioBus& sourceBusSafe = const_cast<AudioBus&>(sourceBus);
        const float* sourceL = sourceBusSafe.channelByType(ChannelLeft)->data();
        const float* sourceR = sourceBusSafe.channelByType(ChannelRight)->data();
        float* destination = channelByType(ChannelLeft)->mutableData();
        // vsma: destination += scale * source (vector scalar-multiply-add).
        float scale = 0.5;
        vsma(sourceL, 1, &scale, destination, 1, length());
        vsma(sourceR, 1, &scale, destination, 1, length());
    } else if (numberOfDestinationChannels == 6 && numberOfSourceChannels == 1) {
        // Handle mono -> 5.1 case, sum mono channel into center.
        // (Channel index 2 is the center channel in this layout.)
        channel(2)->sumFrom(sourceBus.channel(0));
    } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 6) {
        // Handle 5.1 -> mono case.
        speakersSumFrom5_1_ToMono(sourceBus);
    } else {
        // Fallback for unknown combinations.
        discreteSumFrom(sourceBus);
    }
}
// Pulls one render quantum from the HTMLMediaElement's audio provider,
// resampling to the context rate when the source rate differs. Silence is
// produced when the source format is unknown, the provider is unavailable,
// or the reconfiguration mutex is contended.
void MediaElementAudioSourceNode::process(size_t numberOfFrames)
{
    AudioBus* outputBus = output(0)->bus();

    // Zero source channels / sample rate means the element's output format
    // has not been determined yet.
    if (!m_sourceNumberOfChannels || !m_sourceSampleRate) {
        outputBus->zero();
        return;
    }

    // Use a std::try_to_lock to avoid contention in the real-time audio thread.
    // If we fail to acquire the lock then the HTMLMediaElement must be in the middle of
    // reconfiguring its playback engine, so we output silence in this case.
    std::unique_lock<Lock> lock(m_processMutex, std::try_to_lock);
    if (!lock.owns_lock()) {
        // We failed to acquire the lock.
        outputBus->zero();
        return;
    }

    if (AudioSourceProvider* provider = mediaElement().audioSourceProvider()) {
        if (m_multiChannelResampler.get()) {
            // A resampler exists only when source and context rates differ.
            ASSERT(m_sourceSampleRate != sampleRate());
            m_multiChannelResampler->process(provider, outputBus, numberOfFrames);
        } else {
            // Bypass the resampler completely if the source is at the context's sample-rate.
            ASSERT(m_sourceSampleRate == sampleRate());
            provider->provideInput(outputBus, numberOfFrames);
        }
    } else {
        // Either this port doesn't yet support HTMLMediaElement audio stream access,
        // or the stream is not yet available.
        outputBus->zero();
    }
}
// Applies the gain parameter to the input and writes the result into the
// output bus: sample-accurate per-frame gains when automation is active,
// otherwise a single de-zippered gain value.
void GainNode::process(size_t framesToProcess)
{
    // FIXME: for some cases there is a nice optimization to avoid processing here, and let the gain change
    // happen in the summing junction input of the AudioNode we're connected to.
    // Then we can avoid all of the following:

    AudioBus* outputBus = output(0)->bus();
    ASSERT(outputBus);

    if (!isInitialized() || !input(0)->isConnected())
        outputBus->zero();
    else {
        AudioBus* inputBus = input(0)->bus();

        if (gain()->hasSampleAccurateValues()) {
            // Apply sample-accurate gain scaling for precise envelopes, grain windows, etc.
            ASSERT(framesToProcess <= m_sampleAccurateGainValues.size());
            // NOTE(review): if this guard fails in a release build the output
            // bus is left untouched (stale samples from the previous quantum)
            // -- confirm whether it should be zeroed instead.
            if (framesToProcess <= m_sampleAccurateGainValues.size()) {
                float* gainValues = m_sampleAccurateGainValues.data();
                gain()->calculateSampleAccurateValues(gainValues, framesToProcess);
                outputBus->copyWithSampleAccurateGainValuesFrom(*inputBus, gainValues, framesToProcess);
            }
        } else {
            // Apply the gain with de-zippering into the output bus.
            outputBus->copyWithGainFrom(*inputBus, &m_lastGain, gain()->value());
        }
    }
}
// Negotiates bus layouts with the host for the main input, main output and
// (optionally) a sidechain input bus, recreating any bus whose channel count
// differs from the request. Fails when the requested main in/out channel
// combination is not a legal IO configuration for this plug-in, or when the
// bus count does not match the sidechain configuration.
tresult PLUGIN_API IPlugVST3Plugin::setBusArrangements(SpeakerArrangement* inputs, int32 numIns, SpeakerArrangement* outputs, int32 numOuts)
{
    TRACE;

    // disconnect all io pins, they will be reconnected in process
    SetInputChannelConnections(0, NInChannels(), false);
    SetOutputChannelConnections(0, NOutChannels(), false);

    int32 reqNumInputChannels = SpeakerArr::getChannelCount(inputs[0]); //requested # input channels
    int32 reqNumOutputChannels = SpeakerArr::getChannelCount(outputs[0]);//requested # output channels

    // legal io doesn't consider sidechain inputs
    if (!LegalIO(reqNumInputChannels, reqNumOutputChannels))
    {
        return kResultFalse;
    }

    // handle input
    AudioBus* bus = FCast<AudioBus>(audioInputs.at(0));

    // if existing input bus has a different number of channels to the input bus being connected
    if (bus && SpeakerArr::getChannelCount(bus->getArrangement()) != reqNumInputChannels)
    {
        // Replace the main input bus with one matching the requested count.
        audioInputs.remove(bus);
        addAudioInput(USTRING("Input"), getSpeakerArrForChans(reqNumInputChannels));
    }

    // handle output
    bus = FCast<AudioBus>(audioOutputs.at(0));

    // if existing output bus has a different number of channels to the output bus being connected
    if (bus && SpeakerArr::getChannelCount(bus->getArrangement()) != reqNumOutputChannels)
    {
        // Replace the main output bus with one matching the requested count.
        audioOutputs.remove(bus);
        addAudioOutput(USTRING("Output"), getSpeakerArrForChans(reqNumOutputChannels));
    }

    if (!mScChans && numIns == 1) // No sidechain, every thing OK
    {
        return kResultTrue;
    }

    if (mScChans && numIns == 2) // numIns = num Input BUSes
    {
        int32 reqNumSideChainChannels = SpeakerArr::getChannelCount(inputs[1]); //requested # sidechain input channels

        bus = FCast<AudioBus>(audioInputs.at(1));

        if (bus && SpeakerArr::getChannelCount(bus->getArrangement()) != reqNumSideChainChannels)
        {
            // Replace the sidechain bus; kAux marks it as an auxiliary input.
            audioInputs.remove(bus);
            addAudioInput(USTRING("Sidechain Input"), getSpeakerArrForChans(reqNumSideChainChannels), kAux, 0); // either mono or stereo
        }

        return kResultTrue;
    }

    return kResultFalse;
}
//------------------------------------------------------------------------ tresult PLUGIN_API AudioEffect::getBusArrangement (BusDirection dir, int32 busIndex, SpeakerArrangement& arr) { BusList* busList = getBusList (kAudio, dir); AudioBus* audioBus = busList ? FCast<Vst::AudioBus> (busList->at (busIndex)) : 0; if (audioBus) { arr = audioBus->getArrangement (); return kResultTrue; } return kResultFalse; }
// Returns true if the channel count and frame-size match. bool AudioBus::topologyMatches(const AudioBus& bus) const { if (numberOfChannels() != bus.numberOfChannels()) return false; // channel mismatch // Make sure source bus has enough frames. if (length() > bus.length()) return false; // frame-size mismatch return true; }
// Computes the RMS power (in dB) of the most recent _windowSize frames of
// the input and passes the audio through to the output unchanged.
void PowerMonitorNode::process(ContextRenderLock& r, size_t framesToProcess)
{
    // Deal with the output in case the power monitor node is embedded in a
    // signal chain for some reason. It's merely a pass-through though.
    AudioBus* outputBus = output(0)->bus(r);

    if (!isInitialized() || !input(0)->isConnected()) {
        if (outputBus)
            outputBus->zero();
        return;
    }

    AudioBus* bus = input(0)->bus(r);
    bool isBusGood = bus && bus->numberOfChannels() > 0 && bus->channel(0)->length() >= framesToProcess;
    if (!isBusGood) {
        // Fix: keep the outputBus null-check consistent with the branch above.
        if (outputBus)
            outputBus->zero();
        return;
    }

    // --- power measurement (specific to this node) ---
    {
        std::vector<const float*> channels;
        unsigned numberOfChannels = bus->numberOfChannels();
        for (unsigned i = 0; i < numberOfChannels; ++i)
            channels.push_back(bus->channel(i)->data());

        // Sum squares over the trailing _windowSize frames of this quantum.
        int start = static_cast<int>(framesToProcess) - static_cast<int>(_windowSize);
        int end = static_cast<int>(framesToProcess);
        if (start < 0)
            start = 0;

        float power = 0;
        for (unsigned c = 0; c < numberOfChannels; ++c)
            for (int i = start; i < end; ++i) {
                float p = channels[c][i];
                power += p * p;
            }

        // Protect against accidental overload due to bad values in the input
        // stream. Fix: this clamp previously ran AFTER rms was derived from
        // the raw power, so a zero/NaN/inf power still produced a bogus _db
        // (e.g. logf(0) == -inf). Clamp before computing rms.
        const float kMinPower = 0.000125f;
        if (std::isinf(power) || std::isnan(power) || power < kMinPower)
            power = kMinPower;

        // NOTE(review): normalization uses framesToProcess, not the actual
        // window length (end - start) -- confirm this is intentional when
        // _windowSize < framesToProcess.
        float rms = sqrtf(power / (numberOfChannels * framesToProcess));

        // db = 20 * log10(rms / Vref) with Vref = 1.0.
        _db = 20.0f * logf(rms) / logf(10.0f);
    }
    // --- end power measurement ---

    // For in-place processing, our override of pullInputs() will just pass the
    // audio data through unchanged if the channel count matches from input to
    // output (resulting in inputBus == outputBus). Otherwise, copy across.
    if (bus != outputBus)
        outputBus->copyFrom(*bus);
}
// Renders one quantum from the assigned AudioBuffer, honoring the node's
// start/stop scheduling. Silence is produced when uninitialized, when no
// buffer is set, when a channel-count change is still propagating, or when
// the buffer-change lock is contended.
void AudioBufferSourceNode::process(size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus();

    if (!isInitialized()) {
        outputBus->zero();
        return;
    }

    // The audio thread can't block on this lock, so we call tryLock() instead.
    MutexTryLocker tryLocker(m_processLock);
    if (tryLocker.locked()) {
        if (!buffer()) {
            outputBus->zero();
            return;
        }

        // After calling setBuffer() with a buffer having a different number of channels, there can in rare cases be a slight delay
        // before the output bus is updated to the new number of channels because of use of tryLocks() in the context's updating system.
        // In this case, if the the buffer has just been changed and we're not quite ready yet, then just output silence.
        if (numberOfChannels() != buffer()->numberOfChannels()) {
            outputBus->zero();
            return;
        }

        // Determine which frames of this quantum actually play.
        size_t quantumFrameOffset;
        size_t bufferFramesToProcess;
        updateSchedulingInfo(framesToProcess, outputBus, quantumFrameOffset, bufferFramesToProcess);

        if (!bufferFramesToProcess) {
            outputBus->zero();
            return;
        }

        // Cache writable destination pointers for renderFromBuffer().
        for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
            m_destinationChannels[i] = outputBus->channel(i)->mutableData();

        // Render by reading directly from the buffer.
        if (!renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess)) {
            outputBus->zero();
            return;
        }

        // Real (possibly non-silent) data was written above.
        outputBus->clearSilentFlag();
    } else {
        // Too bad - the tryLock() failed. We must be in the middle of changing buffers and were already outputting silence anyway.
        outputBus->zero();
    }
}
// Allocates a fresh AudioBus with the given shape and sample rate for this
// WebAudioBus, replacing (and deleting) any previously held one.
void WebAudioBus::initialize(unsigned numberOfChannels, size_t length, double sampleRate)
{
#if ENABLE(WEB_AUDIO)
    AudioBus* audioBus = new AudioBus(numberOfChannels, length);
    audioBus->setSampleRate(sampleRate);
    // NOTE(review): m_private owns the previous bus and is deleted through a
    // WebAudioBusPrivate* obtained via static_cast from AudioBus* -- confirm
    // WebAudioBusPrivate's relationship to AudioBus makes this delete safe.
    if (m_private)
        delete m_private;
    m_private = static_cast<WebAudioBusPrivate*>(audioBus);
#else
    // Web Audio support is compiled out; this must never be called.
    ASSERT_NOT_REACHED();
#endif
}
//------------------------------------------------------------------------ tresult PLUGIN_API AGain::setBusArrangements (SpeakerArrangement* inputs, int32 numIns, SpeakerArrangement* outputs, int32 numOuts) { if (numIns == 1 && numOuts == 1) { // the host wants Mono => Mono (or 1 channel -> 1 channel) if (SpeakerArr::getChannelCount (inputs[0]) == 1 && SpeakerArr::getChannelCount (outputs[0]) == 1) { AudioBus* bus = FCast<AudioBus> (audioInputs.at (0)); if (bus) { // check if we are Mono => Mono, if not we need to recreate the buses if (bus->getArrangement () != inputs[0]) { removeAudioBusses (); addAudioInput (STR16 ("Mono In"), inputs[0]); addAudioOutput (STR16 ("Mono Out"), inputs[0]); } return kResultOk; } } // the host wants something else than Mono => Mono, in this case we are always Stereo => Stereo else { AudioBus* bus = FCast<AudioBus> (audioInputs.at (0)); if (bus) { tresult result = kResultFalse; // the host wants 2->2 (could be LsRs -> LsRs) if (SpeakerArr::getChannelCount (inputs[0]) == 2 && SpeakerArr::getChannelCount (outputs[0]) == 2) { removeAudioBusses (); addAudioInput (STR16 ("Stereo In"), inputs[0]); addAudioOutput (STR16 ("Stereo Out"), outputs[0]); result = kResultTrue; } // the host want something different than 1->1 or 2->2 : in this case we want stereo else if (bus->getArrangement () != SpeakerArr::kStereo) { removeAudioBusses (); addAudioInput (STR16 ("Stereo In"), SpeakerArr::kStereo); addAudioOutput (STR16 ("Stereo Out"), SpeakerArr::kStereo); result = kResultFalse; } return result; } } } return kResultFalse; }
// Constructs an AudioBuffer by deep-copying every channel of 'bus' into a
// newly allocated Float32Array, preserving its sample rate and length.
AudioBuffer::AudioBuffer(AudioBus& bus)
    : m_sampleRate(bus.sampleRate())
    , m_length(bus.length())
{
    const unsigned channelCount = bus.numberOfChannels();
    m_channels.reserveCapacity(channelCount);

    for (unsigned channelIndex = 0; channelIndex < channelCount; ++channelIndex) {
        // Copy this channel's samples into a Float32Array that we own.
        auto channelData = Float32Array::create(m_length);
        channelData->setNeuterable(false);
        channelData->setRange(bus.channel(channelIndex)->data(), m_length, 0);
        m_channels.append(WTFMove(channelData));
    }
}
// Pulls audio from the media element's provider (resampling if the source
// rate differs from the context rate). Silence is emitted when the element
// or its format is missing, when the reconfiguration lock is contended, or
// when the CORS check fails -- in the CORS case data is still pulled first
// so the element keeps making playback progress.
void MediaElementAudioSourceHandler::process(size_t numberOfFrames)
{
    AudioBus* outputBus = output(0).bus();

    if (!mediaElement() || !m_sourceNumberOfChannels || !m_sourceSampleRate) {
        outputBus->zero();
        return;
    }

    // Use a tryLock() to avoid contention in the real-time audio thread.
    // If we fail to acquire the lock then the HTMLMediaElement must be in the middle of
    // reconfiguring its playback engine, so we output silence in this case.
    MutexTryLocker tryLocker(m_processLock);
    if (tryLocker.locked()) {
        if (AudioSourceProvider* provider = mediaElement()->audioSourceProvider()) {
            // Grab data from the provider so that the element continues to make progress, even if
            // we're going to output silence anyway.
            if (m_multiChannelResampler.get()) {
                // A resampler exists only when source and context rates differ.
                ASSERT(m_sourceSampleRate != sampleRate());
                m_multiChannelResampler->process(provider, outputBus, numberOfFrames);
            } else {
                // Bypass the resampler completely if the source is at the context's sample-rate.
                ASSERT(m_sourceSampleRate == sampleRate());
                provider->provideInput(outputBus, numberOfFrames);
            }
            // Output silence if we don't have access to the element.
            if (!passesCORSAccessCheck()) {
                if (m_maybePrintCORSMessage) {
                    // Print a CORS message, but just once for each change in the current media
                    // element source, and only if we have a document to print to.
                    m_maybePrintCORSMessage = false;
                    if (context()->executionContext()) {
                        context()->executionContext()->postTask(FROM_HERE,
                            createCrossThreadTask(&MediaElementAudioSourceHandler::printCORSMessage,
                                this, m_currentSrcString));
                    }
                }
                outputBus->zero();
            }
        } else {
            // Either this port doesn't yet support HTMLMediaElement audio stream access,
            // or the stream is not yet available.
            outputBus->zero();
        }
    } else {
        // We failed to acquire the lock.
        outputBus->zero();
    }
}
void AudioBus::discreteSumFrom(const AudioBus& sourceBus) { unsigned numberOfSourceChannels = sourceBus.numberOfChannels(); unsigned numberOfDestinationChannels = numberOfChannels(); if (numberOfDestinationChannels < numberOfSourceChannels) { // Down-mix by summing channels and dropping the remaining. for (unsigned i = 0; i < numberOfDestinationChannels; ++i) channel(i)->sumFrom(sourceBus.channel(i)); } else if (numberOfDestinationChannels > numberOfSourceChannels) { // Up-mix by summing as many channels as we have. for (unsigned i = 0; i < numberOfSourceChannels; ++i) channel(i)->sumFrom(sourceBus.channel(i)); } }
// Renders one quantum from the assigned AudioBuffer and applies the node's
// gain (combined with the buffer's own gain) in place with de-zippering.
// Silence is produced when uninitialized, bufferless, outside the scheduled
// window, or when the buffer-change lock is contended.
void AudioBufferSourceNode::process(size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus();

    if (!isInitialized()) {
        outputBus->zero();
        return;
    }

    // The audio thread can't block on this lock, so we call tryLock() instead.
    MutexTryLocker tryLocker(m_processLock);
    if (tryLocker.locked()) {
        if (!buffer()) {
            outputBus->zero();
            return;
        }

        // Determine which frames of this quantum actually play.
        size_t quantumFrameOffset;
        size_t bufferFramesToProcess;
        updateSchedulingInfo(framesToProcess, outputBus, quantumFrameOffset, bufferFramesToProcess);

        if (!bufferFramesToProcess) {
            outputBus->zero();
            return;
        }

        // Cache writable destination pointers for renderFromBuffer().
        for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
            m_destinationChannels[i] = outputBus->channel(i)->mutableData();

        // Render by reading directly from the buffer.
        if (!renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess)) {
            outputBus->zero();
            return;
        }

        // Apply the gain (in-place) to the output bus.
        float totalGain = gain()->value() * m_buffer->gain();
        outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);
        outputBus->clearSilentFlag();
    } else {
        // Too bad - the tryLock() failed. We must be in the middle of changing buffers and were already outputting silence anyway.
        outputBus->zero();
    }
}
// Runs the wrapped AudioProcessor over one render quantum, writing into
// output(0). The output is zeroed when the handler is uninitialized or the
// processor's channel count disagrees with the node's.
void AudioBasicProcessorHandler::process(size_t framesToProcess)
{
    AudioBus* destinationBus = output(0).bus();

    const bool canProcess = isInitialized() && processor()
        && processor()->numberOfChannels() == numberOfChannels();
    if (!canProcess) {
        destinationBus->zero();
        return;
    }

    AudioBus* sourceBus = input(0).bus();

    // With no incoming connection, zero the source so the processor is fed
    // silence rather than stale data.
    // FIXME: if we take "tail time" into account, then we can avoid calling
    // processor()->process() once the tail dies down.
    if (!input(0).isConnected())
        sourceBus->zero();

    processor()->process(sourceBus, destinationBus, framesToProcess);
}
// Pulls audio from the media element's provider into the output bus,
// resampling when the source rate differs from the context rate. The
// provider is always consumed (so the element keeps making progress), but
// the output is zeroed when the CORS check fails; a contended lock or a
// missing element/format also yields silence.
void MediaElementAudioSourceHandler::process(size_t numberOfFrames)
{
    AudioBus* outputBus = output(0).bus();

    // Use a tryLock() to avoid contention in the real-time audio thread.
    // If we fail to acquire the lock then the HTMLMediaElement must be in the
    // middle of reconfiguring its playback engine, so we output silence in this
    // case.
    MutexTryLocker tryLocker(m_processLock);
    if (tryLocker.locked()) {
        if (!mediaElement() || !m_sourceNumberOfChannels || !m_sourceSampleRate) {
            outputBus->zero();
            return;
        }
        AudioSourceProvider& provider = mediaElement()->getAudioSourceProvider();
        // Grab data from the provider so that the element continues to make
        // progress, even if we're going to output silence anyway.
        if (m_multiChannelResampler.get()) {
            // A resampler exists only when source and context rates differ.
            DCHECK_NE(m_sourceSampleRate, sampleRate());
            m_multiChannelResampler->process(&provider, outputBus, numberOfFrames);
        } else {
            // Bypass the resampler completely if the source is at the context's
            // sample-rate.
            DCHECK_EQ(m_sourceSampleRate, sampleRate());
            provider.provideInput(outputBus, numberOfFrames);
        }
        // Output silence if we don't have access to the element.
        if (!passesCORSAccessCheck()) {
            if (m_maybePrintCORSMessage) {
                // Print a CORS message, but just once for each change in the current
                // media element source, and only if we have a document to print to.
                m_maybePrintCORSMessage = false;
                if (context()->getExecutionContext()) {
                    context()->getExecutionContext()->postTask(
                        BLINK_FROM_HERE,
                        createCrossThreadTask(
                            &MediaElementAudioSourceHandler::printCORSMessage,
                            PassRefPtr<MediaElementAudioSourceHandler>(this),
                            m_currentSrcString));
                }
            }
            outputBus->zero();
        }
    } else {
        // We failed to acquire the lock.
        outputBus->zero();
    }
}
// Multiplies sourceBus by the per-frame gainValues and writes the result
// into this bus. Supports mono -> N fan-out (the single source channel is
// reused for every destination channel) as well as matched N -> N topology.
void AudioBus::copyWithSampleAccurateGainValuesFrom(const AudioBus &sourceBus, float* gainValues, unsigned numberOfGainValues)
{
    // Make sure we're processing from the same type of bus.
    // We *are* able to process from mono -> stereo
    if (sourceBus.numberOfChannels() != 1 && !topologyMatches(sourceBus)) {
        ASSERT_NOT_REACHED();
        return;
    }

    // The gain array must cover every source frame we are going to read.
    if (!gainValues || numberOfGainValues > sourceBus.length()) {
        ASSERT_NOT_REACHED();
        return;
    }

    // Fast path: a silent, fully-covered, equal-length source yields silence.
    if (sourceBus.length() == numberOfGainValues && sourceBus.length() == length() && sourceBus.isSilent()) {
        zero();
        return;
    }

    // We handle both the 1 -> N and N -> N case here.
    const float* source = sourceBus.channel(0)->data();
    for (unsigned channelIndex = 0; channelIndex < numberOfChannels(); ++channelIndex) {
        // For N -> N advance to the matching source channel; for 1 -> N the
        // single source channel set above is reused for every destination.
        if (sourceBus.numberOfChannels() == numberOfChannels())
            source = sourceBus.channel(channelIndex)->data();
        float* destination = channel(channelIndex)->mutableData();
        // destination = source * gainValues, element-wise.
        vmul(source, 1, gainValues, 1, destination, 1, numberOfGainValues);
    }
}
// Taps the audio passing through this node into the analyser's input
// buffer, then forwards the signal to the output unchanged (the node is a
// pass-through).
void AnalyserHandler::process(size_t framesToProcess)
{
    AudioBus* outputBus = output(0).bus();

    if (!isInitialized() || !input(0).isConnected()) {
        outputBus->zero();
        return;
    }

    AudioBus* inputBus = input(0).bus();

    // Give the analyser the audio which is passing through this AudioNode.
    m_analyser.writeInput(inputBus, framesToProcess);

    // For in-place processing, our override of pullInputs() will just pass the audio data through unchanged if the channel count matches from input to output
    // (resulting in inputBus == outputBus). Otherwise, do an up-mix to stereo.
    if (inputBus != outputBus)
        outputBus->copyFrom(*inputBus);
}
// Copies 'sourceBus' into this bus using speaker-aware mixing for the
// common mono/stereo/5.1 combinations; other combinations fall back to
// discrete channel-wise copying.
void AudioBus::speakersCopyFrom(const AudioBus& sourceBus)
{
    // FIXME: Implement down mixing 5.1 to stereo.
    // https://bugs.webkit.org/show_bug.cgi?id=79192
    unsigned numberOfSourceChannels = sourceBus.numberOfChannels();
    unsigned numberOfDestinationChannels = numberOfChannels();

    if (numberOfDestinationChannels == 2 && numberOfSourceChannels == 1) {
        // Handle mono -> stereo case (for now simply copy mono channel into both left and right)
        // FIXME: Really we should apply an equal-power scaling factor here, since we're effectively panning center...
        const AudioChannel* sourceChannel = sourceBus.channel(0);
        channel(0)->copyFrom(sourceChannel);
        channel(1)->copyFrom(sourceChannel);
    } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 2) {
        // Handle stereo -> mono case. output = 0.5 * (input.L + input.R).
        // const_cast only because channelByType() is non-const; sourceBus is not modified.
        AudioBus& sourceBusSafe = const_cast<AudioBus&>(sourceBus);
        const float* sourceL = sourceBusSafe.channelByType(ChannelLeft)->data();
        const float* sourceR = sourceBusSafe.channelByType(ChannelRight)->data();
        float* destination = channelByType(ChannelLeft)->mutableData();
        // destination = (L + R), then scaled by 0.5 in place.
        vadd(sourceL, 1, sourceR, 1, destination, 1, length());
        float scale = 0.5;
        vsmul(destination, 1, &scale, destination, 1, length());
    } else if (numberOfDestinationChannels == 6 && numberOfSourceChannels == 1) {
        // Handle mono -> 5.1 case, copy mono channel to center.
        // (Channel index 2 is the center; all other channels are cleared.)
        channel(2)->copyFrom(sourceBus.channel(0));
        channel(0)->zero();
        channel(1)->zero();
        channel(3)->zero();
        channel(4)->zero();
        channel(5)->zero();
    } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 6) {
        // Handle 5.1 -> mono case. Zero first because the helper sums into us.
        zero();
        speakersSumFrom5_1_ToMono(sourceBus);
    } else {
        // Fallback for unknown combinations.
        discreteCopyFrom(sourceBus);
    }
}
void PannerNode::process(size_t framesToProcess) { AudioBus* destination = output(0)->bus(); if (!isInitialized() || !input(0)->isConnected() || !m_panner.get()) { destination->zero(); return; } AudioBus* source = input(0)->bus(); if (!source) { destination->zero(); return; } // The audio thread can't block on this lock, so we use std::try_to_lock instead. std::unique_lock<std::mutex> lock(m_pannerMutex, std::try_to_lock); if (!lock.owns_lock()) { // Too bad - The try_lock() failed. We must be in the middle of changing the panner. destination->zero(); return; } // Apply the panning effect. double azimuth; double elevation; getAzimuthElevation(&azimuth, &elevation); m_panner->pan(azimuth, elevation, source, destination, framesToProcess); // Get the distance and cone gain. double totalGain = distanceConeGain(); // Snap to desired gain at the beginning. if (m_lastGain == -1.0) m_lastGain = totalGain; // Apply gain in-place with de-zippering. destination->copyWithGainFrom(*destination, &m_lastGain, totalGain); }
void ConvolverNode::process(size_t framesToProcess) { AudioBus* outputBus = output(0)->bus(); ASSERT(outputBus); // Synchronize with possible dynamic changes to the impulse response. MutexTryLocker tryLocker(m_processLock); if (tryLocker.locked()) { if (!isInitialized() || !m_reverb.get()) outputBus->zero(); else { // Process using the convolution engine. // Note that we can handle the case where nothing is connected to the input, in which case we'll just feed silence into the convolver. // FIXME: If we wanted to get fancy we could try to factor in the 'tail time' and stop processing once the tail dies down if // we keep getting fed silence. m_reverb->process(input(0)->bus(), outputBus, framesToProcess); } } else { // Too bad - the tryLock() failed. We must be in the middle of setting a new impulse response. outputBus->zero(); } }
// Runs the wrapped AudioProcessor for one quantum. Uses a non-blocking
// tryLock(); a contended lock (re-connection in progress) yields silence.
void AudioBasicProcessorNode::process(size_t framesToProcess)
{
    AudioBus* destinationBus = output(0)->bus();

    // The realtime thread can't block on this lock, so we call tryLock() instead.
    if (m_processLock.tryLock()) {
        if (!isInitialized() || !processor())
            destinationBus->zero();
        else {
            AudioBus* sourceBus = input(0)->bus();

            // With no incoming connection, zero the source so the processor
            // is fed silence rather than stale data.
            // FIXME: if we take "tail time" into account, then we can avoid calling processor()->process() once the tail dies down.
            if (!input(0)->isConnected())
                sourceBus->zero();

            processor()->process(sourceBus, destinationBus, framesToProcess);
        }

        // Manually paired with the tryLock() above -- keep every path
        // between them free of early returns.
        m_processLock.unlock();
    } else {
        // Too bad - the tryLock() failed. We must be in the middle of re-connecting and were already outputting silence anyway...
        destinationBus->zero();
    }
}
void AudioBus::sumFrom(const AudioBus& sourceBus, ChannelInterpretation channelInterpretation) { if (&sourceBus == this) return; unsigned numberOfSourceChannels = sourceBus.numberOfChannels(); unsigned numberOfDestinationChannels = numberOfChannels(); if (numberOfDestinationChannels == numberOfSourceChannels) { for (unsigned i = 0; i < numberOfSourceChannels; ++i) channel(i)->sumFrom(sourceBus.channel(i)); } else { switch (channelInterpretation) { case Speakers: speakersSumFrom(sourceBus); break; case Discrete: discreteSumFrom(sourceBus); break; default: ASSERT_NOT_REACHED(); } } }
// Applies the gain parameter to the input bus (guarded by a non-blocking
// tryLock): sample-accurate per-frame gains when a timeline is active,
// otherwise a single de-zippered gain value. A contended lock or a
// disconnected input yields silence.
void AudioGainNode::process(size_t framesToProcess)
{
    // FIXME: for some cases there is a nice optimization to avoid processing here, and let the gain change
    // happen in the summing junction input of the AudioNode we're connected to.
    // Then we can avoid all of the following:

    AudioBus* outputBus = output(0)->bus();
    ASSERT(outputBus);

    // The realtime thread can't block on this lock, so we call tryLock() instead.
    if (m_processLock.tryLock()) {
        if (!isInitialized() || !input(0)->isConnected())
            outputBus->zero();
        else {
            AudioBus* inputBus = input(0)->bus();

            if (gain()->hasTimelineValues()) {
                // Apply sample-accurate gain scaling for precise envelopes, grain windows, etc.
                ASSERT(framesToProcess <= m_sampleAccurateGainValues.size());
                // NOTE(review): if this guard fails in a release build the
                // output bus keeps stale samples -- confirm whether it should
                // be zeroed instead.
                if (framesToProcess <= m_sampleAccurateGainValues.size()) {
                    float* gainValues = m_sampleAccurateGainValues.data();
                    gain()->calculateSampleAccurateValues(gainValues, framesToProcess);
                    outputBus->copyWithSampleAccurateGainValuesFrom(*inputBus, gainValues, framesToProcess);
                }
            } else {
                // Apply the gain with de-zippering into the output bus.
                outputBus->copyWithGainFrom(*inputBus, &m_lastGain, gain()->value());
            }
        }

        // Manually paired with the tryLock() above.
        m_processLock.unlock();
    } else {
        // Too bad - the tryLock() failed. We must be in the middle of re-connecting and were already outputting silence anyway...
        outputBus->zero();
    }
}