// Schedules this source node to begin playing at time |when| (seconds on the
// context's timeline). Throws InvalidStateError if start() was already called,
// and InvalidAccessError when |when| is negative. Main-thread only.
void AudioScheduledSourceHandler::start(double when, ExceptionState& exceptionState)
{
    DCHECK(isMainThread());

    // Record the start attempt (even if rejected below) — presumably used for
    // autoplay/user-gesture metrics; confirm against maybeRecordStartAttempt().
    context()->maybeRecordStartAttempt();

    if (playbackState() != UNSCHEDULED_STATE) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "cannot call start more than once.");
        return;
    }

    if (when < 0) {
        exceptionState.throwDOMException(
            InvalidAccessError,
            ExceptionMessages::indexExceedsMinimumBound("start time", when, 0.0));
        return;
    }

    // The node is started. Add a reference to keep us alive so that audio will
    // eventually get played even if Javascript should drop all references to this
    // node. The reference will get dropped when the source has finished playing.
    context()->notifySourceNodeStartedProcessing(node());

    // This synchronizes with process(). updateSchedulingInfo will read some of
    // the variables being set here.
    MutexLocker processLocker(m_processLock);

    // If |when| < currentTime, the source must start now according to the spec.
    // So just set startTime to currentTime in this case to start the source now.
    m_startTime = std::max(when, context()->currentTime());

    setPlaybackState(SCHEDULED_STATE);
}
// Installs |buffer| as this node's audio data source. Returns false — without
// touching any state — when the buffer's channel count is zero or beyond
// stereo; otherwise reconfigures the output channel count and resets playback
// to the start of the buffer.
bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
{
    ASSERT(isMainThread());

    // Changing the buffer can re-configure the number of channels that are
    // output, so the context must be locked.
    AudioContext::AutoLocker contextLocker(context());

    // This synchronizes with process().
    MutexLocker processLocker(m_processLock);

    if (buffer) {
        // Match the output's channel count to the buffer's.
        unsigned channelCount = buffer->numberOfChannels();

        // FIXME: implement multi-channel greater than stereo.
        bool channelCountValid = channelCount && channelCount <= 2;
        if (!channelCountValid)
            return false;

        output(0)->setNumberOfChannels(channelCount);
    }

    // Restart reading from the beginning of the (possibly null) new buffer.
    m_virtualReadIndex = 0;
    m_buffer = buffer;

    return true;
}
// Replaces the wave-shaping curve. process() reads m_curve on the audio
// thread, so the swap happens under m_processLock.
void WaveShaperProcessor::setCurve(Float32Array* curve)
{
    // This synchronizes with process().
    MutexLocker processLocker(m_processLock);

    m_curve = curve;
}
// Replaces this node's buffer, reconfiguring the output channel count and
// caching raw per-channel data pointers for the rendering thread. Throws a
// TypeError when the buffer has more channels than the context supports.
void AudioBufferSourceNode::setBuffer(AudioBuffer* buffer, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    // The context must be locked since changing the buffer can re-configure the
    // number of channels that are output.
    AudioContext::AutoLocker contextLocker(context());

    // This synchronizes with process().
    MutexLocker processLocker(m_processLock);

    if (buffer) {
        // Do any necesssary re-configuration to the buffer's number of channels.
        unsigned numberOfChannels = buffer->numberOfChannels();

        // NOTE(review): a zero channel count is not rejected here, unlike the
        // boolean-returning overload — presumably an AudioBuffer cannot be
        // created with 0 channels; confirm.
        if (numberOfChannels > AudioContext::maxNumberOfChannels()) {
            exceptionState.throwTypeError("number of input channels (" + String::number(numberOfChannels)
                + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels())
                + ").");
            return;
        }

        output(0)->setNumberOfChannels(numberOfChannels);

        // Cache raw per-channel pointers so process() can read the buffer
        // without re-fetching channel data each quantum.
        m_sourceChannels = adoptArrayPtr(new const float* [numberOfChannels]);
        m_destinationChannels = adoptArrayPtr(new float* [numberOfChannels]);

        for (unsigned i = 0; i < numberOfChannels; ++i)
            m_sourceChannels[i] = buffer->getChannelData(i)->data();
    }

    // Start playback from the beginning of the new buffer.
    m_virtualReadIndex = 0;
    m_buffer = buffer;
}
// Installs |buffer| as this node's audio data source, rebuilding the
// per-channel pointer tables used by the rendering thread. Returns false
// (leaving all state untouched) when the buffer's channel count exceeds the
// context's maximum.
bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
{
    ASSERT(isMainThread());

    // Changing the buffer can re-configure the number of channels that are
    // output, so the context must be locked.
    AudioContext::AutoLocker contextLocker(context());

    // This synchronizes with process().
    MutexLocker processLocker(m_processLock);

    if (buffer) {
        unsigned channelCount = buffer->numberOfChannels();
        if (channelCount > AudioContext::maxNumberOfChannels())
            return false;

        // Reconfigure the output and (re)build the channel pointer tables.
        output(0)->setNumberOfChannels(channelCount);

        m_sourceChannels = adoptArrayPtr(new const float* [channelCount]);
        m_destinationChannels = adoptArrayPtr(new float* [channelCount]);

        for (unsigned channel = 0; channel < channelCount; ++channel)
            m_sourceChannels[channel] = buffer->getChannelData(channel)->data();
    }

    // Restart reading from the beginning of the new buffer.
    m_virtualReadIndex = 0;
    m_buffer = buffer;

    return true;
}
void AudioScheduledSourceHandler::stop(double when, ExceptionState& exceptionState) { DCHECK(isMainThread()); if (playbackState() == UNSCHEDULED_STATE) { exceptionState.throwDOMException( InvalidStateError, "cannot call stop without calling start first."); return; } if (when < 0) { exceptionState.throwDOMException( InvalidAccessError, ExceptionMessages::indexExceedsMinimumBound("stop time", when, 0.0)); return; } // This synchronizes with process() MutexLocker processLocker(m_processLock); // stop() can be called more than once, with the last call to stop taking // effect, unless the source has already stopped due to earlier calls to stop. // No exceptions are thrown in any case. when = std::max(0.0, when); m_endTime = when; }
// Dispatches an AudioProcessingEvent to JavaScript for the double-buffer slot
// selected by m_doubleBufferIndexForEvent. Main-thread only; bails out
// defensively if the index, output buffer, or execution context is invalid.
void ScriptProcessorHandler::fireProcessEvent()
{
    ASSERT(isMainThread());

    // Only two double-buffer slots exist; an out-of-range index is a bug.
    bool isIndexGood = m_doubleBufferIndexForEvent < 2;
    ASSERT(isIndexGood);
    if (!isIndexGood)
        return;

    AudioBuffer* inputBuffer = m_inputBuffers[m_doubleBufferIndexForEvent].get();
    AudioBuffer* outputBuffer = m_outputBuffers[m_doubleBufferIndexForEvent].get();
    ASSERT(outputBuffer);
    if (!outputBuffer)
        return;

    // Avoid firing the event if the document has already gone away.
    if (node() && context() && context()->executionContext()) {
        // This synchronizes with process().
        MutexLocker processLocker(m_processEventLock);

        // Calculate a playbackTime with the buffersize which needs to be
        // processed each time onaudioprocess is called. The outputBuffer being
        // passed to JS will be played after exhuasting previous outputBuffer by
        // double-buffering.
        double playbackTime = (context()->currentSampleFrame() + m_bufferSize)
            / static_cast<double>(context()->sampleRate());

        // Call the JavaScript event handler which will do the audio processing.
        node()->dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer, playbackTime));
    }
}
void PannerNode::setConeOuterGain(double angle) { if (coneOuterGain() == angle) return; // This synchronizes with process(). MutexLocker processLocker(m_processLock); m_coneEffect.setOuterGain(angle); markPannerAsDirty(PannerNode::DistanceConeGainDirty); }
void PannerNode::setRolloffFactor(double factor) { if (rolloffFactor() == factor) return; // This synchronizes with process(). MutexLocker processLocker(m_processLock); m_distanceEffect.setRolloffFactor(factor); markPannerAsDirty(PannerNode::DistanceConeGainDirty); }
void PannerNode::setMaxDistance(double distance) { if (maxDistance() == distance) return; // This synchronizes with process(). MutexLocker processLocker(m_processLock); m_distanceEffect.setMaxDistance(distance); markPannerAsDirty(PannerNode::DistanceConeGainDirty); }
// Installs |buffer| as this node's audio data source. Per spec the buffer may
// only be set once; a second assignment throws InvalidStateError. Also
// reconfigures the output channel count, caches raw per-channel pointers for
// the rendering thread, and re-validates grain parameters if start() already
// ran in grain mode.
void AudioBufferSourceHandler::setBuffer(AudioBuffer* buffer, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    if (m_buffer) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "Cannot set buffer after it has been already been set");
        return;
    }

    // The context must be locked since changing the buffer can re-configure the
    // number of channels that are output.
    AbstractAudioContext::AutoLocker contextLocker(context());

    // This synchronizes with process().
    MutexLocker processLocker(m_processLock);

    if (buffer) {
        // Do any necesssary re-configuration to the buffer's number of channels.
        unsigned numberOfChannels = buffer->numberOfChannels();

        // This should not be possible since AudioBuffers can't be created with
        // too many channels either.
        if (numberOfChannels > AbstractAudioContext::maxNumberOfChannels()) {
            exceptionState.throwDOMException(
                NotSupportedError,
                ExceptionMessages::indexOutsideRange(
                    "number of input channels",
                    numberOfChannels,
                    1u,
                    ExceptionMessages::InclusiveBound,
                    AbstractAudioContext::maxNumberOfChannels(),
                    ExceptionMessages::InclusiveBound));
            return;
        }

        output(0).setNumberOfChannels(numberOfChannels);

        // Cache raw per-channel pointers for use by process() on the audio
        // thread.
        m_sourceChannels = wrapArrayUnique(new const float* [numberOfChannels]);
        m_destinationChannels = wrapArrayUnique(new float* [numberOfChannels]);

        for (unsigned i = 0; i < numberOfChannels; ++i)
            m_sourceChannels[i] = buffer->getChannelData(i)->data();

        // If this is a grain (as set by a previous call to start()), validate the
        // grain parameters now since it wasn't validated when start was called
        // (because there was no buffer then).
        if (m_isGrain)
            clampGrainParameters(buffer);
    }

    // Start playback from the beginning of the buffer.
    m_virtualReadIndex = 0;
    m_buffer = buffer;
}
void PannerNode::setVelocity(float x, float y, float z) { FloatPoint3D velocity = FloatPoint3D(x, y, z); if (m_velocity == velocity) return; // This synchronizes with process(). MutexLocker processLocker(m_processLock); m_velocity = velocity; markPannerAsDirty(PannerNode::DopplerRateDirty); }
void PannerNode::setOrientation(float x, float y, float z) { FloatPoint3D orientation = FloatPoint3D(x, y, z); if (m_orientation == orientation) return; // This synchronizes with process(). MutexLocker processLocker(m_processLock); m_orientation = orientation; markPannerAsDirty(PannerNode::DistanceConeGainDirty); }
void PannerNode::setPosition(float x, float y, float z) { FloatPoint3D position = FloatPoint3D(x, y, z); if (m_position == position) return; // This synchronizes with process(). MutexLocker processLocker(m_processLock); m_position = position; markPannerAsDirty(PannerNode::AzimuthElevationDirty | PannerNode::DistanceConeGainDirty | PannerNode::DopplerRateDirty); }
// Sets the oversampling mode. Any mode other than "none" requires the
// kernels' oversampling machinery to be initialized before the next render
// quantum, so that is done eagerly here under the process lock.
void WaveShaperProcessor::setOversample(OverSampleType oversample)
{
    // This synchronizes with process().
    MutexLocker processLocker(m_processLock);

    m_oversample = oversample;

    if (oversample == OverSampleNone)
        return;

    for (size_t k = 0; k < m_kernels.size(); ++k) {
        WaveShaperDSPKernel* waveShaperKernel = static_cast<WaveShaperDSPKernel*>(m_kernels[k].get());
        waveShaperKernel->lazyInitializeOversampling();
    }
}
void BiquadDSPKernel::getFrequencyResponse(int nFrequencies, const float* frequencyHz, float* magResponse, float* phaseResponse) { bool isGood = nFrequencies > 0 && frequencyHz && magResponse && phaseResponse; ASSERT(isGood); if (!isGood) return; Vector<float> frequency(nFrequencies); double nyquist = this->nyquist(); // Convert from frequency in Hz to normalized frequency (0 -> 1), // with 1 equal to the Nyquist frequency. for (int k = 0; k < nFrequencies; ++k) frequency[k] = narrowPrecisionToFloat(frequencyHz[k] / nyquist); double cutoffFrequency; double Q; double gain; double detune; // in Cents { // Get a copy of the current biquad filter coefficients so we can update the biquad with // these values. We need to synchronize with process() to prevent process() from updating // the filter coefficients while we're trying to access them. The process will update it // next time around. // // The BiquadDSPKernel object here (along with it's Biquad object) is for querying the // frequency response and is NOT the same as the one in process() which is used for // performing the actual filtering. This one is is created in // BiquadProcessor::getFrequencyResponse for this purpose. Both, however, point to the same // BiquadProcessor object. // // FIXME: Simplify this: crbug.com/390266 MutexLocker processLocker(m_processLock); cutoffFrequency = biquadProcessor()->parameter1().value(); Q = biquadProcessor()->parameter2().value(); gain = biquadProcessor()->parameter3().value(); detune = biquadProcessor()->parameter4().value(); } updateCoefficients(cutoffFrequency, Q, gain, detune); m_biquad.getFrequencyResponse(nFrequencies, frequency.data(), magResponse, phaseResponse); }
bool PannerNode::setDistanceModel(unsigned model) { switch (model) { case DistanceEffect::ModelLinear: case DistanceEffect::ModelInverse: case DistanceEffect::ModelExponential: if (model != m_distanceModel) { // This synchronizes with process(). MutexLocker processLocker(m_processLock); m_distanceEffect.setModel(static_cast<DistanceEffect::ModelType>(model), true); m_distanceModel = model; } break; default: ASSERT_NOT_REACHED(); return false; } return true; }
bool PannerNode::setPanningModel(unsigned model) { switch (model) { case Panner::PanningModelEqualPower: case Panner::PanningModelHRTF: if (!m_panner.get() || model != m_panningModel) { // This synchronizes with process(). MutexLocker processLocker(m_processLock); OwnPtr<Panner> newPanner = Panner::create(model, sampleRate(), m_hrtfDatabaseLoader.get()); m_panner = newPanner.release(); m_panningModel = model; } break; default: ASSERT_NOT_REACHED(); return false; } return true; }
// Selects the panning algorithm. Returns false only for values outside the
// known enum; SOUNDFIELD is accepted but just logs a console warning since it
// is unimplemented.
bool PannerNode::setPanningModel(unsigned model)
{
    switch (model) {
    case EQUALPOWER:
    case HRTF:
        if (!m_panner.get() || model != m_panningModel) {
            // This synchronizes with process().
            MutexLocker processLocker(m_pannerLock);

            OwnPtr<Panner> newPanner = Panner::create(model, sampleRate());
            m_panner = newPanner.release();
            m_panningModel = model;
        }
        break;
    case SOUNDFIELD:
        // FIXME: Implement sound field model. See
        // https://bugs.webkit.org/show_bug.cgi?id=77367.
        context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "'soundfield' panning model not implemented.");
        break;
    default:
        return false;
    }

    return true;
}
// Schedules this buffer source to start at |when|, optionally playing only a
// "grain" beginning at |grainOffset| for |grainDuration| seconds.
// |isDurationGiven| records whether the caller supplied an explicit duration
// (used later when clamping grain parameters). Throws InvalidStateError on a
// second start() call or any negative argument. Main-thread only.
void AudioBufferSourceHandler::startSource(double when, double grainOffset, double grainDuration, bool isDurationGiven, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    // Presumably feeds autoplay/user-gesture accounting — confirm against
    // recordUserGestureState().
    context()->recordUserGestureState();

    if (playbackState() != UNSCHEDULED_STATE) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "cannot call start more than once.");
        return;
    }

    if (when < 0) {
        exceptionState.throwDOMException(
            InvalidStateError,
            ExceptionMessages::indexExceedsMinimumBound(
                "start time",
                when,
                0.0));
        return;
    }

    if (grainOffset < 0) {
        exceptionState.throwDOMException(
            InvalidStateError,
            ExceptionMessages::indexExceedsMinimumBound(
                "offset",
                grainOffset,
                0.0));
        return;
    }

    if (grainDuration < 0) {
        exceptionState.throwDOMException(
            InvalidStateError,
            ExceptionMessages::indexExceedsMinimumBound(
                "duration",
                grainDuration,
                0.0));
        return;
    }

    // The node is started. Add a reference to keep us alive so that audio
    // will eventually get played even if Javascript should drop all references
    // to this node. The reference will get dropped when the source has finished
    // playing.
    context()->notifySourceNodeStartedProcessing(node());

    // This synchronizes with process(). updateSchedulingInfo will read some of
    // the variables being set here.
    MutexLocker processLocker(m_processLock);

    m_isDurationGiven = isDurationGiven;
    m_isGrain = true;
    m_grainOffset = grainOffset;
    m_grainDuration = grainDuration;

    // If |when| < currentTime, the source must start now according to the spec.
    // So just set startTime to currentTime in this case to start the source now.
    m_startTime = std::max(when, context()->currentTime());

    // Grain parameters can only be validated against a buffer; if none is set
    // yet, setBuffer() performs this clamp instead.
    if (buffer())
        clampGrainParameters(buffer());

    setPlaybackState(SCHEDULED_STATE);
}