AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        exceptionState.throwDOMException(
            NotSupportedError,
            ExceptionMessages::indexExceedsMaximumBound(
                "number of hardware contexts",
                s_hardwareContextCount,
                MaxHardwareContexts));
        return nullptr;
    }

    AudioContext* audioContext = new AudioContext(document);
    audioContext->suspendIfNeeded();

    // This starts the audio thread. The destination node's provideInput()
    // method will now be called repeatedly to render audio. Each time
    // provideInput() is called, a portion of the audio stream is rendered.
    // Let's call this time period a "render quantum".
    //
    // NOTE: for now AudioContext does not need an explicit startRendering()
    // call from JavaScript. We may want to consider requiring it for
    // symmetry with OfflineAudioContext.
    audioContext->startRendering();
    ++s_hardwareContextCount;

#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::AudioContext(): %u #%u\n",
        audioContext, audioContext->m_contextId, s_hardwareContextCount);
#endif

    return audioContext;
}
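// The comment above describes the "render quantum": once startRendering() is
// called, the audio thread repeatedly asks the destination node to produce a
// fixed-size block of samples. The standalone sketch below is NOT Chromium
// code; all names are illustrative. It models that loop with a 128-frame
// quantum, the block size WebAudio renders in.

#include <array>
#include <cstdio>

namespace sketch {

constexpr size_t kRenderQuantumFrames = 128;  // frames produced per callback

struct DestinationNode {
    double phase = 0.0;

    // Called once per render quantum by the (simulated) audio thread.
    void provideInput(std::array<float, kRenderQuantumFrames>& buffer)
    {
        for (float& sample : buffer) {
            sample = static_cast<float>(phase);  // placeholder DSP
            phase += 1.0 / kRenderQuantumFrames;
        }
    }
};

}  // namespace sketch

int main()
{
    sketch::DestinationNode destination;
    std::array<float, sketch::kRenderQuantumFrames> buffer;

    // Simulate a few iterations of the audio thread's rendering loop; the
    // real loop runs for the lifetime of the context.
    for (int quantum = 0; quantum < 4; ++quantum) {
        destination.provideInput(buffer);
        std::printf("quantum %d: first sample = %f\n", quantum, buffer[0]);
    }
    return 0;
}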
AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        exceptionState.throwDOMException(
            NotSupportedError,
            ExceptionMessages::indexExceedsMaximumBound(
                "number of hardware contexts",
                s_hardwareContextCount,
                MaxHardwareContexts));
        return nullptr;
    }

    AudioContext* audioContext = new AudioContext(&document);
    audioContext->suspendIfNeeded();
    return audioContext;
}
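// Both versions of create() above gate construction on s_hardwareContextCount
// so that only a bounded number of contexts hold a hardware audio destination
// at once. The standalone sketch below is NOT Chromium code; the class name,
// the limit value, and the unique_ptr ownership are illustrative stand-ins
// for the guard pattern: check the count, report the error, return null.

#include <cstdio>
#include <memory>
#include <vector>

namespace sketch {

class Context {
public:
    static constexpr unsigned kMaxHardwareContexts = 6;  // illustrative limit

    // Returns nullptr once the limit is reached, mirroring how create()
    // reports NotSupportedError through ExceptionState and bails out.
    static std::unique_ptr<Context> create()
    {
        if (s_hardwareContextCount >= kMaxHardwareContexts) {
            std::fprintf(stderr,
                "number of hardware contexts (%u) exceeds maximum bound (%u)\n",
                s_hardwareContextCount, kMaxHardwareContexts);
            return nullptr;
        }
        ++s_hardwareContextCount;
        return std::unique_ptr<Context>(new Context());
    }

private:
    Context() = default;
    static unsigned s_hardwareContextCount;
};

unsigned Context::s_hardwareContextCount = 0;

}  // namespace sketch

int main()
{
    std::vector<std::unique_ptr<sketch::Context>> live;

    // The seventh attempt is refused, just as create() throws once
    // MaxHardwareContexts contexts are alive.
    for (int i = 0; i < 7; ++i) {
        auto context = sketch::Context::create();
        std::printf("create #%d -> %s\n", i + 1, context ? "ok" : "refused");
        if (context)
            live.push_back(std::move(context));
    }
    return 0;
}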