Example no. 1
AbstractAudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        exceptionState.throwDOMException(
            NotSupportedError,
            ExceptionMessages::indexExceedsMaximumBound(
                "number of hardware contexts",
                s_hardwareContextCount,
                MaxHardwareContexts));
        return nullptr;
    }

    AudioContext* audioContext = new AudioContext(document);
    audioContext->suspendIfNeeded();

    // This starts the audio thread. The destination node's
    // provideInput() method will now be called repeatedly to render
    // audio.  Each time provideInput() is called, a portion of the
    // audio stream is rendered. Let's call this time period a "render
    // quantum". NOTE: for now AudioContext does not need an explicit
    // startRendering() call from JavaScript.  We may want to consider
    // requiring it for symmetry with OfflineAudioContext.
    audioContext->startRendering();
    ++s_hardwareContextCount;
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::AudioContext(): %u #%u\n",
        audioContext, audioContext->m_contextId, s_hardwareContextCount);
#endif

    return audioContext;
}
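The comment in create() describes the pull model: once startRendering() is called, the audio thread repeatedly asks the destination node for output, one render quantum at a time. Below is a minimal hypothetical sketch of that loop, assuming the usual 128-frame quantum; the names kRenderQuantumFrames, QuantumSketch, provideInputSketch and audioThreadSketch are illustrative and do not exist in the source above.

#include <array>
#include <atomic>
#include <cstddef>

// Hypothetical sketch of the render loop described in the comment above; the real
// audio thread lives in the platform audio destination code, not in AudioContext.
constexpr std::size_t kRenderQuantumFrames = 128; // assumed quantum size

using QuantumSketch = std::array<float, kRenderQuantumFrames>;

// Stand-in for the destination node's provideInput(): renders one quantum of audio.
// Here it only writes silence; the real node pulls and mixes its input graph.
void provideInputSketch(QuantumSketch& out)
{
    out.fill(0.0f);
}

// Stand-in for the audio thread: each iteration produces one render quantum,
// which the hardware callback consumes before requesting the next one.
void audioThreadSketch(const std::atomic<bool>& keepRendering)
{
    QuantumSketch quantum;
    while (keepRendering.load())
        provideInputSketch(quantum);
}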
Example no. 2
WaveShaperNode::WaveShaperNode(AudioContext& context)
    : AudioBasicProcessorNode(context, context.sampleRate())
{
    m_processor = std::make_unique<WaveShaperProcessor>(context.sampleRate(), 1);
    setNodeType(NodeTypeWaveShaper);

    initialize();
}
Example no. 3
void AudioContext::uninitializeDispatch(void* userData)
{
    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    ASSERT(context);
    if (!context)
        return;

    context->uninitialize();
}
Example no. 4
void AudioContext::deleteMarkedNodesDispatch(void* userData)
{
    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    ASSERT(context);
    if (!context)
        return;

    context->deleteMarkedNodes();
    context->deref();
}
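The deref() above balances a ref() that must be taken when the deletion task is posted to the main thread, so the context cannot be destroyed while the task is still in flight. A plausible sketch of that scheduling side is shown below; scheduleNodeDeletionSketch is a hypothetical name, and it assumes WTF's callOnMainThread(void (*)(void*), void*) helper, which is also how uninitializeDispatch() and deleteMarkedNodesDispatch() would typically be invoked.

// Hypothetical sketch (not verbatim from the source) of the member that schedules
// the task above; assumed to live inside AudioContext so it can see the dispatch
// function. The ref() here is what the deref() at the end of
// deleteMarkedNodesDispatch() releases, keeping the context alive until the
// posted main-thread task has run.
void AudioContext::scheduleNodeDeletionSketch()
{
    ref();
    callOnMainThread(deleteMarkedNodesDispatch, this);
}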
Example no. 5
      // Sets up |output| iff buffers are set in event handlers.
      void DispatchAudioProcessEvent(ScriptProcessorNode* aNode,
                                     AudioChunk* aOutput)
      {
        AudioContext* context = aNode->Context();
        if (!context) {
          return;
        }

        AutoJSAPI jsapi;
        if (NS_WARN_IF(!jsapi.Init(aNode->GetOwner()))) {
          return;
        }
        JSContext* cx = jsapi.cx();
        uint32_t inputChannelCount = aNode->ChannelCount();

        // Create the input buffer
        RefPtr<AudioBuffer> inputBuffer;
        if (mInputBuffer) {
          ErrorResult rv;
          inputBuffer =
            AudioBuffer::Create(context->GetOwner(), inputChannelCount,
                                aNode->BufferSize(), context->SampleRate(),
                                mInputBuffer.forget(), rv);
          if (rv.Failed()) {
            rv.SuppressException();
            return;
          }
        }

        // Ask content to produce data in the output buffer
        // Note that we always avoid creating the output buffer here, and we try to
        // avoid creating the input buffer as well.  The AudioProcessingEvent class
        // knows how to lazily create them if needed once the script tries to access
        // them.  Otherwise, we may be able to get away without creating them!
        RefPtr<AudioProcessingEvent> event =
          new AudioProcessingEvent(aNode, nullptr, nullptr);
        event->InitEvent(inputBuffer, inputChannelCount, mPlaybackTime);
        aNode->DispatchTrustedEvent(event);

        // Steal the output buffers if they have been set.
        // Don't create a buffer if it hasn't been used to return output;
        // FinishProducingOutputBuffer() will optimize output = null.
        // GetThreadSharedChannelsForRate() may also return null after OOM.
        if (event->HasOutputBuffer()) {
          ErrorResult rv;
          AudioBuffer* buffer = event->GetOutputBuffer(rv);
          // HasOutputBuffer() returning true means that GetOutputBuffer()
          // will not fail.
          MOZ_ASSERT(!rv.Failed());
          *aOutput = buffer->GetThreadSharedChannelsForRate(cx);
          MOZ_ASSERT(aOutput->IsNull() ||
                     aOutput->mBufferFormat == AUDIO_FORMAT_FLOAT32,
                     "AudioBuffers initialized from JS have float data");
        }
      }
Example no. 6
/* static */
already_AddRefed<AudioNodeExternalInputStream>
AudioNodeExternalInputStream::Create(MediaStreamGraph* aGraph,
                                     AudioNodeEngine* aEngine) {
  AudioContext* ctx = aEngine->NodeMainThread()->Context();
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aGraph->GraphRate() == ctx->SampleRate());

  RefPtr<AudioNodeExternalInputStream> stream =
      new AudioNodeExternalInputStream(aEngine, aGraph->GraphRate());
  stream->mSuspendedCount += ctx->ShouldSuspendNewStream();
  aGraph->AddStream(stream);
  return stream.forget();
}
Example no. 7
v8::Handle<v8::Value> V8AudioContext::createBufferCallback(const v8::Arguments& args)
{
    if (args.Length() < 2)
        return throwError("Not enough arguments", V8Proxy::SyntaxError);

    AudioContext* audioContext = toNative(args.Holder());
    ASSERT(audioContext);

    v8::Handle<v8::Value> arg = args[0];
    
    // AudioBuffer createBuffer(in ArrayBuffer buffer, in boolean mixToMono);
    if (V8ArrayBuffer::HasInstance(arg)) {
        v8::Handle<v8::Object> object = v8::Handle<v8::Object>::Cast(arg);
        ArrayBuffer* arrayBuffer = V8ArrayBuffer::toNative(object);
        ASSERT(arrayBuffer);

        if (arrayBuffer) {
            bool mixToMono = args[1]->ToBoolean()->Value();

            RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(arrayBuffer, mixToMono);
            if (!audioBuffer.get())
                return throwError("Error decoding audio file data", V8Proxy::SyntaxError);

            return toV8(audioBuffer.get());
        }
        
        return v8::Undefined();
    }
    
    // AudioBuffer createBuffer(in unsigned long numberOfChannels, in unsigned long numberOfFrames, in float sampleRate);
    if (args.Length() < 3)
        return throwError("Not enough arguments", V8Proxy::SyntaxError);

    bool ok = false;
    
    int32_t numberOfChannels = toInt32(args[0], ok);
    if (!ok || numberOfChannels <= 0 || numberOfChannels > 10)
        return throwError("Invalid number of channels", V8Proxy::SyntaxError);
    
    int32_t numberOfFrames = toInt32(args[1], ok);
    if (!ok || numberOfFrames <= 0)
        return throwError("Invalid number of frames", V8Proxy::SyntaxError);
    
    float sampleRate = toFloat(args[2]);
    
    RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(numberOfChannels, numberOfFrames, sampleRate);
    if (!audioBuffer.get())
        return throwError("Error creating AudioBuffer", V8Proxy::SyntaxError);

    return toV8(audioBuffer.get());
}
Example no. 8
JSValue JSAudioContext::createBuffer(ExecState* exec)
{
    if (exec->argumentCount() < 2)
        return throwError(exec, createSyntaxError(exec, "Not enough arguments"));

    AudioContext* audioContext = static_cast<AudioContext*>(impl());
    ASSERT(audioContext);

    // AudioBuffer createBuffer(in ArrayBuffer buffer, in boolean mixToMono);
    JSValue val = exec->argument(0);
    if (val.inherits(&JSArrayBuffer::s_info)) {
        ArrayBuffer* arrayBuffer = toArrayBuffer(val);
        ASSERT(arrayBuffer);
        if (arrayBuffer) {
            bool mixToMono = exec->argument(1).toBoolean(exec);

            RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(arrayBuffer, mixToMono);
            if (!audioBuffer.get())
                return throwError(exec, createSyntaxError(exec, "Error decoding audio file data"));

            return toJS(exec, globalObject(), audioBuffer.get());
        }

        return jsUndefined();
    }
    
    // AudioBuffer createBuffer(in unsigned long numberOfChannels, in unsigned long numberOfFrames, in float sampleRate);
    if (exec->argumentCount() < 3)
        return throwError(exec, createSyntaxError(exec, "Not enough arguments"));
    
    int32_t numberOfChannels = exec->argument(0).toInt32(exec);
    int32_t numberOfFrames = exec->argument(1).toInt32(exec);
    float sampleRate = exec->argument(2).toFloat(exec);

    if (numberOfChannels <= 0 || numberOfChannels > 10)
        return throwError(exec, createSyntaxError(exec, "Invalid number of channels"));

    if (numberOfFrames <= 0)
        return throwError(exec, createSyntaxError(exec, "Invalid number of frames"));

    if (sampleRate <= 0)
        return throwError(exec, createSyntaxError(exec, "Invalid sample rate"));

    RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(numberOfChannels, numberOfFrames, sampleRate);
    if (!audioBuffer.get())
        return throwError(exec, createSyntaxError(exec, "Error creating AudioBuffer"));

    return toJS(exec, globalObject(), audioBuffer.get());
}
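The V8 binding in Example no. 7 and the JSC binding above implement the same two createBuffer() overloads and apply nearly the same argument validation; the one visible difference is that the JSC version also rejects a non-positive sampleRate. The helper below is only an illustrative summary of those checks (isValidBufferRequestSketch is not a function in either engine).

#include <cstdint>

// Illustrative summary of the validation both bindings perform for the
// (numberOfChannels, numberOfFrames, sampleRate) overload. Not WebCore code.
bool isValidBufferRequestSketch(int32_t numberOfChannels, int32_t numberOfFrames, float sampleRate)
{
    if (numberOfChannels <= 0 || numberOfChannels > 10)
        return false; // "Invalid number of channels"
    if (numberOfFrames <= 0)
        return false; // "Invalid number of frames"
    if (sampleRate <= 0)
        return false; // "Invalid sample rate" (checked only by the JSC binding above)
    return true;
}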
Example no. 9
AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        exceptionState.throwDOMException(
            NotSupportedError,
            ExceptionMessages::indexExceedsMaximumBound(
                "number of hardware contexts",
                s_hardwareContextCount,
                MaxHardwareContexts));
        return nullptr;
    }

    AudioContext* audioContext = new AudioContext(&document);
    audioContext->suspendIfNeeded();
    return audioContext;
}
Example no. 10
float AudioParamTimeline::valueForContextTime(AudioContext& context, float defaultValue, bool& hasValue)
{
    {
        std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
        if (!lock.owns_lock() || !m_events.size() || context.currentTime() < m_events[0].time()) {
            hasValue = false;
            return defaultValue;
        }
    }

    // Ask for just a single value.
    float value;
    double sampleRate = context.sampleRate();
    double startTime = context.currentTime();
    double endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
    double controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
    value = valuesForTimeRange(startTime, endTime, defaultValue, &value, 1, sampleRate, controlRate);

    hasValue = true;
    return value;
}
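It helps to make the two derived quantities concrete. Assuming a 44100 Hz context and the usual 128-frame value of AudioNode::ProcessingSizeInFrames (the exact constant is not shown above), the sampling window and control rate work out as in this small standalone example.

#include <cstdio>

// Worked example of the derived values in valueForContextTime(), under the
// assumptions stated above (44100 Hz sample rate, 128-frame processing quantum).
int main()
{
    const double sampleRate = 44100.0;
    const double processingSizeInFrames = 128.0;

    // "Time just beyond one sample-frame": about 24.9 microseconds at 44.1 kHz.
    const double window = 1.1 / sampleRate;

    // "One parameter change per render quantum": 344.53125 changes per second.
    const double controlRate = sampleRate / processingSizeInFrames;

    std::printf("window = %.6g s, controlRate = %.5f Hz\n", window, controlRate);
    return 0;
}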
Example no. 11
MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext& context, HTMLMediaElement& mediaElement)
    : AudioNode(context, context.sampleRate())
    , m_mediaElement(mediaElement)
    , m_sourceNumberOfChannels(0)
    , m_sourceSampleRate(0)
{
    // Default to stereo. This could change depending on what the media element .src is set to.
    addOutput(std::make_unique<AudioNodeOutput>(this, 2));

    setNodeType(NodeTypeMediaElementAudioSource);

    initialize();
}
Example no. 12
/* static */ already_AddRefed<MediaStreamAudioDestinationNode>
MediaStreamAudioDestinationNode::Create(AudioContext& aAudioContext,
                                        const AudioNodeOptions& aOptions,
                                        ErrorResult& aRv)
{
  if (aAudioContext.IsOffline()) {
    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return nullptr;
  }

  if (aAudioContext.CheckClosed(aRv)) {
    return nullptr;
  }

  RefPtr<MediaStreamAudioDestinationNode> audioNode =
    new MediaStreamAudioDestinationNode(&aAudioContext);

  audioNode->Initialize(aOptions, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  return audioNode.forget();
}
Example no. 13
/* static */ already_AddRefed<AudioBuffer>
AudioBuffer::Constructor(const GlobalObject& aGlobal,
                         AudioContext& aAudioContext,
                         const AudioBufferOptions& aOptions,
                         ErrorResult& aRv)
{
  if (!aOptions.mNumberOfChannels) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return nullptr;
  }

  float sampleRate = aOptions.mSampleRate.WasPassed()
                       ? aOptions.mSampleRate.Value()
                       : aAudioContext.SampleRate();
  return Create(&aAudioContext, aOptions.mNumberOfChannels, aOptions.mLength,
                sampleRate, aRv);
}
Example no. 14
Ref<AudioBuffer> AudioBuffer::read(AudioContext& context, const std::string& sampleName)
{
  ResourceCache& cache = context.cache();

  std::string name;
  name += "sample:";
  name += sampleName;

  if (Ref<AudioBuffer> buffer = cache.find<AudioBuffer>(name))
    return buffer;

  Ref<Sample> data = Sample::read(cache, sampleName);
  if (!data)
  {
    logError("Failed to read sample for buffer %s", name.c_str());
    return nullptr;
  }

  return create(ResourceInfo(cache, name), context, *data);
}
Example no. 15
PannerNode::PannerNode(AudioContext& context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_panningModel(PanningModelType::HRTF)
    , m_lastGain(-1.0)
    , m_connectionCount(0)
{
    // Load the HRTF database asynchronously so we don't block the Javascript thread while creating the HRTF database.
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context.sampleRate());

    addInput(std::make_unique<AudioNodeInput>(this));
    addOutput(std::make_unique<AudioNodeOutput>(this, 2));

    // Node-specific default mixing rules.
    m_channelCount = 2;
    m_channelCountMode = ClampedMax;
    m_channelInterpretation = AudioBus::Speakers;

    m_distanceGain = AudioParam::create(context, "distanceGain", 1.0, 0.0, 1.0);
    m_coneGain = AudioParam::create(context, "coneGain", 1.0, 0.0, 1.0);

    m_position = FloatPoint3D(0, 0, 0);
    m_orientation = FloatPoint3D(1, 0, 0);
    m_velocity = FloatPoint3D(0, 0, 0);

    setNodeType(NodeTypePanner);

    initialize();
}
Example no. 16
WaveShaperNode::WaveShaperNode(AudioContext& context)
    : AudioNode(context)
{
    setHandler(AudioBasicProcessorHandler::create(AudioHandler::NodeTypeWaveShaper, *this, context.sampleRate(), adoptPtr(new WaveShaperProcessor(context.sampleRate(), 1))));

    handler().initialize();
}