Example #1
WaveShaperNode::WaveShaperNode(AudioContext& context)
    : AudioBasicProcessorNode(context, context.sampleRate())
{
    m_processor = std::make_unique<WaveShaperProcessor>(context.sampleRate(), 1);
    setNodeType(NodeTypeWaveShaper);

    initialize();
}
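
In WebKit, a constructor like this is normally reached through a static create() factory rather than called directly. A minimal sketch of that wrapper, assuming the usual Ref/adoptRef idiom (the exact return type varies by revision: PassRefPtr, RefPtr, or Ref):

static Ref<WaveShaperNode> create(AudioContext& context)
{
    // adoptRef takes ownership of the freshly constructed, already
    // initialize()d node without an extra ref-count increment.
    return adoptRef(*new WaveShaperNode(context));
}
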
MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext& context, HTMLMediaElement& mediaElement)
    : AudioNode(context, context.sampleRate())
    , m_mediaElement(mediaElement)
    , m_sourceNumberOfChannels(0)
    , m_sourceSampleRate(0)
{
    // Default to stereo. This could change depending on what the media element .src is set to.
    addOutput(std::make_unique<AudioNodeOutput>(this, 2));

    setNodeType(NodeTypeMediaElementAudioSource);

    initialize();
}
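
The stereo default above is provisional: once the media element has decoded enough to know its real format, the node is told the actual channel count and sample rate. A simplified sketch of that reconfiguration step, loosely modeled on WebKit's setFormat() (illustrative, not the verbatim implementation):

void MediaElementAudioSourceNode::setFormat(size_t numberOfChannels, float sourceSampleRate)
{
    if (numberOfChannels != m_sourceNumberOfChannels || sourceSampleRate != m_sourceSampleRate) {
        m_sourceNumberOfChannels = numberOfChannels;
        m_sourceSampleRate = sourceSampleRate;

        // Changing the channel count alters the output bus that downstream
        // nodes render from, so it must happen under the graph lock.
        AudioContext::AutoLocker locker(context());
        output(0)->setNumberOfChannels(numberOfChannels);
    }
}
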
float AudioParamTimeline::valueForContextTime(AudioContext& context, float defaultValue, bool& hasValue)
{
    {
        // Peek at the event list without blocking: if another thread (e.g. the
        // audio thread) holds the lock, give up and report that we have no value.
        std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
        if (!lock.owns_lock() || !m_events.size() || context.currentTime() < m_events[0].time()) {
            hasValue = false;
            return defaultValue;
        }
    }

    // Ask for just a single value.
    float value;
    double sampleRate = context.sampleRate();
    double startTime = context.currentTime();
    double endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
    double controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
    // valuesForTimeRange() fills |value| (used here as a one-element output
    // buffer) with the single computed sample and also returns it.
    value = valuesForTimeRange(startTime, endTime, defaultValue, &value, 1, sampleRate, controlRate);

    hasValue = true;
    return value;
}
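
The scoped try_to_lock block at the top is what makes this call safe from a thread that must never wait on the audio thread: if the mutex is contended, the function returns the default immediately instead of blocking. A self-contained illustration of the pattern (names here are hypothetical):

#include <mutex>

static std::mutex s_eventsMutex;

// Returns |sharedValue| only if the lock can be taken without waiting;
// otherwise reports no value and falls back to |defaultValue|.
float readValueNonBlocking(float sharedValue, float defaultValue, bool& hasValue)
{
    std::unique_lock<std::mutex> lock(s_eventsMutex, std::try_to_lock);
    if (!lock.owns_lock()) {
        hasValue = false;
        return defaultValue;
    }
    hasValue = true;
    return sharedValue;
}
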
Example #4
PannerNode::PannerNode(AudioContext& context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_panningModel(PanningModelType::HRTF)
    , m_lastGain(-1.0)
    , m_connectionCount(0)
{
    // Load the HRTF database asynchronously so we don't block the JavaScript thread while creating the HRTF database.
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context.sampleRate());

    addInput(std::make_unique<AudioNodeInput>(this));
    addOutput(std::make_unique<AudioNodeOutput>(this, 2));

    // Node-specific default mixing rules.
    m_channelCount = 2;
    m_channelCountMode = ClampedMax;
    m_channelInterpretation = AudioBus::Speakers;

    m_distanceGain = AudioParam::create(context, "distanceGain", 1.0, 0.0, 1.0);
    m_coneGain = AudioParam::create(context, "coneGain", 1.0, 0.0, 1.0);

    m_position = FloatPoint3D(0, 0, 0);
    m_orientation = FloatPoint3D(1, 0, 0);
    m_velocity = FloatPoint3D(0, 0, 0);

    setNodeType(NodeTypePanner);

    initialize();
}
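
Because the HRTF database loads asynchronously, the node exists (and can render) before its impulse responses are in memory, so the render path has to check the loader's readiness and fall back to a cheaper panning model in the meantime. A self-contained sketch of that guard (the types and names below are hypothetical stand-ins, not WebKit's):

#include <atomic>

// Stand-in for HRTFDatabaseLoader: a background thread flips the flag
// once the impulse-response data has finished loading.
class AsyncDatabaseLoader {
public:
    bool isLoaded() const { return m_loaded.load(std::memory_order_acquire); }
    void markLoaded() { m_loaded.store(true, std::memory_order_release); }
private:
    std::atomic<bool> m_loaded { false };
};

// Render-thread guard: use HRTF panning only once the database is ready,
// rather than blocking the audio thread waiting for it.
bool shouldUseHRTF(const AsyncDatabaseLoader& loader, bool hrtfRequested)
{
    return hrtfRequested && loader.isLoaded();
}
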
WaveShaperNode::WaveShaperNode(AudioContext& context)
    : AudioNode(context)
{
    // Blink routes all processing through a ref-counted handler object
    // rather than subclassing the node itself.
    setHandler(AudioBasicProcessorHandler::create(
        AudioHandler::NodeTypeWaveShaper, *this, context.sampleRate(),
        adoptPtr(new WaveShaperProcessor(context.sampleRate(), 1))));

    handler().initialize();
}
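
Examples #1 and #4 show the same node in two engines. WebKit's WaveShaperNode subclasses AudioBasicProcessorNode and owns its WaveShaperProcessor directly, while Blink splits the script-visible node from a ref-counted AudioHandler built via AudioBasicProcessorHandler::create. The usual rationale for Blink's split is that the handler can be retained by the audio thread independently of the garbage-collected JavaScript wrapper.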