Example #1
    // This variant starts a sound at a given offset relative to the beginning of the
    // sample, ends it at an offset (also relative to the beginning), and optionally
    // delays the start. If 0 is passed as end, the sound will play to the end of the sample.
    std::shared_ptr<AudioBufferSourceNode> SoundBuffer::play(ContextRenderLock& r, float start, float end, float when)
    {
        auto ac = r.context();
        if (audioBuffer && ac) {
            if (end == 0)
                end = audioBuffer->duration();
            
            std::shared_ptr<AudioBufferSourceNode> sourceBufferNode(new AudioBufferSourceNode(ac->destination()->sampleRate()));
            
            // Connect the source node to the parsed audio data for playback
            sourceBufferNode->setBuffer(r, audioBuffer);
            
            // bus the sound to the mixer.
            sourceBufferNode->connect(ac, ac->destination().get(), 0, 0);
            sourceBufferNode->startGrain(when, start, end - start);
            ac->holdSourceNodeUntilFinished(sourceBufferNode);

            return sourceBufferNode;
        }
        return nullptr;
    }
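A minimal usage sketch of this grain variant (hypothetical names: `mySound` is a loaded SoundBuffer and `r` is a live ContextRenderLock on the render thread):

// Hypothetical usage: play the first half-second of the sample, starting
// a quarter-second from now. A null return means no buffer or no live
// context, i.e. nothing was scheduled.
auto node = mySound.play(r, 0.0f /*start*/, 0.5f /*end*/, 0.25f /*when*/);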
Example #2
void AudioNode::processIfNecessary(ContextRenderLock& r, size_t framesToProcess)
{
    if (!isInitialized())
        return;
    
    auto ac = r.context();
    if (!ac)
        return;
    
    // Ensure that we only process once per rendering quantum.
    // This handles the "fanout" problem where an output is connected to multiple inputs.
    // The first time we're called during this time slice we process, but after that we don't want to re-process;
    // instead, our output(s) will already have the results cached in their bus.
    double currentTime = ac->currentTime();
    if (m_lastProcessingTime != currentTime) {
        m_lastProcessingTime = currentTime; // important to first update this time because of feedback loops in the rendering graph

        pullInputs(r, framesToProcess);

        bool silentInputs = inputsAreSilent(r);
        if (!silentInputs)
            m_lastNonSilentTime = (ac->currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate);

        bool ps = propagatesSilence(r.context()->currentTime());
        if (silentInputs && ps)
            silenceOutputs(r);
        else {
            process(r, framesToProcess);
            unsilenceOutputs(r);
        }
    }
}
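The same once-per-quantum guard can be read in isolation; a minimal sketch with hypothetical names:

// A node whose single output fans out to two inputs is pulled twice per
// render quantum but must render only once: the second pull sees the
// matching timestamp and returns, leaving the cached output bus untouched.
struct OncePerQuantumNode
{
    double m_lastProcessingTime = -1.0;

    void processIfNecessary(double currentTime)
    {
        if (m_lastProcessingTime == currentTime)
            return; // already rendered this quantum

        // Update the timestamp *before* pulling inputs so that a feedback
        // loop arriving back at this node terminates instead of recursing.
        m_lastProcessingTime = currentTime;

        // ... pullInputs() and process() as in the real implementation ...
    }
};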
Example #3
	// Output to the default context output 
	std::shared_ptr<AudioBufferSourceNode> SoundBuffer::play(ContextRenderLock& r, float when)
	{
		if (audioBuffer && r.context())
		{
			return play(r, r.context()->destination(), when);
		}
        return nullptr;
	}
Example #4
void AudioParam::calculateTimelineValues(ContextRenderLock& r, float* values, unsigned numberOfValues)
{
    // Calculate values for this render quantum.
    // Normally numberOfValues will equal AudioNode::ProcessingSizeInFrames (the render quantum size).
    double sampleRate = r.context()->sampleRate();
    double startTime = r.context()->currentTime();
    double endTime = startTime + numberOfValues / sampleRate;

    // Note we're running control rate at the sample-rate.
    // Pass in the current value as default value.
    m_value = m_timeline.valuesForTimeRange(startTime, endTime, narrowPrecisionToFloat(m_value), values, numberOfValues, sampleRate, sampleRate);
}
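As a worked example: with the common 128-frame render quantum at a 44100 Hz sample rate, endTime - startTime = 128 / 44100 ≈ 2.9 ms, so the timeline is evaluated in roughly 3 ms slices.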
Example #5
bool ADSRNode::finished(ContextRenderLock& r)
{
    if (!r.context())
        return true;

    double now = r.context()->currentTime();

    if (now > internalNode->m_noteOffTime)
    {
        internalNode->m_noteOffTime = 0;
    }

    return now > internalNode->m_noteOffTime;
}
Example #6
// FIXME: this can go away when we do mixing with gain directly in summing junction of AudioNodeInput
//
// As soon as we know the channel count of our input, we can lazily initialize.
// Sometimes this may be called more than once with different channel counts, in which case we must safely
// uninitialize and then re-initialize with the new channel count.
void GainNode::checkNumberOfChannelsForInput(ContextRenderLock& r, AudioNodeInput* input)
{
    if (!input)
        return;

    ASSERT(r.context());

    if (input != this->input(0).get())
        return;
        
    unsigned numberOfChannels = input->numberOfChannels(r);
    
    if (isInitialized() && numberOfChannels != output(0)->numberOfChannels())
    {
        // We're already initialized but the channel count has changed.
        uninitialize();
    }

    if (!isInitialized())
    {
        // This will propagate the channel count to any nodes connected further downstream in the graph.
        output(0)->setNumberOfChannels(r, numberOfChannels);
        initialize();
    }

    AudioNode::checkNumberOfChannelsForInput(r, input);
}
Example #7
bool AudioParam::smooth(ContextRenderLock& r)
{
    // If values have been explicitly scheduled on the timeline, then use the exact value.
    // Smoothing effectively is performed by the timeline.
    bool useTimelineValue = false;
    if (r.context())
        m_value = m_timeline.valueForContextTime(r, narrowPrecisionToFloat(m_value), useTimelineValue);
    
    if (m_smoothedValue == m_value) {
        // Smoothed value has already approached and snapped to value.
        return true;
    }
    
    if (useTimelineValue)
        m_smoothedValue = m_value;
    else {
        // Dezipper - exponential approach.
        m_smoothedValue += (m_value - m_smoothedValue) * m_smoothingConstant;

        // If we get close enough then snap to actual value.
        if (fabs(m_smoothedValue - m_value) < SnapThreshold) // FIXME: the threshold needs to be adjustable depending on range - but this is an OK general-purpose value.
            m_smoothedValue = m_value;
    }

    return false;
}
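The dezipper branch is a one-pole exponential smoother with a snap; a standalone sketch under assumed constants (the real m_smoothingConstant and SnapThreshold are LabSound's values, not shown here):

#include <cmath>

// One-pole exponential approach toward a target, snapping once the
// remaining error is negligible. Both constants are illustrative assumptions.
float dezipper(float target, float& smoothed)
{
    const float kSmoothingConstant = 0.05f; // assumed per-frame approach rate
    const float kSnapThreshold = 0.001f;    // assumed snap threshold

    smoothed += (target - smoothed) * kSmoothingConstant;
    if (std::fabs(smoothed - target) < kSnapThreshold)
        smoothed = target;
    return smoothed;
}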
Example #8
void AudioParam::calculateSampleAccurateValues(ContextRenderLock& r, float* values, unsigned numberOfValues)
{
    bool isSafe = r.context() && values && numberOfValues;
    if (!isSafe)
        return;

    calculateFinalValues(r, values, numberOfValues, true);
}
Example #9
void AudioParam::calculateFinalValues(ContextRenderLock& r, float* values, unsigned numberOfValues, bool sampleAccurate)
{
    bool isSafe = r.context() && values && numberOfValues;
    if (!isSafe)
        return;

    // The calculated result will be the "intrinsic" value summed with all audio-rate connections.

    if (sampleAccurate) {
        // Calculate sample-accurate (a-rate) intrinsic values.
        calculateTimelineValues(r, values, numberOfValues);
    }
    else {
        // Calculate control-rate (k-rate) intrinsic value.
        bool hasValue;
        float timelineValue = m_timeline.valueForContextTime(r, narrowPrecisionToFloat(m_value), hasValue);

        if (hasValue)
            m_value = timelineValue;

        values[0] = narrowPrecisionToFloat(m_value);
    }
    
    // if there are rendering connections, be sure they are ready
    updateRenderingState(r);
    
    size_t connectionCount = numberOfRenderingConnections(r);
    if (!connectionCount)
        return;

    // Now sum all of the audio-rate connections together (unity-gain summing junction).
    // Note that parameter connections would normally be mono, so mix down to mono if necessary.
    
    // LabSound: For some reason a bus was temporarily created here and the results discarded.
    // Bug still exists in WebKit top of tree.
    //
    if (m_data->m_internalSummingBus && m_data->m_internalSummingBus->length() < numberOfValues)
        m_data->m_internalSummingBus.reset();
    
    if (!m_data->m_internalSummingBus)
        m_data->m_internalSummingBus.reset(new AudioBus(1, numberOfValues));

    // point the summing bus at the values array
    m_data->m_internalSummingBus->setChannelMemory(0, values, numberOfValues);

    for (size_t i = 0; i < connectionCount; ++i) {
        auto output = renderingOutput(r, i);
        if (!output)
            continue;

        // Render audio from this output.
        AudioBus* connectionBus = output->pull(r, 0, AudioNode::ProcessingSizeInFrames);

        // Sum, with unity-gain.
        m_data->m_internalSummingBus->sumFrom(*connectionBus);
    }
}
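Note the design choice at the end: setChannelMemory() points the mono summing bus at the caller's values array, so each sumFrom() accumulates the pulled connection audio directly on top of the intrinsic values, with no final copy back.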
Example #10
void AudioNode::pullInputs(ContextRenderLock& r, size_t framesToProcess)
{
    ASSERT(r.context());
    
    // Process all of the AudioNodes connected to our inputs.
    for (auto & in : m_inputs)
    {
        in->pull(r, 0, framesToProcess);
    }
}
Example #11
void AudioNode::checkNumberOfChannelsForInput(ContextRenderLock& r, AudioNodeInput* input)
{
    ASSERT(r.context());
    for (auto & in : m_inputs)
    {
        if (in.get() == input)
        {
            input->updateInternalBus(r);
            break;
        }
    }
}
Example #12
float AudioParam::value(ContextRenderLock& r)
{
    // Update value for timeline.
    if (r.context()) {
        bool hasValue;
        float timelineValue = m_timeline.valueForContextTime(r, narrowPrecisionToFloat(m_value), hasValue);
        
        if (hasValue)
            m_value = timelineValue;
    }

    return narrowPrecisionToFloat(m_value);
}
Example #13
    // Output to a specific node
    std::shared_ptr<AudioBufferSourceNode> SoundBuffer::play(ContextRenderLock& r,
                                                             std::shared_ptr<AudioNode> outputNode, float when)
    {
        auto ac = r.context();
        if (audioBuffer && ac && outputNode) {
            std::shared_ptr<AudioBufferSourceNode> sourceBufferNode(new AudioBufferSourceNode(outputNode->sampleRate()));
            sourceBufferNode->setBuffer(r, audioBuffer);
            
            // bus the sound to the output node
            sourceBufferNode->start(when);
            ac->connect(sourceBufferNode, outputNode);
            ac->holdSourceNodeUntilFinished(sourceBufferNode);
            return sourceBufferNode;
        }
        return nullptr;
    }
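A hedged usage sketch (`gainNode` is hypothetical and assumed to have been created and connected onward to the destination already):

// Hypothetical usage: route a one-shot sample through an existing gain
// node instead of straight to the context's destination.
auto source = mySound.play(r, gainNode, 0.0f /*when: play immediately*/);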
Example #14
void PannerNode::pullInputs(ContextRenderLock& r, size_t framesToProcess)
{
    // We override pullInputs(), so we can detect new AudioSourceNodes which have connected to us when new connections are made.
    // These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes.
    auto ac = r.context();

    if (!ac)
        return;
    
    if (m_connectionCount != ac->connectionCount())
    {
        m_connectionCount = ac->connectionCount();
        // Recursively go through all nodes connected to us.
        // notifyAudioSourcesConnectedToNode(r, this); // @tofix dimitri commented out
    }

    AudioNode::pullInputs(r, framesToProcess);
}
Example #15
bool AudioBufferSourceNode::setBuffer(ContextRenderLock& r, std::shared_ptr<AudioBuffer> buffer)
{
    ASSERT(r.context());
    
    if (buffer) {
        // Do any necessary re-configuration to the buffer's number of channels.
        unsigned numberOfChannels = buffer->numberOfChannels();

        if (numberOfChannels > AudioContext::maxNumberOfChannels)
            return false;

        output(0)->setNumberOfChannels(r, numberOfChannels);

        m_sourceChannels = std::unique_ptr<const float*[]>(new const float*[numberOfChannels]);
        m_destinationChannels = std::unique_ptr<float*[]>(new float*[numberOfChannels]);

        for (unsigned i = 0; i < numberOfChannels; ++i) 
            m_sourceChannels[i] = buffer->getChannelData(i)->data();
    }

    m_virtualReadIndex = 0;
    m_buffer = buffer;
    return true;
}
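One subtlety worth noting: m_sourceChannels caches raw pointers into the buffer's channel data, which is safe only because m_buffer holds the AudioBuffer by shared_ptr for as long as the node reads from it.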
Example #16
void AudioBufferSourceNode::process(ContextRenderLock& r, size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus(r);
    if (!buffer() || !isInitialized() || !r.context()) {
        outputBus->zero();
        return;
    }

    // After calling setBuffer() with a buffer having a different number of channels, there can in rare cases be a slight delay
    // before the output bus is updated to the new number of channels because of use of tryLocks() in the context's updating system.
    // In this case, if the buffer has just been changed and we're not quite ready yet, then just output silence.
    if (numberOfChannels(r) != buffer()->numberOfChannels()) {
        outputBus->zero();
        return;
    }

    if (m_startRequested) {
        // Do sanity checking of grain parameters versus buffer size.
        double bufferDuration = buffer()->duration();
        
        double grainOffset = std::max(0.0, m_requestGrainOffset);
        grainOffset = std::min(bufferDuration, grainOffset);
        m_grainOffset = grainOffset;
        
        // Handle default/unspecified duration.
        double maxDuration = bufferDuration - grainOffset;
        double grainDuration = m_requestGrainDuration;
        if (!grainDuration)
            grainDuration = maxDuration;
        
        grainDuration = std::max(0.0, grainDuration);
        grainDuration = std::min(maxDuration, grainDuration);
        m_grainDuration = grainDuration;
        
        m_isGrain = true;
        m_startTime = m_requestWhen;
        
        // We call timeToSampleFrame here since at playbackRate == 1 we don't want to go through linear interpolation
        // at a sub-sample position since it will degrade the quality.
        // When aligned to the sample-frame the playback will be identical to the PCM data stored in the buffer.
        // Since playbackRate == 1 is very common, it's worth considering quality.
        m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
        m_startRequested = false;
    }
    
    size_t quantumFrameOffset;
    size_t bufferFramesToProcess;

    updateSchedulingInfo(r, framesToProcess, outputBus, quantumFrameOffset, bufferFramesToProcess);
                         
    if (!bufferFramesToProcess) 
    {
        outputBus->zero();
        return;
    }

    for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
    {
        m_destinationChannels[i] = outputBus->channel(i)->mutableData();
    }

    // Render by reading directly from the buffer.
    if (!renderFromBuffer(r, outputBus, quantumFrameOffset, bufferFramesToProcess))
    {
        outputBus->zero();
        return;
    }

    // Apply the gain (in-place) to the output bus.
    float totalGain = gain()->value(r) * m_buffer->gain();
    outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);
    outputBus->clearSilentFlag();
}
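AudioUtilities::timeToSampleFrame is the seconds-to-frames conversion relied on above; a plausible sketch (the exact rounding behavior is an assumption, not taken from LabSound):

#include <cmath>
#include <cstddef>

// Convert a time in seconds to an integral sample frame at the given rate.
// Round-to-nearest is assumed here so that playbackRate == 1 playback
// stays aligned to whole PCM frames.
size_t timeToSampleFrame(double timeSeconds, double sampleRate)
{
    return static_cast<size_t>(std::round(timeSeconds * sampleRate));
}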
Example #17
        // Processes the source to destination bus. The number of channels must match in source and destination.
        virtual void process(ContextRenderLock& r,
                             const lab::AudioBus * sourceBus, lab::AudioBus* destinationBus,
                             size_t framesToProcess) override
        {
            if (!numberOfChannels())
                return;
            
            std::shared_ptr<lab::AudioContext> c = r.contextPtr();

            if (m_noteOnTime >= 0)
            {
                if (m_currentGain > 0)
                {
                    m_zeroSteps = 16;
                    m_zeroStepSize = -m_currentGain / 16.0f;
                }
                else
                    m_zeroSteps = 0;
                
                m_attackTimeTarget = m_noteOnTime + m_attackTime->value(r);
                
                m_attackSteps = m_attackTime->value(r) * sampleRate();
                m_attackStepSize = m_attackLevel->value(r) / m_attackSteps;
                
                m_decayTimeTarget = m_attackTimeTarget + m_decayTime->value(r);
                
                m_decaySteps = m_decayTime->value(r) * sampleRate();
                m_decayStepSize = (m_sustainLevel->value(r) - m_attackLevel->value(r)) / m_decaySteps;
                
                m_releaseSteps = 0;
                
                m_noteOffTime = std::numeric_limits<double>::max();
                m_noteOnTime = -1.;
            }
            
            // We handle both the 1 -> N and N -> N case here.
            const float* source = sourceBus->channelByType(Channel::First)->data();

            // This will only ever happen once, so if heap contention is an issue it should only ever cause one glitch.
            // What would be better: alloca? What does WebAudio do elsewhere for this sort of thing?
            if (gainValues.size() < framesToProcess)
                gainValues.resize(framesToProcess);

            float s = m_sustainLevel->value(r);

            for (size_t i = 0; i < framesToProcess; ++i)
            {
                if (m_zeroSteps > 0)
                {
                    --m_zeroSteps;
                    m_currentGain += m_zeroStepSize;
                    gainValues[i] = m_currentGain;
                }
                
                else if (m_attackSteps > 0)
                {
                    --m_attackSteps;
                    m_currentGain += m_attackStepSize;
                    gainValues[i] = m_currentGain;
                }
                
                else if (m_decaySteps > 0)
                {
                    --m_decaySteps;
                    m_currentGain += m_decayStepSize;
                    gainValues[i] = m_currentGain;
                }
                
                else if (m_releaseSteps > 0)
                {
                    --m_releaseSteps;
                    m_currentGain += m_releaseStepSize;
                    gainValues[i] = m_currentGain;
                }
                else
                {
                    m_currentGain = (m_noteOffTime == std::numeric_limits<double>::max()) ? s : 0;
                    gainValues[i] = m_currentGain;
                }
            }

            unsigned numChannels = numberOfChannels();
            for (unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
            {
                if (sourceBus->numberOfChannels() == numChannels)
                    source = sourceBus->channel(channelIndex)->data();
                
                float * destination = destinationBus->channel(channelIndex)->mutableData();
                
                VectorMath::vmul(source, 1, &gainValues[0], 1, destination, 1, framesToProcess);
            }
        }
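As a worked example of the step arithmetic above: at sampleRate() == 44100 with an attack time of 0.01 s and an attack level of 1.0, m_attackSteps is 441 and m_attackStepSize is 1.0 / 441, so the per-frame loop ramps the gain linearly to the attack level over exactly 10 ms of frames.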
Example #18
bool AudioBufferSourceNode::renderFromBuffer(ContextRenderLock& r, AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames)
{
    if (!r.context())
        return false;

    // Basic sanity checking
    ASSERT(bus);
    ASSERT(buffer());
    if (!bus || !buffer())
        return false;

    unsigned numChannels = numberOfChannels(r);
    unsigned busNumberOfChannels = bus->numberOfChannels();

    bool channelCountGood = numChannels && numChannels == busNumberOfChannels;
    ASSERT(channelCountGood);
    if (!channelCountGood)
        return false;

    // Sanity check destinationFrameOffset, numberOfFrames.
    size_t destinationLength = bus->length();

    bool isLengthGood = destinationLength <= 4096 && numberOfFrames <= 4096;
    ASSERT(isLengthGood);
    if (!isLengthGood)
        return false;

    bool isOffsetGood = destinationFrameOffset <= destinationLength && destinationFrameOffset + numberOfFrames <= destinationLength;
    ASSERT(isOffsetGood);
    if (!isOffsetGood)
        return false;

    // Potentially zero out initial frames leading up to the offset.
    if (destinationFrameOffset) {
        for (unsigned i = 0; i < numChannels; ++i) 
            memset(m_destinationChannels[i], 0, sizeof(float) * destinationFrameOffset);
    }

    // Offset the pointers to the correct offset frame.
    unsigned writeIndex = destinationFrameOffset;

    size_t bufferLength = buffer()->length();
    double bufferSampleRate = buffer()->sampleRate();

    // Avoid converting from time to sample-frames twice by computing
    // the grain end time first before computing the sample frame.
    unsigned endFrame = m_isGrain ? AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate) : bufferLength;
    
    // This is a HACK to allow for HRTF tail-time - avoids glitch at end.
    // FIXME: implement tailTime for each AudioNode for a more general solution to this problem.
    // https://bugs.webkit.org/show_bug.cgi?id=77224
    if (m_isGrain)
        endFrame += 512;

    // Do some sanity checking.
    if (endFrame > bufferLength)
        endFrame = bufferLength;
    if (m_virtualReadIndex >= endFrame)
        m_virtualReadIndex = 0; // reset to start

    // If the .loop attribute is true, then values of m_loopStart == 0 && m_loopEnd == 0 implies
    // that we should use the entire buffer as the loop, otherwise use the loop values in m_loopStart and m_loopEnd.
    double virtualEndFrame = endFrame;
    double virtualDeltaFrames = endFrame;

    if (loop() && (m_loopStart || m_loopEnd) && m_loopStart >= 0 && m_loopEnd > 0 && m_loopStart < m_loopEnd) {
        // Convert from seconds to sample-frames.
        double loopStartFrame = m_loopStart * buffer()->sampleRate();
        double loopEndFrame = m_loopEnd * buffer()->sampleRate();

        virtualEndFrame = std::min(loopEndFrame, virtualEndFrame);
        virtualDeltaFrames = virtualEndFrame - loopStartFrame;
    }


    double pitchRate = totalPitchRate(r);

    // Sanity check that our playback rate isn't larger than the loop size.
    if (fabs(pitchRate) >= virtualDeltaFrames)
        return false;

    // Get local copy.
    double virtualReadIndex = m_virtualReadIndex;

    // Render loop - reading from the source buffer to the destination using linear interpolation.
    int framesToProcess = numberOfFrames;

    const float** sourceChannels = m_sourceChannels.get();
    float** destinationChannels = m_destinationChannels.get();

    // Optimize for the very common case of playing back with pitchRate == 1.
    // We can avoid the linear interpolation.
    if (pitchRate == 1 && virtualReadIndex == floor(virtualReadIndex)
        && virtualDeltaFrames == floor(virtualDeltaFrames)
        && virtualEndFrame == floor(virtualEndFrame)) {
        unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
        unsigned deltaFrames = static_cast<unsigned>(virtualDeltaFrames);
        endFrame = static_cast<unsigned>(virtualEndFrame);
        while (framesToProcess > 0) {
            int framesToEnd = endFrame - readIndex;
            int framesThisTime = std::min(framesToProcess, framesToEnd);
            framesThisTime = std::max(0, framesThisTime);

            for (unsigned i = 0; i < numChannels; ++i) 
                memcpy(destinationChannels[i] + writeIndex, sourceChannels[i] + readIndex, sizeof(float) * framesThisTime);

            writeIndex += framesThisTime;
            readIndex += framesThisTime;
            framesToProcess -= framesThisTime;

            // Wrap-around.
            if (readIndex >= endFrame) {
                readIndex -= deltaFrames;
                if (renderSilenceAndFinishIfNotLooping(r, bus, writeIndex, framesToProcess))
                    break;
            }
        }
        virtualReadIndex = readIndex;
    } else {
        while (framesToProcess--) {
            unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
            double interpolationFactor = virtualReadIndex - readIndex;

            // For linear interpolation we need the next sample-frame too.
            unsigned readIndex2 = readIndex + 1;
            if (readIndex2 >= bufferLength) {
                if (loop()) {
                    // Make sure to wrap around at the end of the buffer.
                    readIndex2 = static_cast<unsigned>(virtualReadIndex + 1 - virtualDeltaFrames);
                } else
                    readIndex2 = readIndex;
            }

            // Final sanity check on buffer access.
            // FIXME: as an optimization, try to get rid of this inner-loop check and put assertions and guards before the loop.
            if (readIndex >= bufferLength || readIndex2 >= bufferLength)
                break;

            // Linear interpolation.
            for (unsigned i = 0; i < numChannels; ++i) {
                float* destination = destinationChannels[i];
                const float* source = sourceChannels[i];

                double sample1 = source[readIndex];
                double sample2 = source[readIndex2];
                double sample = (1.0 - interpolationFactor) * sample1 + interpolationFactor * sample2;

                destination[writeIndex] = narrowPrecisionToFloat(sample);
            }
            writeIndex++;

            virtualReadIndex += pitchRate;

            // Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
            if (virtualReadIndex >= virtualEndFrame) {
                virtualReadIndex -= virtualDeltaFrames;
                if (renderSilenceAndFinishIfNotLooping(r, bus, writeIndex, framesToProcess))
                    break;
            }
        }
    }

    bus->clearSilentFlag();

    m_virtualReadIndex = virtualReadIndex;

    return true;
}
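As a worked example of the interpolating branch: with virtualReadIndex = 10.25, readIndex is 10, interpolationFactor is 0.25, and each output sample is 0.75 * source[10] + 0.25 * source[11]; at pitchRate = 0.5 the read position advances half a frame per output frame, playing the buffer one octave down.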