Example #1
DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
  : AudioNode(aContext)
  , mDelay(new AudioParam(this, SendDelayToStream, 0.0f, 0.0f, float(aMaxDelay)))
{
  // Create the delay engine and the AudioNodeStream that runs it on the
  // context's MediaStreamGraph.
  DelayNodeEngine* engine = new DelayNodeEngine(aContext->Destination());
  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
  engine->SetSourceStream(ns);
  // Hand the maximum delay over to the engine as a stream parameter.
  ns->SetDoubleParameter(DelayNodeEngine::MAX_DELAY, aMaxDelay);
}
Example #2
DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Max,
              ChannelInterpretation::Speakers)
  , mMediaStreamGraphUpdateIndexAtLastInputConnection(0)
  , mDelay(new AudioParam(this, SendDelayToStream, 0.0f))
{
  // Create the delay engine and the AudioNodeStream that runs it on the
  // context's MediaStreamGraph.
  DelayNodeEngine* engine = new DelayNodeEngine(this, aContext->Destination());
  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
  engine->SetSourceStream(ns);
  // Unlike Example #1, the maximum delay is not baked into the AudioParam; it
  // is sent to the engine only as a stream parameter.
  ns->SetDoubleParameter(DelayNodeEngine::MAX_DELAY, aMaxDelay);
}
Example #3
void
AudioBufferSourceNode::Start(double aWhen, double aOffset,
                             const Optional<double>& aDuration, ErrorResult& aRv)
{
  if (!WebAudioUtils::IsTimeValid(aWhen) ||
      (aDuration.WasPassed() && !WebAudioUtils::IsTimeValid(aDuration.Value()))) {
    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return;
  }

  if (mStartCalled) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }
  mStartCalled = true;

  AudioNodeStream* ns = mStream;
  if (!ns) {
    // Nothing to play, or we're already dead for some reason
    return;
  }

  // Remember our arguments so that we can use them when we get a new buffer.
  // The smallest positive double serves as a sentinel for "no duration passed".
  mOffset = aOffset;
  mDuration = aDuration.WasPassed() ? aDuration.Value()
                                    : std::numeric_limits<double>::min();

  WEB_AUDIO_API_LOG("%f: %s %u Start(%f, %g, %g)", Context()->CurrentTime(),
                    NodeType(), Id(), aWhen, aOffset, mDuration);

  // We can't send these parameters without a buffer because we don't know the
  // buffer's sample rate or length.
  if (mBuffer) {
    SendOffsetAndDurationParametersToStream(ns);
  }

  // Don't set the parameter unnecessarily.
  if (aWhen > 0.0) {
    ns->SetDoubleParameter(START, aWhen);
  }
}
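
For context, the following is a minimal, self-contained sketch of the validation pattern Start() follows above: reject invalid time arguments, allow start() to be called only once, and use std::numeric_limits<double>::min() as a sentinel meaning "no duration was passed". It is not Gecko code; FakeSourceNode and its members are hypothetical, and WebAudioUtils::IsTimeValid is assumed here to mean "finite and non-negative", which may differ from the real helper.

#include <cmath>
#include <limits>
#include <optional>
#include <stdexcept>

class FakeSourceNode {  // hypothetical stand-in for AudioBufferSourceNode
public:
  void Start(double aWhen, double aOffset, std::optional<double> aDuration) {
    // Reject invalid times up front, mirroring the NS_ERROR_DOM_NOT_SUPPORTED_ERR path.
    if (!IsTimeValid(aWhen) || (aDuration && !IsTimeValid(*aDuration))) {
      throw std::invalid_argument("invalid time value");
    }
    // start() may only be called once, mirroring the NS_ERROR_DOM_INVALID_STATE_ERR path.
    if (mStartCalled) {
      throw std::logic_error("start() already called");
    }
    mStartCalled = true;

    // Remember the arguments; the smallest positive double stands in for
    // "no duration was passed", as in the Gecko code above.
    mOffset = aOffset;
    mDuration = aDuration ? *aDuration : std::numeric_limits<double>::min();
  }

private:
  // Assumption: treated here as "finite and non-negative"; the real
  // WebAudioUtils::IsTimeValid may differ in detail.
  static bool IsTimeValid(double aTime) { return std::isfinite(aTime) && aTime >= 0.0; }

  bool mStartCalled = false;
  double mOffset = 0.0;
  double mDuration = std::numeric_limits<double>::min();
};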