/**
 * Converts the data in aSegment to a single chunk aBlock. aSegment must have
 * duration WEBAUDIO_BLOCK_SIZE. aFallbackChannelCount must be at least the
 * channel count of every chunk in aSegment. Every chunk in aSegment must be
 * in float format or null.
 */
static void ConvertSegmentToAudioBlock(AudioSegment* aSegment,
                                       AudioChunk* aBlock,
                                       int32_t aFallbackChannelCount)
{
  NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, "Bad segment duration");

  {
    AudioSegment::ChunkIterator ci(*aSegment);
    NS_ASSERTION(!ci.IsEnded(), "Should be at least one chunk!");
    if (ci->GetDuration() == WEBAUDIO_BLOCK_SIZE &&
        (ci->IsNull() || ci->mBufferFormat == AUDIO_FORMAT_FLOAT32)) {
      // Return this chunk directly to avoid copying data.
      *aBlock = *ci;
      return;
    }
  }

  AllocateAudioBlock(aFallbackChannelCount, aBlock);

  uint32_t duration = 0;
  for (AudioSegment::ChunkIterator ci(*aSegment); !ci.IsEnded(); ci.Next()) {
    CopyChunkToBlock(*ci, aBlock, duration);
    duration += ci->GetDuration();
  }
}
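CopyChunkToBlock is used above but not shown here. The following is a minimal sketch of its semantics using simplified stand-in types (SimpleChunk/SimpleBlock are illustrative names, not Gecko's): copy each channel of the chunk into the block at the given frame offset, scaling by the chunk's volume, and zero-fill any channel the chunk lacks.

#include <cstdint>
#include <cstring>
#include <vector>

struct SimpleChunk {
  std::vector<const float*> mChannelData; // null entries mean silence
  float mVolume = 1.0f;
  uint32_t mDuration = 0;                 // frames in this chunk
};

struct SimpleBlock {
  std::vector<float*> mChannelData;       // WEBAUDIO_BLOCK_SIZE floats each
};

static void CopyChunkToBlock(const SimpleChunk& aChunk, SimpleBlock* aBlock,
                             uint32_t aOffset)
{
  for (size_t c = 0; c < aBlock->mChannelData.size(); ++c) {
    float* out = aBlock->mChannelData[c] + aOffset;
    const float* in =
      c < aChunk.mChannelData.size() ? aChunk.mChannelData[c] : nullptr;
    if (in) {
      // Copy with the chunk's volume applied.
      for (uint32_t i = 0; i < aChunk.mDuration; ++i) {
        out[i] = in[i] * aChunk.mVolume;
      }
    } else {
      // Channel missing from the chunk: write silence.
      std::memset(out, 0, aChunk.mDuration * sizeof(float));
    }
  }
}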
Example #2
void Reverb::initialize(const nsTArray<const float*>& impulseResponseBuffer,
                        size_t impulseResponseBufferLength, size_t renderSliceSize,
                        size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads)
{
    m_impulseResponseLength = impulseResponseBufferLength;

    // The reverb can handle a mono impulse response and still do stereo processing
    size_t numResponseChannels = impulseResponseBuffer.Length();
    m_convolvers.SetCapacity(numberOfChannels);

    int convolverRenderPhase = 0;
    for (size_t i = 0; i < numResponseChannels; ++i) {
        const float* channel = impulseResponseBuffer[i];
        size_t length = impulseResponseBufferLength;

        nsAutoPtr<ReverbConvolver> convolver(new ReverbConvolver(channel, length, renderSliceSize, maxFFTSize, convolverRenderPhase, useBackgroundThreads));
        m_convolvers.AppendElement(convolver.forget());

        convolverRenderPhase += renderSliceSize;
    }

    // For "True" stereo processing we allocate a temporary buffer to avoid repeatedly allocating it in the process() method.
    // It can be bad to allocate memory in a real-time thread.
    if (numResponseChannels == 4) {
        AllocateAudioBlock(2, &m_tempBuffer);
        WriteZeroesToAudioBlock(&m_tempBuffer, 0, WEBAUDIO_BLOCK_SIZE);
    }
}
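A hedged usage sketch: the convolverRenderPhase stagger offsets each channel's convolver by one render slice, so their background FFT work does not all land on the same render quantum. Assuming a stereo impulse response already split into per-channel float arrays (all names below, and the default construction of Reverb, are assumptions for illustration):

// Hypothetical call site, for illustration only.
nsTArray<const float*> irChannels;
irChannels.AppendElement(leftImpulseData);   // assumed: float arrays of
irChannels.AppendElement(rightImpulseData);  // irLengthInFrames samples each

Reverb reverb;
reverb.initialize(irChannels,
                  irLengthInFrames,          // impulseResponseBufferLength
                  WEBAUDIO_BLOCK_SIZE,       // renderSliceSize
                  32768,                     // maxFFTSize
                  2,                         // numberOfChannels
                  true);                     // useBackgroundThreads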
 /**
  * Copy as many frames as possible from the source buffer to aOutput, and
  * advance aOffsetWithinBlock and aCurrentPosition based on how many frames
  * we write.  This will never advance aOffsetWithinBlock past
  * WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop.  It takes data from
  * the buffer at mBufferPosition, and never takes more data than aBufferMax.
  * This function knows when it needs to allocate the output buffer, and also
  * optimizes the case where it can avoid memory allocations.
  */
 void CopyFromBuffer(AudioNodeStream* aStream,
                     AudioChunk* aOutput,
                     uint32_t aChannels,
                     uint32_t* aOffsetWithinBlock,
                     TrackTicks* aCurrentPosition,
                     int32_t aBufferMax)
 {
   MOZ_ASSERT(*aCurrentPosition < mStop);
   uint32_t numFrames =
     std::min(std::min<TrackTicks>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                                   aBufferMax - mBufferPosition),
              mStop - *aCurrentPosition);
   if (numFrames == WEBAUDIO_BLOCK_SIZE && !mResampler) {
     MOZ_ASSERT(mBufferPosition < aBufferMax);
     BorrowFromInputBuffer(aOutput, aChannels);
     *aOffsetWithinBlock += numFrames;
     *aCurrentPosition += numFrames;
     mBufferPosition += numFrames;
   } else {
     if (*aOffsetWithinBlock == 0) {
       AllocateAudioBlock(aChannels, aOutput);
     }
     if (!mResampler) {
       MOZ_ASSERT(mBufferPosition < aBufferMax);
       CopyFromInputBuffer(aOutput, aChannels, *aOffsetWithinBlock, numFrames);
       *aOffsetWithinBlock += numFrames;
       *aCurrentPosition += numFrames;
       mBufferPosition += numFrames;
     } else {
       CopyFromInputBufferWithResampling(aStream, aOutput, aChannels, aOffsetWithinBlock, aCurrentPosition, aBufferMax);
     }
   }
 }
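The non-resampling path calls CopyFromInputBuffer; a sketch of what such a helper does, written as a simplified free function (the real member's signature differs):

#include <cstdint>
#include <cstring>

// Copy aNumFrames frames per channel from the source at aBufferPosition
// into the output block at aOffsetWithinBlock.
static void CopyFrames(float* const* aOutputChannels,
                       const float* const* aInputChannels,
                       uint32_t aChannels,
                       uint32_t aOffsetWithinBlock,
                       uint32_t aBufferPosition,
                       uint32_t aNumFrames)
{
  for (uint32_t c = 0; c < aChannels; ++c) {
    std::memcpy(aOutputChannels[c] + aOffsetWithinBlock,
                aInputChannels[c] + aBufferPosition,
                aNumFrames * sizeof(float));
  }
}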
/**
 * Converts the data in aSegment to a single chunk aBlock. Every chunk in
 * aSegment must have the same number of channels (or be null). aSegment must have
 * duration WEBAUDIO_BLOCK_SIZE. Every chunk in aSegment must be in float format.
 */
static void
ConvertSegmentToAudioBlock(AudioSegment* aSegment, AudioChunk* aBlock)
{
  NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, "Bad segment duration");

  {
    AudioSegment::ChunkIterator ci(*aSegment);
    NS_ASSERTION(!ci.IsEnded(), "Segment must have at least one chunk");
    AudioChunk& firstChunk = *ci;
    ci.Next();
    if (ci.IsEnded()) {
      *aBlock = firstChunk;
      return;
    }

    // Find a non-null chunk to determine the channel count, starting over
    // from the first chunk (which may itself be the only non-null one).
    // Check for the end of the segment before dereferencing the iterator.
    AudioSegment::ChunkIterator search(*aSegment);
    while (!search.IsEnded() && search->IsNull()) {
      search.Next();
    }
    if (search.IsEnded()) {
      // All chunks are null.
      aBlock->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }

    AllocateAudioBlock(search->mChannelData.Length(), aBlock);
  }

  AudioSegment::ChunkIterator ci(*aSegment);
  uint32_t duration = 0;
  while (!ci.IsEnded()) {
    CopyChunkToBlock(*ci, aBlock, duration);
    duration += ci->GetDuration();
    ci.Next();
  }
}
Example #5
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
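GetAudioChannelsSuperset, used above, computes a channel count that can carry both inputs without losing information; for the standard layouts that reduces to the larger of the two counts. A sketch (treat the exact Gecko definition as an assumption):

#include <algorithm>
#include <cstdint>

static uint32_t GetAudioChannelsSuperset(uint32_t aChannels1,
                                         uint32_t aChannels2)
{
  // Up-mixing the smaller layout into the larger preserves all signal,
  // so the superset is simply the larger channel count.
  return std::max(aChannels1, aChannels2);
}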
Example #6
void
DelayBuffer::Read(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE],
                  AudioChunk* aOutputChunk,
                  ChannelInterpretation aChannelInterpretation)
{
  int chunkCount = mChunks.Length();
  if (!chunkCount) {
    aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  // Find the maximum number of contributing channels to determine the output
  // channel count that retains all signal information.  Buffered blocks will
  // be upmixed if necessary.
  //
  // First find the range of "delay" offsets backwards from the current
  // position.  Each frame's delay is rebased to the start of the block
  // (aPerFrameDelays[i] - i), so the result can be negative for frames that
  // read from positions after the block's first frame.
  double minDelay = aPerFrameDelays[0];
  double maxDelay = minDelay;
  for (unsigned i = 1; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    minDelay = std::min(minDelay, aPerFrameDelays[i] - i);
    maxDelay = std::max(maxDelay, aPerFrameDelays[i] - i);
  }

  // Now find the chunks touched by this range and check their channel counts.
  int oldestChunk = ChunkForDelay(int(maxDelay) + 1);
  int youngestChunk = ChunkForDelay(minDelay);

  uint32_t channelCount = 0;
  for (int i = oldestChunk; true; i = (i + 1) % chunkCount) {
    channelCount = GetAudioChannelsSuperset(channelCount,
                                            mChunks[i].ChannelCount());
    if (i == youngestChunk) {
      break;
    }
  }

  if (channelCount) {
    AllocateAudioBlock(channelCount, aOutputChunk);
    ReadChannels(aPerFrameDelays, aOutputChunk,
                 0, channelCount, aChannelInterpretation);
  } else {
    aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
  }

  // Remember the last frame's delay for the next ProcessBlock call
  mCurrentDelay = aPerFrameDelays[WEBAUDIO_BLOCK_SIZE - 1];
}
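A worked example of the min/max rebasing above, shrunk to a 4-frame block for readability:

// Per-frame delays {3.0, 3.5, 4.0, 4.5} rebased to the block start:
//   frame 0: 3.0 - 0 = 3.0
//   frame 1: 3.5 - 1 = 2.5
//   frame 2: 4.0 - 2 = 2.0
//   frame 3: 4.5 - 3 = 1.5
// minDelay = 1.5, maxDelay = 3.0, so only the chunks covering offsets
// roughly [1.5, 4.0) behind the block start contribute, and only their
// channel counts matter when sizing the output allocation.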
 /**
  * Fill aOutput with as many zero frames as we can, and advance
  * aOffsetWithinBlock and aCurrentPosition based on how many frames we write.
  * This will never advance aOffsetWithinBlock past WEBAUDIO_BLOCK_SIZE or
  * aCurrentPosition past aMaxPos.  This function knows when it needs to
  * allocate the output buffer, and also optimizes the case where it can avoid
  * memory allocations.
  */
 void FillWithZeroes(AudioChunk* aOutput,
                     uint32_t aChannels,
                     uint32_t* aOffsetWithinBlock,
                     TrackTicks* aCurrentPosition,
                     TrackTicks aMaxPos)
 {
   uint32_t numFrames = std::min(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                                 uint32_t(aMaxPos - *aCurrentPosition));
   if (numFrames == WEBAUDIO_BLOCK_SIZE) {
     aOutput->SetNull(numFrames);
   } else {
     if (aOutput->IsNull()) {
       AllocateAudioBlock(aChannels, aOutput);
     }
     WriteZeroesToAudioBlock(aOutput, *aOffsetWithinBlock, numFrames);
   }
   *aOffsetWithinBlock += numFrames;
   *aCurrentPosition += numFrames;
 }
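FillWithZeroes and CopyFromBuffer are meant to be driven together from a block-production loop. A sketch of that pattern, with assumed member names (mStart, mStop, mBufferEnd) and simplified control flow; it omits the end-of-buffer and looping handling of the real engine:

// Hypothetical driver: produce one WEBAUDIO_BLOCK_SIZE block, emitting
// silence outside [mStart, mStop) and buffer data inside it.
void ProduceBlock(AudioNodeStream* aStream, AudioChunk* aOutput,
                  uint32_t aChannels, TrackTicks aStreamPosition)
{
  uint32_t written = 0;
  while (written < WEBAUDIO_BLOCK_SIZE) {
    if (aStreamPosition < mStart) {
      // Before the scheduled start: zeros up to mStart.
      FillWithZeroes(aOutput, aChannels, &written, &aStreamPosition, mStart);
    } else if (aStreamPosition < mStop) {
      // Within the scheduled range: copy (or borrow) real data.
      CopyFromBuffer(aStream, aOutput, aChannels, &written, &aStreamPosition,
                     mBufferEnd);
    } else {
      // Past the stop time: zeros for the rest of the block.
      FillWithZeroes(aOutput, aChannels, &written, &aStreamPosition,
                     aStreamPosition + WEBAUDIO_BLOCK_SIZE);
    }
  }
}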
  virtual void ProduceAudioBlock(AudioNodeStream* aStream,
                                 const AudioChunk& aInput,
                                 AudioChunk* aOutput,
                                 bool* aFinished)
  {
    uint32_t channelCount = aInput.mChannelData.Length();
    if (!mCurve.Length() || !channelCount) {
      // Optimize the case where we don't have a curve buffer,
      // or the input is null.
      *aOutput = aInput;
      return;
    }

    AllocateAudioBlock(channelCount, aOutput);
    for (uint32_t i = 0; i < channelCount; ++i) {
      const float* inputBuffer = static_cast<const float*>(aInput.mChannelData[i]);
      float* outputBuffer = const_cast<float*>(static_cast<const float*>(aOutput->mChannelData[i]));
      float* sampleBuffer;

      switch (mType) {
      case OverSampleType::None:
        mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::None);
        ProcessCurve<1>(inputBuffer, outputBuffer);
        break;
      case OverSampleType::_2x:
        mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::_2x);
        sampleBuffer = mResampler.UpSample(i, inputBuffer, 2);
        ProcessCurve<2>(sampleBuffer, sampleBuffer);
        mResampler.DownSample(i, outputBuffer, 2);
        break;
      case OverSampleType::_4x:
        mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::_4x);
        sampleBuffer = mResampler.UpSample(i, inputBuffer, 4);
        ProcessCurve<4>(sampleBuffer, sampleBuffer);
        mResampler.DownSample(i, outputBuffer, 4);
        break;
      default:
        NS_NOTREACHED("We should never reach here");
      }
    }
  }
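ProcessCurve<N> is not shown. Per the Web Audio spec, a WaveShaper maps each sample through the curve with linear interpolation, treating the curve as spanning input values [-1, 1]. A simplified, non-template sketch (mCurve is assumed to be an array of floats; <algorithm> is assumed included):

// Map each of aFrames samples through mCurve with linear interpolation,
// clamping inputs to the curve's domain [-1, 1].
void ApplyCurve(const float* aInput, float* aOutput, uint32_t aFrames) const
{
  const uint32_t last = mCurve.Length() - 1;
  for (uint32_t i = 0; i < aFrames; ++i) {
    // Map [-1, 1] onto curve index space [0, last].
    float position = (aInput[i] + 1.0f) * 0.5f * float(last);
    position = std::max(0.0f, std::min(float(last), position));
    uint32_t lower = uint32_t(position);
    uint32_t upper = std::min(lower + 1, last);
    float t = position - float(lower);
    aOutput[i] = mCurve[lower] + t * (mCurve[upper] - mCurve[lower]);
  }
}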
  /**
   * Copy as many frames as possible from the source buffer to aOutput, and
   * advance aOffsetWithinBlock and aCurrentPosition based on how many frames
   * we copy.  This will never advance aOffsetWithinBlock past
   * WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop.  It takes data from
   * the buffer at aBufferOffset, and never takes more data than aBufferMax.
   * This function knows when it needs to allocate the output buffer, and also
   * optimizes the case where it can avoid memory allocations.
   */
  void CopyFromBuffer(AudioNodeStream* aStream,
                      AudioChunk* aOutput,
                      uint32_t aChannels,
                      uint32_t* aOffsetWithinBlock,
                      TrackTicks* aCurrentPosition,
                      uint32_t aBufferOffset,
                      uint32_t aBufferMax)
  {
    MOZ_ASSERT(*aCurrentPosition < mStop);
    uint32_t numFrames =
      std::min<TrackTicks>(std::min(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
                                    aBufferMax - aBufferOffset),
                           mStop - *aCurrentPosition);
    if (numFrames == WEBAUDIO_BLOCK_SIZE && !ShouldResample(aStream->SampleRate())) {
      BorrowFromInputBuffer(aOutput, aChannels, aBufferOffset);
      *aOffsetWithinBlock += numFrames;
      *aCurrentPosition += numFrames;
      mPosition += numFrames;
    } else {
      if (aOutput->IsNull()) {
        MOZ_ASSERT(*aOffsetWithinBlock == 0);
        AllocateAudioBlock(aChannels, aOutput);
      }
      if (!ShouldResample(aStream->SampleRate())) {
        CopyFromInputBuffer(aOutput, aChannels, aBufferOffset, *aOffsetWithinBlock, numFrames);
        *aOffsetWithinBlock += numFrames;
        *aCurrentPosition += numFrames;
        mPosition += numFrames;
      } else {
        uint32_t framesRead, framesWritten, availableInInputBuffer;

        availableInInputBuffer = aBufferMax - aBufferOffset;

        CopyFromInputBufferWithResampling(aStream, aOutput, aChannels, aBufferOffset, *aOffsetWithinBlock, availableInInputBuffer, framesRead, framesWritten);
        *aOffsetWithinBlock += framesWritten;
        *aCurrentPosition += framesRead;
        mPosition += framesRead;
      }
    }
  }
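The full-block fast path avoids the copy entirely via BorrowFromInputBuffer, which points the output chunk at the shared source buffer. A sketch of the idea (member and type names are assumptions):

// Share the source buffer with the output chunk instead of copying.
// Holding a reference to the thread-shared buffer keeps the channel
// pointers valid for as long as the chunk lives.
void BorrowFromInputBuffer(AudioChunk* aOutput, uint32_t aChannels,
                           uint32_t aBufferOffset)
{
  aOutput->mDuration = WEBAUDIO_BLOCK_SIZE;
  aOutput->mBuffer = mBuffer;  // refcounted shared buffer (assumed member)
  aOutput->mChannelData.SetLength(aChannels);
  for (uint32_t c = 0; c < aChannels; ++c) {
    aOutput->mChannelData[c] = mBuffer->GetData(c) + aBufferOffset;
  }
  aOutput->mVolume = 1.0f;
  aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32;
}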
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    // It is possible for mLastChunks to be empty here, because `a` might be
    // an AudioNodeStream that has not been scheduled yet: it sits further
    // down the graph but also feeds back into this node. Because we enforce
    // the presence of at least one DelayNode, with at least one block of
    // delay, in any cycle, and because a DelayNode outputs silence until it
    // has been fed `delayTime` worth of audio, we can simply skip this input;
    // it cannot influence this node's output yet. On the next iteration
    // a->mLastChunks.IsEmpty() will be false and everything will work as
    // usual.
    if (a->mLastChunks.IsEmpty()) {
      continue;
    }

    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
AudioChunk*
AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 0;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunk;
    // XXX when we implement DelayNode, this will no longer be true and we'll
    // need to treat a null chunk (when the DelayNode hasn't had a chance
    // to produce data yet) as silence here.
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0) {
    aTmpChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
    return aTmpChunk;
  }

  if (inputChunkCount == 1) {
    return inputChunks[0];
  }

  AllocateAudioBlock(outputChannelCount, aTmpChunk);

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
      NS_ASSERTION(outputChannelCount == channels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk->mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }

  return aTmpChunk;
}
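The two mixing kernels used above are, conceptually, one-line loops over the block. Reference sketches (the shipped versions may be SIMD-optimized):

static void AudioBlockCopyChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE],
                                           float aScale,
                                           float aOutput[WEBAUDIO_BLOCK_SIZE])
{
  // First input: overwrite the output channel.
  for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    aOutput[i] = aInput[i] * aScale;
  }
}

static void AudioBlockAddChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE],
                                          float aScale,
                                          float aOutput[WEBAUDIO_BLOCK_SIZE])
{
  // Subsequent inputs: mix into the output channel.
  for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
    aOutput[i] += aInput[i] * aScale;
  }
}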
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to the spec, the number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    const AudioSegment& inputSegment =
        *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded(); iter.Next()) {
      inputChannels = GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }

  // Use AudioNodeStream's AdvanceOutputSegment to push the media stream graph along, even when we only produced null data.
  AdvanceOutputSegment();
}
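AccumulateInputChunk is not shown. Conceptually it up- or down-mixes a chunk to the output block's channel count and then copies it (first chunk) or sums it in (later chunks). A sketch assuming a mixing helper along the lines of Gecko's UpMixDownMixChunk (treat the exact names and signature as assumptions):

// aIndex == 0 overwrites the block; later chunks are summed into it.
void AccumulateInputChunk(uint32_t aIndex, const AudioChunk& aChunk,
                          AudioChunk* aBlock,
                          nsTArray<float>* aDownmixBuffer)
{
  nsAutoTArray<const void*, GUESS_AUDIO_CHANNELS> channels;
  // Adjust the chunk's channels to the block's channel count (assumed helper).
  UpMixDownMixChunk(&aChunk, aBlock->mChannelData.Length(), channels,
                    *aDownmixBuffer);

  for (uint32_t c = 0; c < channels.Length(); ++c) {
    const float* in = static_cast<const float*>(channels[c]);
    float* out =
      static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c]));
    if (in) {
      if (aIndex == 0) {
        AudioBlockCopyChannelWithScale(in, aChunk.mVolume, out);
      } else {
        AudioBlockAddChannelWithScale(in, aChunk.mVolume, out);
      }
    } else if (aIndex == 0) {
      // Silent channel contributes nothing; only the first write must zero.
      memset(out, 0, WEBAUDIO_BLOCK_SIZE * sizeof(float));
    }
  }
}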
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread() ||
        a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  switch (mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the output channel count that we've calculated, and just use
    // mNumberOfInputChannels.
    outputChannelCount = mNumberOfInputChannels;
    break;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
    break;
  case ChannelCountMode::Max:
    // Nothing to do here, just shut up the compiler warning.
    break;
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
        NS_ASSERTION(outputChannelCount == channels.Length(),
                     "We called GetAudioChannelsSuperset to avoid this");
      } else {
        // Fill up the remaining channels with zeros
        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
          channels.AppendElement(silenceChannel);
        }
      }
    } else if (channels.Length() > outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
        outputChannels.SetLength(outputChannelCount);
        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
        for (uint32_t j = 0; j < outputChannelCount; ++j) {
          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
        }

        AudioChannelsDownMix(channels, outputChannels.Elements(),
                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);

        channels.SetLength(outputChannelCount);
        for (uint32_t j = 0; j < channels.Length(); ++j) {
          channels[j] = outputChannels[j];
        }
      } else {
        // Drop the remaining channels
        channels.RemoveElementsAt(outputChannelCount,
                                  channels.Length() - outputChannelCount);
      }
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }
}
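To make the ChannelCountMode switch in this function concrete, an illustration with hypothetical numbers:

// Suppose the inputs' computed superset is 4 channels and the node's
// mNumberOfInputChannels (the channelCount attribute) is 2:
//   ChannelCountMode::Max         -> 4  (keep the computed superset)
//   ChannelCountMode::Clamped_max -> 2  (min(4, 2))
//   ChannelCountMode::Explicit    -> 2  (always mNumberOfInputChannels)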
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to the spec, the number of outputs is always 1.
  mLastChunks.SetLength(1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (mInputs.IsEmpty()) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  nsAutoTArray<bool,1> trackMapEntriesUsed;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    // Create a TrackMapEntry if necessary.
    size_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
    // Maybe there's nothing in this track yet. If so, ignore it. (While the
    // track is only playing silence, we may not be able to determine the
    // correct number of channels to start resampling.)
    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
      continue;
    }

    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
      trackMapEntriesUsed.AppendElement(false);
    }
    trackMapEntriesUsed[trackMapIndex] = true;

    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
    AudioSegment segment;
    GraphTime next;
    TrackRate inputTrackRate = inputTrack.GetRate();
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // Ticks >= startTicks and < endTicks are in the interval
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      TrackTicks startTicks = trackMap->mSamplesPassedToResampler + segment.GetDuration();
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                   "Samples missing");
      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
      TrackTicks ticks = endTicks - startTicks;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // See comments in TrackUnionStream::CopyTrackData
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
        TrackTicks inputTrackEndPoint =
            inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;

        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
          // Start of a new series of intervals where neither stream is blocked.
          trackMap->mEndOfConsumedInputTicks = TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
        }
        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
        TrackTicks inputEndTicks = inputStartTicks + ticks;
        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;

        if (inputStartTicks < 0) {
          // Data before the start of the track is just null.
          segment.AppendNullData(-inputStartTicks);
          inputStartTicks = 0;
        }
        if (inputEndTicks > inputStartTicks) {
          segment.AppendSlice(*inputTrack.GetSegment(),
                              std::min(inputTrackEndPoint, inputStartTicks),
                              std::min(inputTrackEndPoint, inputEndTicks));
        }
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - segment.GetDuration());
      }
    }

    trackMap->mSamplesPassedToResampler += segment.GetDuration();
    trackMap->ResampleInputData(&segment);

    if (trackMap->mResampledData.GetDuration() < mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
      // We don't have enough data. Delay it.
      trackMap->mResampledData.InsertNullDataAtStart(
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE - trackMap->mResampledData.GetDuration());
    }
    audioSegments.AppendElement()->AppendSlice(trackMap->mResampledData,
      mCurrentOutputPosition, mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    trackMap->mResampledData.ForgetUpTo(mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    inputChannels = GetAudioChannelsSuperset(inputChannels, trackMap->mResamplerChannelCount);
  }

  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (i >= int32_t(trackMapEntriesUsed.Length()) || !trackMapEntriesUsed[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  mCurrentOutputPosition += WEBAUDIO_BLOCK_SIZE;

  // Use AudioNodeStream's AdvanceOutputSegment to push the media stream graph along, even when we only produced null data.
  AdvanceOutputSegment();
}
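A worked example of the underrun padding inside the track loop above (hypothetical numbers):

// Suppose mCurrentOutputPosition = 512 and the resampler has produced only
// 600 frames so far.  The block [512, 640) needs data up to frame 640, so
// InsertNullDataAtStart(640 - 600) prepends 40 frames of silence, delaying
// the real data by 40 frames instead of reading past the end.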