/**
 * Copies the data in aInput to aOffsetInBlock within aBlock. aBlock's channel
 * count must be a superset of the channels in aInput.
 */
template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock* aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData,
                                   aInput.GetDuration(), aInput.mVolume);
    } else {
      // A null channel pointer (appended by the upmix above) means silence.
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
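
// Every snippet in this file funnels through ConvertAudioSamplesWithScale,
// which copies a run of samples while applying a volume scale. The sketch
// below shows one way such a helper can work; the SampleToFloat/FloatToSample
// helpers and the "Sketch" names are illustrative assumptions, not the actual
// Gecko API.
#include <algorithm>
#include <cstdint>

static inline float SampleToFloat(float aValue) { return aValue; }
static inline float SampleToFloat(int16_t aValue) { return aValue / 32768.0f; }

template <typename DstT> DstT FloatToSample(float aValue);

template <> inline float
FloatToSample<float>(float aValue) { return aValue; }

template <> inline int16_t
FloatToSample<int16_t>(float aValue)
{
  // Clamp to [-1, 1] before narrowing so overdriven samples saturate
  // instead of wrapping around.
  float clamped = std::max(-1.0f, std::min(1.0f, aValue));
  return static_cast<int16_t>(clamped * 32767.0f);
}

template <typename SrcT, typename DstT>
static void
ConvertAudioSamplesWithScaleSketch(const SrcT* aFrom, DstT* aTo, int aCount,
                                   float aScale)
{
  // Widen each sample to float, apply the gain, then narrow back to the
  // destination format. This covers the float->float, int16->float and
  // float->int16 combinations used by the callers in this file.
  for (int i = 0; i < aCount; ++i) {
    aTo[i] = FloatToSample<DstT>(SampleToFloat(aFrom[i]) * aScale);
  }
}
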
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk* aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  nsAutoTArray<const void*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    channels.SetLength(aInput.ChannelCount());
    PodCopy(channels.Elements(), aInput.mChannelData.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, nullptr);
    }
  }

  uint32_t duration = aInput.GetDuration();
  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData =
      static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c])) + aOffsetInBlock;
    if (channels[c]) {
      switch (aInput.mBufferFormat) {
      case AUDIO_FORMAT_FLOAT32:
        ConvertAudioSamplesWithScale(
            static_cast<const float*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      case AUDIO_FORMAT_S16:
        ConvertAudioSamplesWithScale(
            static_cast<const int16_t*>(channels[c]), outputData, duration,
            aInput.mVolume);
        break;
      default:
        NS_ERROR("Unhandled format");
      }
    } else {
      PodZero(outputData, duration);
    }
  }
}
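
// Both CopyChunkToBlock variants above pass nullptr as the "zero channel" to
// AudioChannelsUpMix and later test `if (channels[c])`, so an upmixed slot may
// legitimately be null and gets rendered as silence. A simplified sketch of
// that contract (it ignores the layout-specific duplication the real upmixer
// can do, so treat it as an assumption, not the Gecko implementation):
template <typename T>
static void
AudioChannelsUpMixSketch(nsTArray<const T*>* aChannelArray,
                         uint32_t aOutputChannelCount, const T* aZeroChannel)
{
  // Pad the array out to the requested channel count; every appended slot
  // points at aZeroChannel, which the callers in this file pass as nullptr.
  uint32_t inputChannelCount = aChannelArray->Length();
  aChannelArray->SetLength(aOutputChannelCount);
  for (uint32_t i = inputChannelCount; i < aOutputChannelCount; ++i) {
    (*aChannelArray)[i] = aZeroChannel;
  }
}
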
template<typename SampleFormatType> void
SpeechStreamListener::ConvertAndDispatchAudioChunk(int aDuration, float aVolume,
                                                   SampleFormatType* aData,
                                                   TrackRate aTrackRate)
{
  nsRefPtr<SharedBuffer> samples(SharedBuffer::Create(aDuration *
                                                      1 * // one (mono) channel
                                                      sizeof(int16_t)));

  int16_t* to = static_cast<int16_t*>(samples->Data());
  ConvertAudioSamplesWithScale(aData, to, aDuration, aVolume);

  mRecognition->FeedAudioData(samples.forget(), aDuration, this, aTrackRate);
}
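
// A plausible call site for the template above, dispatching on the chunk's
// sample format to pick an instantiation. The function name and shape are
// illustrative; the real caller is SpeechStreamListener's track-change
// notification and may differ in detail. Channel 0 is used because this
// speech pipeline is mono.
static void
DispatchChunkSketch(SpeechStreamListener* aListener, const AudioChunk& aChunk,
                    TrackRate aTrackRate)
{
  int duration = aChunk.GetDuration();
  switch (aChunk.mBufferFormat) {
  case AUDIO_FORMAT_FLOAT32:
    aListener->ConvertAndDispatchAudioChunk(
        duration, aChunk.mVolume,
        static_cast<float*>(const_cast<void*>(aChunk.mChannelData[0])),
        aTrackRate);
    break;
  case AUDIO_FORMAT_S16:
    aListener->ConvertAndDispatchAudioChunk(
        duration, aChunk.mVolume,
        static_cast<int16_t*>(const_cast<void*>(aChunk.mChannelData[0])),
        aTrackRate);
    break;
  default:
    NS_ERROR("Unhandled format");
  }
}
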
/**
 * Copies the data in aInput to aOffsetInBlock within aBlock. All samples must
 * be float. Both chunks must have the same number of channels (or else
 * aInput is null). aBlock must have been allocated with AllocateInputBlock.
 */
static void
CopyChunkToBlock(const AudioChunk& aInput, AudioChunk* aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t d = aInput.GetDuration();
  for (uint32_t i = 0; i < aBlock->mChannelData.Length(); ++i) {
    float* out = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[i])) +
      aOffsetInBlock;
    if (aInput.IsNull()) {
      PodZero(out, d);
    } else {
      const float* in = static_cast<const float*>(aInput.mChannelData[i]);
      ConvertAudioSamplesWithScale(in, out, d, aInput.mVolume);
    }
  }
}
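
// Both CopyChunkToBlock doc comments require aBlock to come from
// AllocateInputBlock. A sketch of what that precondition provides: one shared
// allocation carved into WEBAUDIO_BLOCK_SIZE float frames per channel, so the
// const_cast<float*> writes above land in writable storage. This is an
// assumed shape, not the exact Gecko helper.
static void
AllocateInputBlockSketch(uint32_t aChannelCount, AudioChunk* aBlock)
{
  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(
      WEBAUDIO_BLOCK_SIZE * aChannelCount * sizeof(float));
  float* data = static_cast<float*>(buffer->Data());
  aBlock->mDuration = WEBAUDIO_BLOCK_SIZE;
  aBlock->mChannelData.SetLength(aChannelCount);
  for (uint32_t i = 0; i < aChannelCount; ++i) {
    aBlock->mChannelData[i] = data + i * WEBAUDIO_BLOCK_SIZE;
  }
  aBlock->mBuffer = buffer.forget();
  aBlock->mVolume = 1.0f;
  aBlock->mBufferFormat = AUDIO_FORMAT_FLOAT32;
}
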
long
nsBufferedAudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  uint32_t bytesWanted = aFrames * mBytesPerFrame;

  // Adjust bytesWanted to fit what is available in mBuffer.
  uint32_t available = NS_MIN(bytesWanted, mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");

  if (available > 0) {
    // Copy each sample from mBuffer to aBuffer, adjusting the volume during the copy.
    float scaled_volume = float(GetVolumeScale() * mVolume);

    // Fetch input pointers from the ring buffer.  Because the storage wraps
    // around, the data may come back as up to two contiguous regions.
    void* input[2];
    uint32_t input_size[2];
    mBuffer.PopElements(available, &input[0], &input_size[0],
                        &input[1], &input_size[1]);

    uint8_t* output = static_cast<uint8_t*>(aBuffer);
    for (int i = 0; i < 2; ++i) {
      const AudioDataValue* src = static_cast<const AudioDataValue*>(input[i]);
      AudioDataValue* dst = reinterpret_cast<AudioDataValue*>(output);

      ConvertAudioSamplesWithScale(src, dst, input_size[i]/sizeof(AudioDataValue),
                                   scaled_volume);
      output += input_size[i];
    }

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();

    // Calculate remaining bytes requested by caller.  If the stream is not
    // draining, an underrun has occurred, so fill the remaining buffer with
    // silence.
    bytesWanted -= available;
  }

  if (mState != DRAINING) {
    memset(static_cast<uint8_t*>(aBuffer) + available, 0, bytesWanted);
    mLostFrames += bytesWanted / mBytesPerFrame;
    bytesWanted = 0;
  }

  return aFrames - (bytesWanted / mBytesPerFrame);
}
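
// DataCallback above relies on the ring buffer's two-span contract: because
// the storage wraps around, PopElements can hand back at most two contiguous
// regions that together hold `available` bytes. A self-contained sketch of
// that shape (a plain byte ring; the real buffer class differs):
#include <algorithm>
#include <cstdint>

struct ByteRingSketch {
  uint8_t* mStorage;
  uint32_t mCapacity;
  uint32_t mStart;  // read index
  uint32_t mCount;  // bytes currently stored

  // Pops aWanted bytes (caller guarantees aWanted <= mCount) as up to two
  // contiguous regions: read-index..end-of-storage, then the wrapped prefix.
  void PopElements(uint32_t aWanted, void** aFirst, uint32_t* aFirstSize,
                   void** aSecond, uint32_t* aSecondSize)
  {
    *aFirstSize = std::min(aWanted, mCapacity - mStart);
    *aFirst = mStorage + mStart;
    *aSecondSize = aWanted - *aFirstSize;
    *aSecond = mStorage;
    mStart = (mStart + aWanted) % mCapacity;
    mCount -= aWanted;
  }
};
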
nsresult nsNativeAudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames)
{
  NS_ASSERTION(!mPaused, "Don't write audio when paused, you'll block");

  if (mInError)
    return NS_ERROR_FAILURE;

  // The data is interleaved, so convert every channel's samples in one pass.
  uint32_t samples = aFrames * mChannels;
  nsAutoArrayPtr<short> s_data(new short[samples]);

  float scaled_volume = float(GetVolumeScale() * mVolume);
  ConvertAudioSamplesWithScale(aBuf, s_data.get(), samples, scaled_volume);

  if (sa_stream_write(static_cast<sa_stream_t*>(mAudioHandle),
                      s_data.get(),
                      samples * sizeof(short)) != SA_SUCCESS)
  {
    PR_LOG(gAudioStreamLog, PR_LOG_ERROR, ("nsNativeAudioStream: sa_stream_write error"));
    mInError = true;
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
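
// A minimal caller sketch for Write() above: build an interleaved block of
// AudioDataValue samples and hand it off. The helper name and the silence
// payload are illustrative only.
static nsresult
WriteSilenceSketch(nsNativeAudioStream* aStream, uint32_t aFrames,
                   uint32_t aChannels)
{
  uint32_t samples = aFrames * aChannels;
  nsAutoArrayPtr<AudioDataValue> buf(new AudioDataValue[samples]);
  PodZero(buf.get(), samples);  // interleaved silence
  // Write() converts to 16-bit with the current volume scale and blocks
  // until the backend accepts all aFrames frames.
  return aStream->Write(buf.get(), aFrames);
}
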
void
AudioNodeExternalInputStream::TrackMapEntry::ResampleChannels(const nsTArray<const void*>& aBuffers,
                                                              uint32_t aInputDuration,
                                                              AudioSampleFormat aFormat,
                                                              float aVolume)
{
  NS_ASSERTION(aBuffers.Length() == mResamplerChannelCount,
               "Channel count must be correct here");

  nsAutoTArray<nsTArray<float>,2> resampledBuffers;
  resampledBuffers.SetLength(aBuffers.Length());
  nsTArray<float> samplesAdjustedForVolume;
  nsAutoTArray<const float*,2> bufferPtrs;
  bufferPtrs.SetLength(aBuffers.Length());

  for (uint32_t i = 0; i < aBuffers.Length(); ++i) {
    AudioSampleFormat format = aFormat;
    const void* buffer = aBuffers[i];

    if (aVolume != 1.0f) {
      format = AUDIO_FORMAT_FLOAT32;
      samplesAdjustedForVolume.SetLength(aInputDuration);
      switch (aFormat) {
      case AUDIO_FORMAT_FLOAT32:
        ConvertAudioSamplesWithScale(static_cast<const float*>(buffer),
                                     samplesAdjustedForVolume.Elements(),
                                     aInputDuration, aVolume);
        break;
      case AUDIO_FORMAT_S16:
        ConvertAudioSamplesWithScale(static_cast<const int16_t*>(buffer),
                                     samplesAdjustedForVolume.Elements(),
                                     aInputDuration, aVolume);
        break;
      default:
        MOZ_ASSERT(false);
        return;
      }
      buffer = samplesAdjustedForVolume.Elements();
    }

    switch (format) {
    case AUDIO_FORMAT_FLOAT32:
      ResampleChannelBuffer(mResampler, i,
                            static_cast<const float*>(buffer),
                            aInputDuration, &resampledBuffers[i]);
      break;
    case AUDIO_FORMAT_S16:
      ResampleChannelBuffer(mResampler, i,
                            static_cast<const int16_t*>(buffer),
                            aInputDuration, &resampledBuffers[i]);
      break;
    default:
      MOZ_ASSERT(false);
      return;
    }
    bufferPtrs[i] = resampledBuffers[i].Elements();
    NS_ASSERTION(i == 0 ||
                 resampledBuffers[i].Length() == resampledBuffers[0].Length(),
                 "Resampler made different decisions for different channels!");
  }

  uint32_t length = resampledBuffers[0].Length();
  nsRefPtr<ThreadSharedObject> buf = new SharedChannelArrayBuffer<float>(&resampledBuffers);
  mResampledData.AppendFrames(buf.forget(), bufferPtrs, length);
}
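
// ResampleChannels above delegates the per-channel work to
// ResampleChannelBuffer. A sketch of that helper for the float case, assuming
// a Speex resampler state; the chunked output-growth loop is illustrative and
// the real helper may size its output differently.
#include <speex/speex_resampler.h>

static void
ResampleChannelBufferSketch(SpeexResamplerState* aResampler, uint32_t aChannel,
                            const float* aInput, uint32_t aInputDuration,
                            nsTArray<float>* aOutput)
{
  uint32_t processed = 0;
  while (processed < aInputDuration) {
    uint32_t inSamples = aInputDuration - processed;
    uint32_t prevLength = aOutput->Length();
    // Reserve a generous chunk; Speex reports back how much it wrote.
    aOutput->SetLength(prevLength + inSamples + 10);
    uint32_t outSamples = aOutput->Length() - prevLength;
    speex_resampler_process_float(aResampler, aChannel,
                                  aInput + processed, &inSamples,
                                  aOutput->Elements() + prevLength,
                                  &outSamples);
    processed += inSamples;  // inSamples now holds the count consumed
    aOutput->SetLength(prevLength + outSamples);
  }
}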