long
nsBufferedAudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
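  // Total number of bytes the audio backend has asked this callback to produce.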
  uint32_t bytesWanted = aFrames * mBytesPerFrame;

  // Clamp the copy to the data currently buffered in mBuffer.
  uint32_t available = NS_MIN(bytesWanted, mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");

  if (available > 0) {
    // Copy each sample from mBuffer to aBuffer, adjusting the volume during the copy.
    float scaled_volume = float(GetVolumeScale() * mVolume);

    // Fetch input pointers from the ring buffer.
    void* input[2];
    uint32_t input_size[2];
    mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]);

    uint8_t* output = static_cast<uint8_t*>(aBuffer);
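    // The pop may straddle the ring buffer's wrap-around point, so the data is
    // returned as up to two contiguous segments; copy and scale each in turn.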
    for (int i = 0; i < 2; ++i) {
      const AudioDataValue* src = static_cast<const AudioDataValue*>(input[i]);
      AudioDataValue* dst = reinterpret_cast<AudioDataValue*>(output);

      ConvertAudioSamplesWithScale(src, dst, input_size[i]/sizeof(AudioDataValue),
                                   scaled_volume);
      output += input_size[i];
    }

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();

    // Calculate the remaining bytes requested by the caller.  If the stream is
    // not draining, an underrun has occurred, so fill the remaining buffer
    // with silence.
    bytesWanted -= available;
  }

  if (mState != DRAINING) {
    memset(static_cast<uint8_t*>(aBuffer) + available, 0, bytesWanted);
    mLostFrames += bytesWanted / mBytesPerFrame;
    bytesWanted = 0;
  }

  return aFrames - (bytesWanted / mBytesPerFrame);
}
uint32_t
BufferedAudioStream::Available()
{
  MonitorAutoLock mon(mMonitor);
  NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Buffer invariant violated.");
  return BytesToFrames(mBuffer.Available());
}
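// The FramesToBytes/BytesToFrames helpers used here are not shown in this
// excerpt.  A minimal sketch of what they are assumed to do, based on how they
// are called (mBytesPerFrame being the size of one audio frame in bytes):
//
//   uint32_t BufferedAudioStream::FramesToBytes(uint32_t aFrames) const
//   {
//     return aFrames * mBytesPerFrame;
//   }
//
//   uint32_t BufferedAudioStream::BytesToFrames(uint32_t aBytes) const
//   {
//     return aBytes / mBytesPerFrame;
//   }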
long
BufferedAudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;

  if (available) {
    AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
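    // When the input and output rates match there is no time stretching to do,
    // so frames are copied straight out of the buffer; otherwise they are run
    // through the time stretcher first.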
    if (mInRate == mOutRate) {
      servicedFrames = GetUnprocessed(output, aFrames);
    } else {
      servicedFrames = GetTimeStretched(output, aFrames);
    }
    float scaled_volume = float(GetVolumeScale() * mVolume);

    // Apply the stream volume in place to the samples in aBuffer.
    ScaleAudioSamples(output, aFrames * mChannels, scaled_volume);

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  }

  underrunFrames = aFrames - servicedFrames;

  // Unless the stream is draining, the frames we could not service are an
  // underrun: pad the tail of aBuffer with silence and count them as lost.
  if (mState != DRAINING) {
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
#ifdef PR_LOGGING
    if (underrunFrames) {
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("AudioStream %p lost %d frames", this, underrunFrames));
    }
#endif
    mLostFrames += underrunFrames;
    servicedFrames += underrunFrames;
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);

  mAudioClock.UpdateWritePosition(servicedFrames);
  return servicedFrames;
}
long
BufferedAudioStream::GetUnprocessed(void* aBuffer, long aFrames)
{
  uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);

  // Flush the timestretcher pipeline, if we were playing using a playback rate
  // other than 1.0.
  uint32_t flushedFrames = 0;
  if (mTimeStretcher && mTimeStretcher->numSamples()) {
    flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
    wpos += FramesToBytes(flushedFrames);
  }
  uint32_t toPopBytes = FramesToBytes(aFrames - flushedFrames);
  uint32_t available = std::min(toPopBytes, mBuffer.Length());

  // The pop may wrap around the end of the ring buffer, so the data comes back
  // as up to two contiguous segments, which are copied out back-to-back.
  void* input[2];
  uint32_t input_size[2];
  mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]);
  memcpy(wpos, input[0], input_size[0]);
  wpos += input_size[0];
  memcpy(wpos, input[1], input_size[1]);
  return BytesToFrames(available) + flushedFrames;
}
long
BufferedAudioStream::GetTimeStretched(void* aBuffer, long aFrames)
{
  long processedFrames = 0;

  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return 0;
  }

  uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
  double playbackRate = static_cast<double>(mInRate) / mOutRate;
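  // Estimate how many bytes of buffered input to pop so the time stretcher can
  // produce the requested output; if the estimate falls short, the loop below
  // simply pops more on the next iteration.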
  uint32_t toPopBytes = FramesToBytes(ceil(aFrames / playbackRate));
  uint32_t available = 0;
  bool lowOnBufferedData = false;
  do {
    // Check if we already have enough data in the time stretcher pipeline.
    if (mTimeStretcher->numSamples() <= static_cast<uint32_t>(aFrames)) {
      void* input[2];
      uint32_t input_size[2];
      available = std::min(mBuffer.Length(), toPopBytes);
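      // The buffer holds less than requested; note it so the loop terminates
      // instead of spinning while waiting for data that is not there.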
      if (available != toPopBytes) {
        lowOnBufferedData = true;
      }
      mBuffer.PopElements(available, &input[0], &input_size[0],
                                     &input[1], &input_size[1]);
      for (uint32_t i = 0; i < 2; i++) {
        mTimeStretcher->putSamples(reinterpret_cast<AudioDataValue*>(input[i]), BytesToFrames(input_size[i]));
      }
    }
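    // Pull as many stretched frames as are ready for the remaining output.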
    uint32_t receivedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames - processedFrames);
    wpos += FramesToBytes(receivedFrames);
    processedFrames += receivedFrames;
  } while (processedFrames < aFrames && !lowOnBufferedData);

  return processedFrames;
}