Example #1
0
void
AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
    mMonitor.AssertCurrentThreadOwns();

    // We need to call the non-locking version, because we already have the lock.
    if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
        return;
    }

    // Ratio of source frames to output frames; pop enough source frames to
    // fill the writer after stretching.
    double playbackRate = static_cast<double>(mInRate) / mOutRate;
    uint32_t toPopFrames = ceil(aWriter.Available() * playbackRate);

    // Feed the stretcher until it can satisfy the writer, or the source
    // runs dry.
    while (mTimeStretcher->numSamples() < aWriter.Available()) {
        UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
        if (c->Frames() == 0) {
            break;
        }
        MOZ_ASSERT(c->Frames() <= toPopFrames);
        if (Downmix(c.get())) {
            mTimeStretcher->putSamples(c->Data(), c->Frames());
        } else {
            // Write silence if downmixing fails.
            // Guard the sample count and the byte size against uint32_t
            // overflow before sizing the silence buffer.
            const uint64_t kMaxU32 = ~uint32_t(0);
            uint64_t silentSamples = uint64_t(mOutChannels) * c->Frames();
            if (silentSamples > kMaxU32 / sizeof(AudioDataValue)) {
                // Should not happen with sane member data; bail out rather
                // than allocate a wrapped-around length.
                return;
            }
            nsAutoTArray<AudioDataValue, 1000> buf;
            buf.SetLength(silentSamples);
            memset(buf.Elements(), 0, silentSamples * sizeof(AudioDataValue));
            mTimeStretcher->putSamples(buf.Elements(), c->Frames());
        }
    }

    // Drain stretched output into the writer. Capture the stretcher by value
    // so the lambda stays valid for the duration of Write().
    auto timeStretcher = mTimeStretcher;
    aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
        return timeStretcher->receiveSamples(aPtr, aFrames);
    }, aWriter.Available());
}
Example #2
0
void
AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
  mMonitor.AssertCurrentThreadOwns();

  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return;
  }

  // Pop enough source frames to fill the writer after stretching at the
  // current playback rate.
  uint32_t toPopFrames =
    ceil(aWriter.Available() * mAudioClock.GetPlaybackRate());

  // Feed the stretcher until it can satisfy the writer, or the source
  // runs dry.
  while (mTimeStretcher->numSamples() < aWriter.Available()) {
    UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
    if (c->Frames() == 0) {
      break;
    }
    MOZ_ASSERT(c->Frames() <= toPopFrames);
    if (IsValidAudioFormat(c.get())) {
      mTimeStretcher->putSamples(c->Data(), c->Frames());
    } else {
      // Write silence if invalid format.
      AutoTArray<AudioDataValue, 1000> buf;
      auto size = CheckedUint32(mOutChannels) * c->Frames();
      if (!size.isValid()) {
        // The overflow should not happen in normal case.
        // %u matches the unsigned 32-bit arguments (%d would be a printf
        // format mismatch).
        LOGW("Invalid member data: %u channels, %u frames", mOutChannels, c->Frames());
        return;
      }
      buf.SetLength(size.value());
      // Re-check the byte size of the silence buffer for overflow too.
      size = size * sizeof(AudioDataValue);
      if (!size.isValid()) {
        LOGW("The required memory size is too large.");
        return;
      }
      memset(buf.Elements(), 0, size.value());
      mTimeStretcher->putSamples(buf.Elements(), c->Frames());
    }
  }

  // Drain stretched output into the writer. Capture the stretcher by value
  // so the lambda stays valid for the duration of Write().
  auto timeStretcher = mTimeStretcher;
  aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
    return timeStretcher->receiveSamples(aPtr, aFrames);
  }, aWriter.Available());
}
Example #3
0
void
AudioStream::GetUnprocessed(AudioBufferWriter& aWriter)
{
    mMonitor.AssertCurrentThreadOwns();

    // Drain any output left over in the time stretcher from an earlier run
    // at a playback rate other than 1.0.
    if (mTimeStretcher && mTimeStretcher->numSamples()) {
        auto stretcher = mTimeStretcher;
        aWriter.Write([stretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
            return stretcher->receiveSamples(aPtr, aFrames);
        }, aWriter.Available());

        // TODO: There might be still unprocessed samples in the stretcher.
        // We should either remove or flush them so they won't be in the output
        // next time we switch a playback rate other than 1.0.
        NS_WARN_IF(mTimeStretcher->numUnprocessedSamples() > 0);
    }

    // Copy frames straight from the data source until the writer is full or
    // the source runs dry.
    for (;;) {
        const uint32_t wanted = aWriter.Available();
        if (wanted == 0) {
            break;
        }
        UniquePtr<Chunk> chunk = mDataSource.PopFrames(wanted);
        const uint32_t got = chunk->Frames();
        if (got == 0) {
            break;
        }
        MOZ_ASSERT(got <= wanted);
        if (Downmix(chunk.get())) {
            aWriter.Write(chunk->Data(), got);
        } else {
            // Substitute silence when downmixing fails.
            aWriter.WriteZeros(got);
        }
    }
}