void
AudioTrackEncoder::InterleaveTrackData(AudioChunk& aChunk,
                                       int32_t aDuration,
                                       uint32_t aOutputChannels,
                                       AudioDataValue* aOutput)
{
  if (aChunk.mChannelData.Length() < aOutputChannels) {
    // Up-mix. This might make mChannelData have more than aOutputChannels channels.
    AudioChannelsUpMix(&aChunk.mChannelData, aOutputChannels, gZeroChannel);
  }

  if (aChunk.mChannelData.Length() > aOutputChannels) {
    DownmixAndInterleave(aChunk.mChannelData, aChunk.mBufferFormat, aDuration,
                         aChunk.mVolume, aOutputChannels, aOutput);
  } else {
    InterleaveAndConvertBuffer(aChunk.mChannelData.Elements(),
                               aChunk.mBufferFormat, aDuration, aChunk.mVolume,
                               aOutputChannels, aOutput);
  }
}
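
As a side note, here is a minimal, self-contained sketch of the up-mix strategy the function relies on: when a chunk carries fewer channels than the output, the channel-pointer array is simply padded with a shared silent buffer. The names Sample, kZeroChannel and UpMixByPadding are hypothetical stand-ins for AudioDataValue and gZeroChannel; this is not Mozilla's AudioChannelsUpMix.

#include <cstddef>
#include <vector>

using Sample = float;
// Shared silent buffer, playing the role of gZeroChannel in the code above.
static const std::vector<Sample> kZeroChannel(1024, 0.0f);

void UpMixByPadding(std::vector<const Sample*>& aChannels, size_t aOutputChannels)
{
  // Extra output channels all point at the same zero (silent) buffer, so no
  // per-chunk allocation or copying is needed.
  while (aChannels.size() < aOutputChannels) {
    aChannels.push_back(kZeroChannel.data());
  }
}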
Example #2
void
AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
{
  uint32_t outputChannels = aOutput->GetChannels();
  nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
  // Offset in the buffer that will end up sent to the AudioStream, in samples.
  uint32_t offset = 0;

  if (!GetDuration()) {
    return;
  }

  uint32_t outBufferLength = GetDuration() * outputChannels;
  buf.SetLength(outBufferLength);

  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
    AudioChunk& c = *ci;
    uint32_t frames = c.mDuration;

    // If we have written data in the past, or we have real (non-silent) data
    // to write, we can proceed. Otherwise, it means we just started the
    // AudioStream and we don't have real data to write to it (just silence).
    // To avoid overbuffering the AudioStream, we simply drop the silence here;
    // the stream will underrun and output silence anyway.
    if (c.mBuffer || aOutput->GetWritten()) {
      if (c.mBuffer && c.mBufferFormat != AUDIO_FORMAT_SILENCE) {
        channelData.SetLength(c.mChannelData.Length());
        for (uint32_t i = 0; i < channelData.Length(); ++i) {
          channelData[i] = c.mChannelData[i];
        }

        if (channelData.Length() < outputChannels) {
          // Up-mix. Note that this might temporarily leave channelData with
          // more channels than outputChannels.
          AudioChannelsUpMix(&channelData, outputChannels, gZeroChannel);
        }

        if (channelData.Length() > outputChannels) {
          // Down-mix.
          DownmixAndInterleave(channelData, c.mBufferFormat, frames,
                               c.mVolume, outputChannels, buf.Elements() + offset);
        } else {
          InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
                                     frames, c.mVolume,
                                     outputChannels,
                                     buf.Elements() + offset);
        }
      } else {
        // Assumes that an all-zero bit pattern represents 0.0f.
        memset(buf.Elements() + offset, 0, outputChannels * frames * sizeof(AudioDataValue));
      }
      offset += frames * outputChannels;
    }

    if (!c.mTimeStamp.IsNull()) {
      TimeStamp now = TimeStamp::Now();
      // It would be more efficient to convert c.mTimeStamp to ms at creation
      // time and pass that here.
      LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
              (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
    }
  }

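  // offset is counted in interleaved samples (see the comment above), so
  // dividing by outputChannels converts it to the frame count passed to Write().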
  aOutput->Write(buf.Elements(), offset / outputChannels, &(mChunks[mChunks.Length() - 1].mTimeStamp));

  if (aMixer) {
    aMixer->Mix(buf.Elements(), outputChannels, GetDuration(), aOutput->GetRate());
  }
  aOutput->Start();
}
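
For reference, here is a minimal sketch of what the InterleaveAndConvertBuffer() calls above conceptually do: planar per-channel buffers are scaled by the chunk volume and written frame by frame into one interleaved region of the output buffer. It assumes float samples and no format conversion; InterleaveWithVolume and its parameters are hypothetical names, not the actual Mozilla routine.

#include <cstdint>

void InterleaveWithVolume(const float* const* aPlanar, uint32_t aChannels,
                          uint32_t aFrames, float aVolume, float* aInterleaved)
{
  for (uint32_t frame = 0; frame < aFrames; ++frame) {
    for (uint32_t ch = 0; ch < aChannels; ++ch) {
      // Interleaved layout: all channels of frame 0, then all channels of
      // frame 1, and so on.
      aInterleaved[frame * aChannels + ch] = aPlanar[ch][frame] * aVolume;
    }
  }
}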