Example #1
void
InterleaveAndConvertBuffer(const void** aSourceChannels,
                           AudioSampleFormat aSourceFormat,
                           int32_t aLength, float aVolume,
                           int32_t aChannels,
                           AudioDataValue* aOutput)
{
  switch (aSourceFormat) {
  case AUDIO_FORMAT_FLOAT32:
    InterleaveAndConvertBuffer(reinterpret_cast<const float**>(aSourceChannels),
                               aLength,
                               aVolume,
                               aChannels,
                               aOutput);
    break;
  case AUDIO_FORMAT_S16:
    InterleaveAndConvertBuffer(reinterpret_cast<const int16_t**>(aSourceChannels),
                               aLength,
                               aVolume,
                               aChannels,
                               aOutput);
    break;
  case AUDIO_FORMAT_SILENCE:
    // Nothing to do here.
    break;
  }
}
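For reference, the AUDIO_FORMAT_FLOAT32 branch above bottoms out in a planar-to-interleaved copy with a volume scale. A minimal standalone sketch of that inner loop, with illustrative names that are not part of the Mozilla sources:

#include <cstdint>

// Hypothetical sketch of the float path: interleave planar channels into one
// frame-major buffer (L R L R ... for stereo), scaling each sample by aVolume.
static void InterleaveFloatSketch(const float** aChannels, int32_t aLength,
                                  float aVolume, int32_t aChannelCount,
                                  float* aOutput) {
  for (int32_t frame = 0; frame < aLength; ++frame) {
    for (int32_t ch = 0; ch < aChannelCount; ++ch) {
      *aOutput++ = aChannels[ch][frame] * aVolume;
    }
  }
}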
Example #2
static void
InterleaveAndConvertBuffer(const void* aSource, nsAudioStream::SampleFormat aSourceFormat,
                           int32_t aSourceLength,
                           int32_t aOffset, int32_t aLength,
                           float aVolume,
                           int32_t aChannels,
                           void* aOutput, nsAudioStream::SampleFormat aOutputFormat)
{
  switch (aSourceFormat) {
  case nsAudioStream::FORMAT_FLOAT32:
    InterleaveAndConvertBuffer(static_cast<const float*>(aSource) + aOffset, aSourceLength,
                               aLength,
                               aVolume,
                               aChannels,
                               aOutput, aOutputFormat);
    break;
  case nsAudioStream::FORMAT_S16_LE:
    InterleaveAndConvertBuffer(static_cast<const int16_t*>(aSource) + aOffset, aSourceLength,
                               aLength,
                               aVolume,
                               aChannels,
                               aOutput, aOutputFormat);
    break;
  case nsAudioStream::FORMAT_U8:
    InterleaveAndConvertBuffer(static_cast<const uint8_t*>(aSource) + aOffset, aSourceLength,
                               aLength,
                               aVolume,
                               aChannels,
                               aOutput, aOutputFormat);
    break;
  }
}
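Note that aOffset is applied in source-format sample units, before any conversion to aOutputFormat, so the pointer arithmetic is correct for 8-bit, 16-bit, and float sources alike. Unlike Example #1, this older nsAudioStream variant has no silence case: the caller (Example #3) zero-fills the output buffer itself when a chunk has no backing buffer.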
Example #3
void
AudioSegment::WriteTo(nsAudioStream* aOutput)
{
  NS_ASSERTION(mChannels == aOutput->GetChannels(), "Wrong number of channels");
  nsAutoTArray<uint8_t,STATIC_AUDIO_BUFFER_BYTES> buf;
  uint32_t frameSize = GetSampleSize(aOutput->GetFormat())*mChannels;
  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
    AudioChunk& c = *ci;
    if (frameSize*c.mDuration > PR_UINT32_MAX) {
      NS_ERROR("Buffer overflow");
      return;
    }
    buf.SetLength(int32_t(frameSize*c.mDuration));
    if (c.mBuffer) {
      InterleaveAndConvertBuffer(c.mBuffer->Data(), c.mBufferFormat, c.mBufferLength,
                                 c.mOffset, int32_t(c.mDuration),
                                 c.mVolume,
                                 aOutput->GetChannels(),
                                 buf.Elements(), aOutput->GetFormat());
    } else {
      // Assumes that a bit pattern of zeroes == 0.0f
      memset(buf.Elements(), 0, buf.Length());
    }
    aOutput->Write(buf.Elements(), int32_t(c.mDuration));
  }
}
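The frameSize*c.mDuration guard relies on the product being computed in a type wider than uint32_t (mDuration is presumably a 64-bit tick count); for extreme durations even that product could wrap. A hedged standalone sketch of the same check that sidesteps overflow entirely, with illustrative names:

#include <cstdint>
#include <limits>

// Hypothetical overflow-safe variant of the guard above: divide instead of
// multiply so the check itself can never wrap around.
static bool FitsInUint32Buffer(uint32_t aFrameSize, int64_t aDuration) {
  const uint64_t limit = std::numeric_limits<uint32_t>::max();
  if (aDuration < 0) {
    return false;
  }
  return aFrameSize == 0 || uint64_t(aDuration) <= limit / aFrameSize;
}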
Example #4
void
DownmixAndInterleave(const nsTArray<const void*>& aChannelData,
                     AudioSampleFormat aSourceFormat, int32_t aDuration,
                     float aVolume, uint32_t aOutputChannels,
                     AudioDataValue* aOutput)
{
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
  nsAutoTArray<float,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> downmixConversionBuffer;
  nsAutoTArray<float,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> downmixOutputBuffer;

  channelData.SetLength(aChannelData.Length());
  if (aSourceFormat != AUDIO_FORMAT_FLOAT32) {
    NS_ASSERTION(aSourceFormat == AUDIO_FORMAT_S16, "unknown format");
    downmixConversionBuffer.SetLength(aDuration*aChannelData.Length());
    for (uint32_t i = 0; i < aChannelData.Length(); ++i) {
      float* conversionBuf = downmixConversionBuffer.Elements() + (i*aDuration);
      const int16_t* sourceBuf = static_cast<const int16_t*>(aChannelData[i]);
      for (uint32_t j = 0; j < (uint32_t)aDuration; ++j) {
        conversionBuf[j] = AudioSampleToFloat(sourceBuf[j]);
      }
      channelData[i] = conversionBuf;
    }
  } else {
    for (uint32_t i = 0; i < aChannelData.Length(); ++i) {
      channelData[i] = aChannelData[i];
    }
  }

  downmixOutputBuffer.SetLength(aDuration*aOutputChannels);
  nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannelBuffers;
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> outputChannelData;
  outputChannelBuffers.SetLength(aOutputChannels);
  outputChannelData.SetLength(aOutputChannels);
  for (uint32_t i = 0; i < (uint32_t)aOutputChannels; ++i) {
    outputChannelData[i] = outputChannelBuffers[i] =
        downmixOutputBuffer.Elements() + aDuration*i;
  }
  if (channelData.Length() > aOutputChannels) {
    AudioChannelsDownMix(channelData, outputChannelBuffers.Elements(),
                         aOutputChannels, aDuration);
  }
  InterleaveAndConvertBuffer(outputChannelData.Elements(), AUDIO_FORMAT_FLOAT32,
                             aDuration, aVolume, aOutputChannels, aOutput);
}
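Two details worth noting here: outputChannelData simply aliases outputChannelBuffers (the same pointers, typed once as float* for AudioChannelsDownMix and once as const void* for InterleaveAndConvertBuffer), and the final interleave always reads from downmixOutputBuffer. Both call sites (in Examples #4 and #7) therefore check that the channel count actually exceeds the output channel count before calling; otherwise the down-mix would be skipped and the interleave would read uninitialized memory.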
void
AudioTrackEncoder::InterleaveTrackData(AudioChunk& aChunk,
                                       int32_t aDuration,
                                       uint32_t aOutputChannels,
                                       AudioDataValue* aOutput)
{
  if (aChunk.mChannelData.Length() < aOutputChannels) {
    // Up-mix. This might make mChannelData have more than aOutputChannels channels.
    AudioChannelsUpMix(&aChunk.mChannelData, aOutputChannels, gZeroChannel);
  }

  if (aChunk.mChannelData.Length() > aOutputChannels) {
    DownmixAndInterleave(aChunk.mChannelData, aChunk.mBufferFormat, aDuration,
                         aChunk.mVolume, aOutputChannels, aOutput);
  } else {
    InterleaveAndConvertBuffer(aChunk.mChannelData.Elements(),
                               aChunk.mBufferFormat, aDuration, aChunk.mVolume,
                               aOutputChannels, aOutput);
  }
}
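Either branch leaves exactly aOutputChannels interleaved channels in aOutput: the up-mix pads mChannelData with gZeroChannel before the plain interleave, while the down-mix detours through the float conversion path of DownmixAndInterleave above.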
Example #6
template <typename SrcT, typename DstT>
void TestInterleaveAndConvert() {
  size_t arraySize = 1024;
  size_t maxChannels = 8;  // 7.1
  for (uint32_t channels = 1; channels <= maxChannels; channels++) {
    const SrcT* const* src = GetPlanarChannelArray<SrcT>(channels, arraySize);
    DstT* dst = new DstT[channels * arraySize];

    InterleaveAndConvertBuffer(src, arraySize, 1.0, channels, dst);

    uint32_t channelIndex = 0;
    for (size_t i = 0; i < arraySize * channels; i++) {
      ASSERT_TRUE(FuzzyEqual(
          dst[i], FloatToAudioSample<DstT>(1. / (channelIndex + 1))));
      channelIndex++;
      channelIndex %= channels;
    }

    DeletePlanarChannelsArray(src, channels);
    delete[] dst;
  }
}
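TestInterleaveAndConvert is a function template, so a test body has to instantiate it explicitly per source/destination sample-type pair. A plausible gtest wrapper (the chosen type pairs are an assumption, not taken from the listing):

TEST(AudioSegment, InterleaveAndConvert) {
  TestInterleaveAndConvert<float, float>();
  TestInterleaveAndConvert<float, int16_t>();
  TestInterleaveAndConvert<int16_t, float>();
  TestInterleaveAndConvert<int16_t, int16_t>();
}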
Example #7
void
AudioSegment::WriteTo(uint64_t aID, AudioStream* aOutput, AudioMixer* aMixer)
{
  uint32_t outputChannels = aOutput->GetChannels();
  nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
  // Offset into the buffer that will eventually be sent to the AudioStream, in samples.
  uint32_t offset = 0;

  if (!GetDuration()) {
    return;
  }

  uint32_t outBufferLength = GetDuration() * outputChannels;
  buf.SetLength(outBufferLength);

  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
    AudioChunk& c = *ci;
    uint32_t frames = c.mDuration;

    // If we have written data in the past, or we have real (non-silent) data
    // to write, we can proceed. Otherwise, it means we just started the
    // AudioStream and don't yet have real data to write (just silence).
    // To avoid overbuffering in the AudioStream, we simply drop the silence
    // here. The stream will underrun and output silence anyway.
    if (c.mBuffer || aOutput->GetWritten()) {
      if (c.mBuffer && c.mBufferFormat != AUDIO_FORMAT_SILENCE) {
        channelData.SetLength(c.mChannelData.Length());
        for (uint32_t i = 0; i < channelData.Length(); ++i) {
          channelData[i] = c.mChannelData[i];
        }

        if (channelData.Length() < outputChannels) {
          // Up-mix. Note that this might actually make channelData have more
          // than outputChannels temporarily.
          AudioChannelsUpMix(&channelData, outputChannels, gZeroChannel);
        }

        if (channelData.Length() > outputChannels) {
          // Down-mix.
          DownmixAndInterleave(channelData, c.mBufferFormat, frames,
                               c.mVolume, outputChannels, buf.Elements() + offset);
        } else {
          InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
                                     frames, c.mVolume,
                                     outputChannels,
                                     buf.Elements() + offset);
        }
      } else {
        // Assumes that a bit pattern of zeroes == 0.0f
        memset(buf.Elements() + offset, 0, outputChannels * frames * sizeof(AudioDataValue));
      }
      offset += frames * outputChannels;
    }

    if (!c.mTimeStamp.IsNull()) {
      TimeStamp now = TimeStamp::Now();
      // It would be more efficient to convert c.mTimeStamp to ms at creation
      // time and then pass that here.
      LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
              (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
    }
  }

  aOutput->Write(buf.Elements(), offset / outputChannels, &(mChunks[mChunks.Length() - 1].mTimeStamp));

  if (aMixer) {
    aMixer->Mix(buf.Elements(), outputChannels, GetDuration(), aOutput->GetRate());
  }
  aOutput->Start();
}
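Note the bookkeeping: buf is sized for the full GetDuration(), but offset only advances for chunks that are actually written, so leading silent chunks (filtered out by the c.mBuffer || aOutput->GetWritten() test) never reach the stream. The final Write converts offset back from samples to frames by dividing by outputChannels.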