Example #1
    static void EmptyAudioCallback(void * userdata, unsigned char * stream, int length)
    {
        // Start from silence so the stream is valid even if no mixer is attached.
        SDL_memset(stream, 0, length);

        AudioMixer * mixer = static_cast<AudioMixer *>(userdata);
        if (mixer)
        {
            mixer->Mix(stream, length);
        }
    }
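The callback only zero-fills the buffer and then lets the mixer overwrite it; the mixer instance travels through the userdata pointer. Below is a minimal sketch of how it might be registered, assuming SDL2 and that EmptyAudioCallback is reachable from this scope; the 44.1 kHz stereo format and buffer size are illustrative choices, not part of the example.

    // Sketch only: open an SDL2 output device that drives EmptyAudioCallback.
    // The sample rate, format, and buffer size below are assumptions.
    #include <SDL.h>

    bool OpenMixerDevice(AudioMixer * mixer)
    {
        SDL_AudioSpec desired;
        SDL_zero(desired);
        desired.freq = 44100;                 // assumed sample rate
        desired.format = AUDIO_S16SYS;        // assumed sample format
        desired.channels = 2;                 // assumed stereo output
        desired.samples = 1024;               // buffer size in sample frames
        desired.callback = EmptyAudioCallback;
        desired.userdata = mixer;             // read back inside the callback

        SDL_AudioDeviceID device = SDL_OpenAudioDevice(nullptr, 0, &desired, nullptr, 0);
        if (device == 0)
        {
            return false;                     // SDL_GetError() describes the failure
        }
        SDL_PauseAudioDevice(device, 0);      // start invoking the callback
        return true;
    }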
Example #2
void
AudioSegment::WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aOutputChannels, uint32_t aSampleRate)
{
  nsAutoTArray<AudioDataValue,AUDIO_PROCESSING_FRAMES*GUESS_AUDIO_CHANNELS> buf;
  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channelData;
  // Offset, in samples, into the buffer that will eventually be sent to the AudioStream.
  uint32_t offset = 0;

  if (GetDuration() <= 0) {
    MOZ_ASSERT(GetDuration() == 0);
    return;
  }

  uint32_t outBufferLength = GetDuration() * aOutputChannels;
  buf.SetLength(outBufferLength);

  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
    AudioChunk& c = *ci;
    uint32_t frames = c.mDuration;

    // If the chunk carries real (non-silent) data, convert and interleave it
    // into the output buffer. Otherwise the chunk is silence, and the matching
    // span of the output buffer is simply zero-filled below.
    if (c.mBuffer && c.mBufferFormat != AUDIO_FORMAT_SILENCE) {
      channelData.SetLength(c.mChannelData.Length());
      for (uint32_t i = 0; i < channelData.Length(); ++i) {
        channelData[i] = c.mChannelData[i];
      }
      if (channelData.Length() < aOutputChannels) {
        // Up-mix. Note that this might temporarily leave channelData with more
        // than aOutputChannels channels.
        AudioChannelsUpMix(&channelData, aOutputChannels, gZeroChannel);
      }
      if (channelData.Length() > aOutputChannels) {
        // Down-mix.
        DownmixAndInterleave(channelData, c.mBufferFormat, frames,
                             c.mVolume, aOutputChannels, buf.Elements() + offset);
      } else {
        InterleaveAndConvertBuffer(channelData.Elements(), c.mBufferFormat,
                                   frames, c.mVolume,
                                   aOutputChannels,
                                   buf.Elements() + offset);
      }
    } else {
      // Assumes that an all-zero bit pattern represents 0.0f.
      memset(buf.Elements() + offset, 0, aOutputChannels * frames * sizeof(AudioDataValue));
    }

    offset += frames * aOutputChannels;

#if !defined(MOZILLA_XPCOMRT_API)
    if (!c.mTimeStamp.IsNull()) {
      TimeStamp now = TimeStamp::Now();
      // It would be more efficient to convert c.mTimeStamp to ms at creation time and pass that here.
      LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
              (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
    }
#endif // !defined(MOZILLA_XPCOMRT_API)
  }

  if (offset) {
    aMixer.Mix(buf.Elements(), aOutputChannels, offset / aOutputChannels, aSampleRate);
  }
}
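InterleaveAndConvertBuffer and DownmixAndInterleave are Gecko helpers that are not shown here. As a rough sketch of what the interleaving step amounts to for planar float input (an illustration only; the real helpers also handle sample-format conversion and channel down-mixing):

// Sketch: interleave planar float channels into a frame-major output buffer
// while applying a per-chunk volume. Illustration only, not the Gecko helper.
static void InterleaveFloatChannels(const float* const* aChannels,
                                    uint32_t aFrames,
                                    float aVolume,
                                    uint32_t aChannelCount,
                                    float* aOutput)
{
  for (uint32_t frame = 0; frame < aFrames; ++frame) {
    for (uint32_t channel = 0; channel < aChannelCount; ++channel) {
      // Output layout is L R L R ... for stereo, matching buf in WriteTo.
      *aOutput++ = aChannels[channel][frame] * aVolume;
    }
  }
}

With that layout, offset counts individual samples rather than frames, which is why the frame count passed to aMixer.Mix at the end of WriteTo is offset / aOutputChannels.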