void
MediaEngineWebRTCAudioSource::Process(const int channel,
  const webrtc::ProcessingTypes type, sample* audio10ms,
  const int length, const int samplingFreq, const bool isStereo)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed
      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}
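The snippets above and below all repeat the same pattern: allocate a SharedBuffer sized for the samples, fill it, hand the buffer plus per-channel pointers to AudioSegment::AppendFrames, then push the segment onto a track with AppendToTrack. The following is a minimal sketch of that pattern, not code from the tree: AppendMonoSamples and its parameters (aSource, aTrack, aData, aCount) are hypothetical, the necessary MediaSegment/SharedBuffer headers are assumed, and the three-argument AppendFrames matches the older signature used in most of these examples (newer revisions add PRINCIPAL_HANDLE_NONE, as in the Generate() example below).
static void
AppendMonoSamples(SourceMediaStream* aSource, TrackID aTrack,
                  const int16_t* aData, uint32_t aCount)
{
  // Allocate a refcounted buffer sized for the incoming samples and copy them in.
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aCount * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());
  memcpy(dest, aData, aCount * sizeof(int16_t));

  // One pointer per channel; mono here, so a single entry.
  AutoTArray<const int16_t*, 1> channels;
  channels.AppendElement(dest);

  // The segment takes ownership of the buffer via forget().
  AudioSegment segment;
  segment.AppendFrames(buffer.forget(), channels, aCount);

  // Safe from any thread, and safe if the track is Finished or Destroyed.
  aSource->AppendToTrack(aTrack, &segment);
}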
 void Generate(AudioSegment& aSegment, const int32_t& aSamples)
 {
   RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
   int16_t* dest = static_cast<int16_t*>(buffer->Data());
   mGenerator.generate(dest, aSamples);
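   // The same buffer pointer is appended once per channel, so every
   // channel in the segment shares the one block of generated mono data.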
   AutoTArray<const int16_t*, 1> channels;
   for (int32_t i = 0; i < mChannels; i++) {
     channels.AppendElement(dest);
   }
   aSegment.AppendFrames(buffer.forget(), channels, aSamples, PRINCIPAL_HANDLE_NONE);
 }
void
MediaEngineDefaultAudioSource::AppendToSegment(AudioSegment& aSegment,
                                               TrackTicks aSamples)
{
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());

  mSineGenerator->generate(dest, aSamples);
  AutoTArray<const int16_t*,1> channels;
  channels.AppendElement(dest);
  aSegment.AppendFrames(buffer.forget(), channels, aSamples);
}
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());

  mSineGenerator->generate(dest, AUDIO_FRAME_LENGTH);
  nsAutoTArray<const int16_t*,1> channels;
  channels.AppendElement(dest);
  segment.AppendFrames(buffer.forget(), channels, AUDIO_FRAME_LENGTH);
  mSource->AppendToTrack(mTrackID, &segment);

  return NS_OK;
}
void
nsSpeechTask::SendAudioImpl(RefPtr<mozilla::SharedBuffer>& aSamples, uint32_t aDataLen)
{
  if (aDataLen == 0) {
    mStream->EndAllTrackAndFinish();
    return;
  }

  AudioSegment segment;
  nsAutoTArray<const int16_t*, 1> channelData;
  channelData.AppendElement(static_cast<int16_t*>(aSamples->Data()));
  segment.AppendFrames(aSamples.forget(), channelData, aDataLen);
  mStream->AppendToTrack(1, &segment);
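  // Passing STREAM_TIME_MAX signals that no further tracks will be added
  // to this stream.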
  mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
}
AudioSegment*
SpeechRecognition::CreateAudioSegment(nsTArray<nsRefPtr<SharedBuffer>>& aChunks)
{
  AudioSegment* segment = new AudioSegment();
  for (uint32_t i = 0; i < aChunks.Length(); ++i) {
    nsRefPtr<SharedBuffer> buffer = aChunks[i];
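    // |buffer| holds its own reference to the chunk; forget() below hands
    // that reference to the segment while aChunks keeps the original one.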
    const int16_t* chunkData = static_cast<const int16_t*>(buffer->Data());

    nsAutoTArray<const int16_t*, 1> channels;
    channels.AppendElement(chunkData);
    segment->AppendFrames(buffer.forget(), channels, mAudioSamplesPerChunk);
  }

  return segment;
}
void
MediaEngineWebRTCAudioSource::Process(int channel,
  webrtc::ProcessingTypes type, sample* audio10ms,
  int length, int samplingFreq, bool isStereo)
{
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}
void
MediaEngineWebRTCAudioSource::Process(const int channel,
  const webrtc::ProcessingTypes type, sample* audio10ms,
  const int length, const int samplingFreq, const bool isStereo)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mState != kStarted)
    return;

  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

  sample* dest = static_cast<sample*>(buffer->Data());
  memcpy(dest, audio10ms, length * sizeof(sample));

  AudioSegment segment;
  segment.Init(CHANNELS);
  segment.AppendFrames(
    buffer.forget(), length, 0, length, AUDIO_FORMAT_S16
  );
  mSource->AppendToTrack(mTrackID, &segment);

  return;
}
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());

  mSineGenerator->generate(dest, AUDIO_FRAME_LENGTH);
  nsAutoTArray<const int16_t*,1> channels;
  channels.AppendElement(dest);
  segment.AppendFrames(buffer.forget(), channels, AUDIO_FRAME_LENGTH);
  mSource->AppendToTrack(mTrackID, &segment);

  // Generate null data for fake tracks.
  if (mHasFakeTracks) {
    for (int i = 0; i < kFakeAudioTrackCount; ++i) {
      AudioSegment nullSegment;
      nullSegment.AppendNullData(AUDIO_FRAME_LENGTH);
      mSource->AppendToTrack(kTrackCount + kFakeVideoTrackCount + i, &nullSegment);
    }
  }
  return NS_OK;
}
void
nsSpeechTask::SendAudioImpl(int16_t* aData, uint32_t aDataLen)
{
  if (aDataLen == 0) {
    mStream->EndAllTrackAndFinish();
    return;
  }

  nsRefPtr<mozilla::SharedBuffer> samples =
    SharedBuffer::Create(aDataLen * sizeof(int16_t));
  int16_t* frames = static_cast<int16_t*>(samples->Data());

  for (uint32_t i = 0; i < aDataLen; i++) {
    frames[i] = aData[i];
  }

  AudioSegment segment;
  nsAutoTArray<const int16_t*, 1> channelData;
  channelData.AppendElement(frames);
  segment.AppendFrames(samples.forget(), channelData, aDataLen);
  mStream->AppendToTrack(1, &segment);
  mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
}
void
MediaEngineWebRTCAudioSource::Process(int channel,
  webrtc::ProcessingTypes type, sample* audio10ms,
  int length, int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent
  // sample, since it's already irrelevant and we want to avoid confusing the
  // AEC far-end input code with "old" audio.
  if (!mStarted) {
    mStarted = true;
    while (gFarendObserver->Size() > 1) {
      FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
      free(buffer);
    }
  }

  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
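      // This |length| shadows the function parameter; it is the sample
      // count of the far-end chunk handed to ExternalPlayoutData().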
      int length = buffer->mSamples;
      if (mVoERender->ExternalPlayoutData(buffer->mData,
                                          gFarendObserver->PlayoutFrequency(),
                                          gFarendObserver->PlayoutChannels(),
                                          mPlayoutDelay,
                                          length) == -1) {
        return;
      }
    }
    free(buffer);
  }

#ifdef PR_LOGGING
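  // Log AEC echo metrics roughly once per second of captured audio.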
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;

      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl),
           DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl),
           DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}