void
MediaEngineWebRTCAudioSource::Process(const int channel,
  const webrtc::ProcessingTypes type, sample* audio10ms,
  const int length, const int samplingFreq, const bool isStereo)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed
      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  TimeStamp now = TimeStamp::Now();
  TimeDuration timeSinceLastNotify = now - mLastNotify;
  mLastNotify = now;
  TrackTicks samplesSinceLastNotify =
    RateConvertTicksRoundUp(AUDIO_RATE, 1000000, timeSinceLastNotify.ToMicroseconds());
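  // (Illustrative: with AUDIO_RATE = 44100 and 25 ms since the last notify,
  // this rounds up to ceil(44100 * 25000 / 1e6) = 1103 samples.)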

  // If it's been longer since the last Notify() than mBufferSize holds, we
  // have underrun and the MSG had to append silence while waiting for us
  // to push more data. In this case we reset to mBufferSize again.
  TrackTicks samplesToAppend = std::min(samplesSinceLastNotify, mBufferSize);

  AudioSegment segment;
  AppendToSegment(segment, samplesToAppend);
  mSource->AppendToTrack(mTrackID, &segment);

  // Generate null data for fake tracks.
  if (mHasFakeTracks) {
    for (int i = 0; i < kFakeAudioTrackCount; ++i) {
      AudioSegment nullSegment;
      nullSegment.AppendNullData(samplesToAppend);
      mSource->AppendToTrack(kTrackCount + kFakeVideoTrackCount + i, &nullSegment);
    }
  }
  return NS_OK;
}
nsresult
MediaEngineDefaultAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
{
    if (mState != kAllocated) {
        return NS_ERROR_FAILURE;
    }

    mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
    if (!mTimer) {
        return NS_ERROR_FAILURE;
    }

    mSource = aStream;

    // AddTrack will take ownership of segment
    AudioSegment* segment = new AudioSegment();
    segment->Init(CHANNELS);
    mSource->AddTrack(aID, RATE, 0, segment);

    // We aren't going to add any more tracks
    mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);

    // Remember TrackID so we can finish later
    mTrackID = aID;

    // 1 Audio frame per Video frame
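    // (Illustrative: with FPS = 30 the timer fires about every 33 ms.)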
    mTimer->InitWithCallback(this, 1000 / FPS, nsITimer::TYPE_REPEATING_SLACK);
    mState = kStarted;

    return NS_OK;
}
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  segment.InsertNullDataAtStart(AUDIO_RATE/100); // 10ms of fake data

  mSource->AppendToTrack(mTrackID, &segment);

  return NS_OK;
}
nsresult
MediaEngineDefaultAudioSource::Start(SourceMediaStream* aStream, TrackID aID,
                                     const PrincipalHandle& aPrincipalHandle)
{
  if (mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }

  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
  if (!mTimer) {
    return NS_ERROR_FAILURE;
  }

  mSource = aStream;

  // We try to keep the appended data at this size.
  // Make it two timer intervals to try to avoid underruns.
  mBufferSize = 2 * (AUDIO_RATE * DEFAULT_AUDIO_TIMER_MS) / 1000;
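  // (Illustrative: at AUDIO_RATE = 44100 and DEFAULT_AUDIO_TIMER_MS = 10,
  // mBufferSize = 2 * 441 = 882 samples, i.e. 20 ms of audio.)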

  // AddTrack will take ownership of segment
  AudioSegment* segment = new AudioSegment();
  AppendToSegment(*segment, mBufferSize);
  mSource->AddAudioTrack(aID, AUDIO_RATE, 0, segment, SourceMediaStream::ADDTRACK_QUEUED);

  if (mHasFakeTracks) {
    for (int i = 0; i < kFakeAudioTrackCount; ++i) {
      segment = new AudioSegment();
      segment->AppendNullData(mBufferSize);
      mSource->AddAudioTrack(kTrackCount + kFakeVideoTrackCount + i,
                             AUDIO_RATE, 0, segment, SourceMediaStream::ADDTRACK_QUEUED);
    }
  }

  // Remember TrackID so we can finish later
  mTrackID = aID;

  // Remember PrincipalHandle since we don't append in NotifyPull.
  mPrincipalHandle = aPrincipalHandle;

  mLastNotify = TimeStamp::Now();

  // 1 Audio frame per 10ms
#if defined(MOZ_WIDGET_GONK) && defined(DEBUG)
  // B2G emulator debug is very, very slow and has problems dealing with realtime audio inputs
  mTimer->InitWithCallback(this, DEFAULT_AUDIO_TIMER_MS*10,
                           nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP);
#else
  mTimer->InitWithCallback(this, DEFAULT_AUDIO_TIMER_MS,
                           nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP);
#endif
  mState = kStarted;

  return NS_OK;
}
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
    AudioSegment segment;
    segment.Init(CHANNELS);
    segment.InsertNullDataAtStart(1);

    mSource->AppendToTrack(mTrackID, &segment);

    return NS_OK;
}
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;

  // Notify timer is set every DEFAULT_AUDIO_TIMER_MS milliseconds.
  segment.InsertNullDataAtStart((AUDIO_RATE * MediaEngine::DEFAULT_AUDIO_TIMER_MS) / 1000);

  mSource->AppendToTrack(mTrackID, &segment);

  return NS_OK;
}
Example #8
void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                 uint32_t aFlags)
{
  uint32_t inputCount = mInputs.Length();
  StreamBuffer::Track* track = EnsureTrack(mTrackId);
  // Notify the DOM everything is in order.
  if (!mTrackCreated) {
    for (uint32_t i = 0; i < mListeners.Length(); i++) {
      MediaStreamListener* l = mListeners[i];
      AudioSegment tmp;
      l->NotifyQueuedTrackChanges(
        Graph(), mTrackId, 0, MediaStreamListener::TRACK_EVENT_CREATED, tmp);
      l->NotifyFinishedTrackCreation(Graph());
    }
    mTrackCreated = true;
  }

  // If the captured stream is connected back to an object on the page (be it
  // an HTMLMediaElement with a stream as source, or an AudioContext), a cycle
  // can occur. This can work if it's an AudioContext with at least one
  // DelayNode, but the MSG will mute the whole cycle otherwise.
  if (mFinished || InMutedCycle() || inputCount == 0) {
    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
  } else {
    // We mix down all the tracks of all inputs, to a stereo track. Everything
    // is {up,down}-mixed to stereo.
    mMixer.StartMixing();
    AudioSegment output;
    for (uint32_t i = 0; i < inputCount; i++) {
      MediaStream* s = mInputs[i]->GetSource();
      StreamBuffer::TrackIter tracks(s->GetStreamBuffer(), MediaSegment::AUDIO);
      while (!tracks.IsEnded()) {
        AudioSegment* inputSegment = tracks->Get<AudioSegment>();
        StreamTime inputStart = s->GraphTimeToStreamTimeWithBlocking(aFrom);
        StreamTime inputEnd = s->GraphTimeToStreamTimeWithBlocking(aTo);
        AudioSegment toMix;
        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
        // Care for streams blocked in the [aFrom, aTo] range.
        if (inputEnd - inputStart < aTo - aFrom) {
          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
        }
        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
        tracks.Next();
      }
    }
    // This calls MixerCallback below
    mMixer.FinishMixing();
  }

  // Regardless of the status of the input tracks, we go forward.
  mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking((aTo)));
}
Example #9
nsresult
OmxAudioTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  PROFILER_LABEL("OmxAACAudioTrackEncoder", "GetEncodedTrack",
    js::ProfileEntry::Category::OTHER);
  AudioSegment segment;
  // Move all the samples from mRawSegment to segment. We only hold
  // the monitor in this block.
  {
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait until the encoder is initialized, unless encoding was canceled.
    while (!mInitialized && !mCanceled) {
      mReentrantMonitor.Wait();
    }

    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    segment.AppendFrom(&mRawSegment);
  }

  nsresult rv;
  if (segment.GetDuration() == 0) {
    // Notify EOS at least once, even if segment is empty.
    if (mEndOfStream && !mEosSetInEncoder) {
      mEosSetInEncoder = true;
      rv = mEncoder->Encode(segment, OMXCodecWrapper::BUFFER_EOS);
      NS_ENSURE_SUCCESS(rv, rv);
    }
    // Nothing to encode, but the encoder may still hold output from earlier
    // input.
    return AppendEncodedFrames(aData);
  }

  // The OMX encoder has only a limited number of input buffers, so we may
  // have to feed input and drain output more than once if many samples are
  // pending in the segment.
  while (segment.GetDuration() > 0) {
    rv = mEncoder->Encode(segment,
                          mEndOfStream ? OMXCodecWrapper::BUFFER_EOS : 0);
    NS_ENSURE_SUCCESS(rv, rv);

    rv = AppendEncodedFrames(aData);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}
Example #10
void
nsSpeechTask::SendAudioImpl(RefPtr<mozilla::SharedBuffer>& aSamples, uint32_t aDataLen)
{
  if (aDataLen == 0) {
    mStream->EndAllTrackAndFinish();
    return;
  }

  AudioSegment segment;
  nsAutoTArray<const int16_t*, 1> channelData;
  channelData.AppendElement(static_cast<int16_t*>(aSamples->Data()));
  segment.AppendFrames(aSamples.forget(), channelData, aDataLen);
  mStream->AppendToTrack(1, &segment);
  mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
}
AudioSegment*
SpeechRecognition::CreateAudioSegment(nsTArray<nsRefPtr<SharedBuffer>>& aChunks)
{
  AudioSegment* segment = new AudioSegment();
  for (uint32_t i = 0; i < aChunks.Length(); ++i) {
    nsRefPtr<SharedBuffer> buffer = aChunks[i];
    const int16_t* chunkData = static_cast<const int16_t*>(buffer->Data());

    nsAutoTArray<const int16_t*, 1> channels;
    channels.AppendElement(chunkData);
    segment->AppendFrames(buffer.forget(), channels, mAudioSamplesPerChunk);
  }

  return segment;
}
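// Hedged usage sketch (not from the source): a hypothetical caller for
// CreateAudioSegment(). mStream, mTrackID and mAudioChunks are assumed member
// names; AppendToTrack() consumes the segment's contents, mirroring the other
// examples in this section.
void
SpeechRecognition::FlushAudioChunks()
{
  AudioSegment* segment = CreateAudioSegment(mAudioChunks);
  mStream->AppendToTrack(mTrackID, segment); // moves the frames into the track
  delete segment;                            // the segment is empty after the move
  mAudioChunks.Clear();
}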
Example #12
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());

  mSineGenerator->generate(dest, AUDIO_FRAME_LENGTH);
  nsAutoTArray<const int16_t*,1> channels;
  channels.AppendElement(dest);
  segment.AppendFrames(buffer.forget(), channels, AUDIO_FRAME_LENGTH);
  mSource->AppendToTrack(mTrackID, &segment);

  return NS_OK;
}
Example #13
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput,
                uint32_t aRate, double aVolume)
{
  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // ignore packets that we've already processed
      frameOffset.value() + audio->mFrames <= audioWrittenOffset.value()) {
    return;
  }

  if (audioWrittenOffset.value() < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  MOZ_ASSERT(audioWrittenOffset.value() >= frameOffset.value());

  int64_t offset = audioWrittenOffset.value() - frameOffset.value();
  size_t framesToWrite = audio->mFrames - offset;

  audio->EnsureAudioBuffer();
  nsRefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  nsAutoTArray<const AudioDataValue*, 2> channels;
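  // The buffer is planar: channel i occupies frames [i * mFrames, (i+1) * mFrames),
  // so each channel pointer starts at bufferData + i * mFrames (plus the offset).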
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames + offset);
  }
  aOutput->AppendFrames(buffer.forget(), channels, framesToWrite);
  aStream->mAudioFramesWritten += framesToWrite;
  aOutput->ApplyVolume(aVolume);

  aStream->mNextAudioTime = audio->GetEndTime();
}
Example #14
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput, uint32_t aRate,
                const PrincipalHandle& aPrincipalHandle)
{
  // The amount of audio frames that is used to fuzz rounding errors.
  static const int64_t AUDIO_FUZZ_FRAMES = 1;

  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // ignore packets that we've already processed
      audio->GetEndTime() <= aStream->mNextAudioTime) {
    return;
  }

  if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  // Always write the whole sample without truncation to be consistent with
  // DecodedAudioDataSink::PlayFromAudioQueue()
  audio->EnsureAudioBuffer();
  RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  AutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames);
  }
  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
  aStream->mAudioFramesWritten += audio->mFrames;

  aStream->mNextAudioTime = audio->GetEndTime();
}
//Loop back audio through media-stream
nsresult
MediaEngineWebrtcAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  const int DEFAULT_PORT = 55555;
  printf("\n MediaEngineWebrtcAudioSource : Start: Entered ");
  if (!mInitDone || mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }

  if (!aStream) {
    return NS_ERROR_FAILURE;
  }

  mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
  if (!mTimer) {
    return NS_ERROR_FAILURE;
  }

  mSource = aStream;

  AudioSegment* segment = new AudioSegment();
  segment->Init(CHANNELS);
  //segment->InsertNullDataAtStart(1);
  mSource->AddTrack(aID, PLAYOUT_SAMPLE_FREQUENCY, 0, segment);
  mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  mTrackID = aID;

  printf("\n Starting the audio engine ");
  mVoEBase->SetLocalReceiver(mChannel,DEFAULT_PORT);
  mVoEBase->SetSendDestination(mChannel,DEFAULT_PORT,"127.0.0.1");

  if (mVoEXmedia->SetExternalPlayoutStatus(true) == -1) {
    printf("\n SetExternalPlayoutStatus failed %d ", mVoEBase->LastError());
    return NS_ERROR_FAILURE;
  }
  //loopback audio
  mVoEBase->StartPlayout(mChannel);
  mVoEBase->StartSend(mChannel);
  mVoEBase->StartReceive(mChannel);
   
  mState = kStarted;
  // call every 10 milliseconds
  mTimer->InitWithCallback(this, 10, nsITimer::TYPE_REPEATING_SLACK);
  return NS_OK;
}
Example #16
void
DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin,
                         const PrincipalHandle& aPrincipalHandle)
{
  AssertOwnerThread();

  if (!mInfo.HasAudio()) {
    return;
  }

  AudioSegment output;
  uint32_t rate = mInfo.mAudio.mRate;
  AutoTArray<RefPtr<MediaData>,10> audio;
  TrackID audioTrackId = mInfo.mAudio.mTrackId;
  SourceMediaStream* sourceStream = mData->mStream;

  // It's OK to hold references to the AudioData because AudioData
  // is ref-counted.
  mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
  for (uint32_t i = 0; i < audio.Length(); ++i) {
    SendStreamAudio(mData.get(), mStartTime.ref(), audio[i], &output, rate,
                    aPrincipalHandle);
  }

  output.ApplyVolume(aVolume);

  if (!aIsSameOrigin) {
    output.ReplaceWithDisabled();
  }

  // |mNextAudioTime| is updated as we process each audio sample in
  // SendStreamAudio(). This is consistent with how |mNextVideoTime|
  // is updated for video samples.
  if (output.GetDuration() > 0) {
    sourceStream->AppendToTrack(audioTrackId, &output);
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
    sourceStream->EndTrack(audioTrackId);
    mData->mHaveSentFinishAudio = true;
  }
}
 void Generate(AudioSegment& aSegment, const int32_t& aSamples)
 {
   RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
   int16_t* dest = static_cast<int16_t*>(buffer->Data());
   mGenerator.generate(dest, aSamples);
   AutoTArray<const int16_t*, 1> channels;
   for (int32_t i = 0; i < mChannels; i++) {
     channels.AppendElement(dest);
   }
   aSegment.AppendFrames(buffer.forget(), channels, aSamples, PRINCIPAL_HANDLE_NONE);
 }
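// Hedged usage sketch: AudioGenerator and its (channels, rate) constructor
// mirror the OpusTrackEncoder Init test at the end of this section; the
// helper name is hypothetical.
static void
FillWithSine(AudioSegment& aSegment)
{
  AudioGenerator generator(2, 48000); // 2 channels at 48 kHz
  generator.Generate(aSegment, 480);  // 10 ms worth of samples at 48 kHz
}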
void
MediaEngineDefaultAudioSource::AppendToSegment(AudioSegment& aSegment,
                                               TrackTicks aSamples)
{
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());

  mSineGenerator->generate(dest, aSamples);
  AutoTArray<const int16_t*,1> channels;
  channels.AppendElement(dest);
  aSegment.AppendFrames(buffer.forget(), channels, aSamples);
}
void
MediaEngineWebRTCAudioSource::Process(int channel,
  webrtc::ProcessingTypes type, sample* audio10ms,
  int length, int samplingFreq, bool isStereo)
{
  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}
void
MediaEngineWebRTCAudioSource::Process(const int channel,
  const webrtc::ProcessingTypes type, sample* audio10ms,
  const int length, const int samplingFreq, const bool isStereo)
{
  ReentrantMonitorAutoEnter enter(mMonitor);
  if (mState != kStarted)
    return;

  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

  sample* dest = static_cast<sample*>(buffer->Data());
  memcpy(dest, audio10ms, length * sizeof(sample));

  AudioSegment segment;
  segment.Init(CHANNELS);
  segment.AppendFrames(
    buffer.forget(), length, 0, length, AUDIO_FORMAT_S16
  );
  mSource->AppendToTrack(mTrackID, &segment);

  return;
}
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
  AudioSegment segment;
  nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());

  mSineGenerator->generate(dest, AUDIO_FRAME_LENGTH);
  nsAutoTArray<const int16_t*,1> channels;
  channels.AppendElement(dest);
  segment.AppendFrames(buffer.forget(), channels, AUDIO_FRAME_LENGTH);
  mSource->AppendToTrack(mTrackID, &segment);

  // Generate null data for fake tracks.
  if (mHasFakeTracks) {
    for (int i = 0; i < kFakeAudioTrackCount; ++i) {
      AudioSegment nullSegment;
      nullSegment.AppendNullData(AUDIO_FRAME_LENGTH);
      mSource->AppendToTrack(kTrackCount + kFakeVideoTrackCount + i, &nullSegment);
    }
  }
  return NS_OK;
}
void
nsSpeechTask::SendAudioImpl(int16_t* aData, uint32_t aDataLen)
{
  if (aDataLen == 0) {
    mStream->EndAllTrackAndFinish();
    return;
  }

  nsRefPtr<mozilla::SharedBuffer> samples =
    SharedBuffer::Create(aDataLen * sizeof(int16_t));
  int16_t* frames = static_cast<int16_t*>(samples->Data());

  for (uint32_t i = 0; i < aDataLen; i++) {
    frames[i] = aData[i];
  }

  AudioSegment segment;
  nsAutoTArray<const int16_t*, 1> channelData;
  channelData.AppendElement(frames);
  segment.AppendFrames(samples.forget(), channelData, aDataLen);
  mStream->AppendToTrack(1, &segment);
  mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
}
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  if (mMarkAsFinishedAfterThisBlock) {
    // This stream was finished the last time that we looked at it, and all
    // of the depending streams have finished their output as well, so now
    // it's time to mark this stream as finished.
    FinishOutput();
  }

  StreamBuffer::Track* track = EnsureTrack();

  AudioSegment* segment = track->Get<AudioSegment>();

  mLastChunks.SetLength(1);
  mLastChunks[0].SetNull(0);

  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    OutputChunks inputChunks;
    inputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(inputChunks[i], i);
    }
    bool finished = false;
    if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
      mEngine->ProduceAudioBlock(this, inputChunks[0], &mLastChunks[0], &finished);
    } else {
      mEngine->ProduceAudioBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
    }
    if (finished) {
      mMarkAsFinishedAfterThisBlock = true;
    }
  }

  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}
Example #24
void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  AudioSegment* segment = track->Get<AudioSegment>();

  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                segment->GetDuration(), 0, tmpSegment);
  }
}
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  StreamBuffer::Track* track = EnsureTrack();

  AudioChunk outputChunk;
  AudioSegment* segment = track->Get<AudioSegment>();

  outputChunk.SetNull(0);

  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    outputChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    AudioChunk tmpChunk;
    AudioChunk* inputChunk = ObtainInputBlock(&tmpChunk);
    bool finished = false;
    mEngine->ProduceAudioBlock(this, *inputChunk, &outputChunk, &finished);
    if (finished) {
      FinishOutput();
    }
  }

  mLastChunk = outputChunk;
  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&outputChunk);
  } else {
    segment->AppendNullData(outputChunk.GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = outputChunk;
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}
Example #26
void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  // No more tracks will be coming
  mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);

  AudioSegment* segment = track->Get<AudioSegment>();

  if (!mLastChunks[0].IsNull()) {
    segment->AppendAndConsumeChunk(mLastChunks[0].AsMutableChunk());
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0].AsAudioChunk();
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                segment->GetDuration(), 0, tmpSegment);
  }
}
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  mLastChunks.SetLength(1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (mInputs.IsEmpty()) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  nsAutoTArray<bool,1> trackMapEntriesUsed;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    // Create a TrackMapEntry if necessary.
    size_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
    // Maybe there's nothing in this track yet. If so, ignore it. (While the
    // track is only playing silence, we may not be able to determine the
    // correct number of channels to start resampling.)
    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
      continue;
    }

    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
      trackMapEntriesUsed.AppendElement(false);
    }
    trackMapEntriesUsed[trackMapIndex] = true;

    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
    AudioSegment segment;
    GraphTime next;
    TrackRate inputTrackRate = inputTrack.GetRate();
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // Ticks >= startTicks and < endTicks are in the interval
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      TrackTicks startTicks = trackMap->mSamplesPassedToResampler + segment.GetDuration();
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                   "Samples missing");
      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
      TrackTicks ticks = endTicks - startTicks;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // See comments in TrackUnionStream::CopyTrackData
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
        TrackTicks inputTrackEndPoint =
            inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;

        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
          // Start of a new series of intervals where neither stream is blocked.
          trackMap->mEndOfConsumedInputTicks = TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
        }
        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
        TrackTicks inputEndTicks = inputStartTicks + ticks;
        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;

        if (inputStartTicks < 0) {
          // Data before the start of the track is just null.
          segment.AppendNullData(-inputStartTicks);
          inputStartTicks = 0;
        }
        if (inputEndTicks > inputStartTicks) {
          segment.AppendSlice(*inputTrack.GetSegment(),
                              std::min(inputTrackEndPoint, inputStartTicks),
                              std::min(inputTrackEndPoint, inputEndTicks));
        }
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - segment.GetDuration());
      }
    }

    trackMap->mSamplesPassedToResampler += segment.GetDuration();
    trackMap->ResampleInputData(&segment);

    if (trackMap->mResampledData.GetDuration() < mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
      // We don't have enough data. Delay it.
      trackMap->mResampledData.InsertNullDataAtStart(
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE - trackMap->mResampledData.GetDuration());
    }
    audioSegments.AppendElement()->AppendSlice(trackMap->mResampledData,
      mCurrentOutputPosition, mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    trackMap->mResampledData.ForgetUpTo(mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    inputChannels = GetAudioChannelsSuperset(inputChannels, trackMap->mResamplerChannelCount);
  }

  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (i >= int32_t(trackMapEntriesUsed.Length()) || !trackMapEntriesUsed[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  mCurrentOutputPosition += WEBAUDIO_BLOCK_SIZE;

  // AudioNodeStream's AdvanceOutputSegment() pushes this stream along in the
  // graph, even when the output is null data.
  AdvanceOutputSegment();
}
nsresult
OMXAudioEncoder::Encode(AudioSegment& aSegment, int aInputFlags,
                        bool* aSendEOS)
{
#ifndef MOZ_SAMPLE_TYPE_S16
#error MediaCodec accepts only 16-bit PCM data.
#endif

  MOZ_ASSERT(mStarted, "Configure() should be called before Encode().");

  size_t numSamples = aSegment.GetDuration();

  // Get input buffer.
  InputBufferHelper buffer(mCodec, mInputBufs, *this, aInputFlags);
  status_t result = buffer.Dequeue();
  if (result == -EAGAIN) {
    // All input buffers are full. Caller can try again later after consuming
    // some output buffers.
    return NS_OK;
  }
  NS_ENSURE_TRUE(result == OK, NS_ERROR_FAILURE);

  size_t sourceSamplesCopied = 0; // Number of copied samples.

  if (numSamples > 0) {
    // Copy input PCM data to input buffer until queue is empty.
    AudioSegment::ChunkIterator iter(const_cast<AudioSegment&>(aSegment));
    while (!iter.IsEnded()) {
      BufferState result = buffer.ReadChunk(*iter, &sourceSamplesCopied);
      if (result == WAIT_FOR_NEW_BUFFER) {
        // All input buffers are full. Caller can try again later after
        // consuming some output buffers.
        aSegment.RemoveLeading(sourceSamplesCopied);
        return NS_OK;
      } else if (result == BUFFER_FAIL) {
        return NS_ERROR_FAILURE;
      } else {
        iter.Next();
      }
    }
    // Remove the samples that have already been copied into the buffer.
    if (sourceSamplesCopied > 0) {
      aSegment.RemoveLeading(sourceSamplesCopied);
    }
  } else if (aInputFlags & BUFFER_EOS) {
    buffer.SendEOSToBuffer(&sourceSamplesCopied);
  }

  // Enqueue the data that was copied into the input buffer.
  MOZ_ASSERT(sourceSamplesCopied > 0, "No data needs to be enqueued!");
  int flags = aInputFlags;
  if (aSegment.GetDuration() > 0) {
    // Don't signal EOS until source segment is empty.
    flags &= ~BUFFER_EOS;
  }
  result = buffer.Enqueue(mTimestamp, flags);
  NS_ENSURE_TRUE(result == OK, NS_ERROR_FAILURE);
  if (aSendEOS && (aInputFlags & BUFFER_EOS)) {
    *aSendEOS = true;
  }
  return NS_OK;
}
void
MediaEngineWebRTCAudioSource::Process(int channel,
  webrtc::ProcessingTypes type, sample* audio10ms,
  int length, int samplingFreq, bool isStereo)
{
  // On initial capture, throw away all far-end data except the most recent
  // sample, since it's already irrelevant and we want to avoid confusing the
  // AEC far-end input code with "old" audio.
  if (!mStarted) {
    mStarted  = true;
    while (gFarendObserver->Size() > 1) {
      FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
      free(buffer);
    }
  }

  while (gFarendObserver->Size() > 0) {
    FarEndAudioChunk *buffer = gFarendObserver->Pop(); // only call if size() > 0
    if (buffer) {
      int farEndLength = buffer->mSamples; // don't shadow the |length| parameter
      if (mVoERender->ExternalPlayoutData(buffer->mData,
                                          gFarendObserver->PlayoutFrequency(),
                                          gFarendObserver->PlayoutChannels(),
                                          mPlayoutDelay,
                                          farEndLength) == -1) {
        return;
      }
    }
    free(buffer);
  }

#ifdef PR_LOGGING
  mSamples += length;
  if (mSamples > samplingFreq) {
    mSamples %= samplingFreq; // just in case mSamples >> samplingFreq
    if (PR_LOG_TEST(GetMediaManagerLog(), PR_LOG_DEBUG)) {
      webrtc::EchoStatistics echo;

      mVoECallReport->GetEchoMetricSummary(echo);
#define DUMP_STATVAL(x) (x).min, (x).max, (x).average
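      // DUMP_STATVAL expands each echo metric into the min/max/average triple
      // that the LOG() format string below expects.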
      LOG(("Echo: ERL: %d/%d/%d, ERLE: %d/%d/%d, RERL: %d/%d/%d, NLP: %d/%d/%d",
           DUMP_STATVAL(echo.erl),
           DUMP_STATVAL(echo.erle),
           DUMP_STATVAL(echo.rerl),
           DUMP_STATVAL(echo.a_nlp)));
    }
  }
#endif

  MonitorAutoLock lock(mMonitor);
  if (mState != kStarted)
    return;

  uint32_t len = mSources.Length();
  for (uint32_t i = 0; i < len; i++) {
    nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));

    sample* dest = static_cast<sample*>(buffer->Data());
    memcpy(dest, audio10ms, length * sizeof(sample));

    AudioSegment segment;
    nsAutoTArray<const sample*,1> channels;
    channels.AppendElement(dest);
    segment.AppendFrames(buffer.forget(), channels, length);
    TimeStamp insertTime;
    segment.GetStartTime(insertTime);

    SourceMediaStream *source = mSources[i];
    if (source) {
      // This is safe from any thread, and is safe if the track is Finished
      // or Destroyed.
      // Make sure we include the stream and the track.
      // The 0:1 is a flag to note when we've done the final insert for a given input block.
      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
              (i+1 < len) ? 0 : 1, insertTime);

      source->AppendToTrack(mTrackID, &segment);
    }
  }

  return;
}
TEST(OpusAudioTrackEncoder, Init)
{
  {
    // The encoder does not normally receive enough info from null data to
    // init. However, multiple attempts to do so, with sufficiently long
    // duration segments, should result in a best-effort init. The first
    // attempt should never do this, though, even if the duration is long:
    OpusTrackEncoder encoder(48000);
    AudioSegment segment;
    segment.AppendNullData(48000 * 100);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());

    // Multiple init attempts should result in best effort init:
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_TRUE(encoder.IsInitialized());
  }

  {
    // If the duration of the segments given to the encoder is not long
    // enough, we shouldn't try a best-effort init:
    OpusTrackEncoder encoder(48000);
    AudioSegment segment;
    segment.AppendNullData(1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());
  }

  {
    // For non-null segments we should init immediately
    OpusTrackEncoder encoder(48000);
    AudioSegment segment;
    AudioGenerator generator(2, 48000);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_TRUE(encoder.IsInitialized());
  }

  {
    // Test low sample rate bound
    OpusTrackEncoder encoder(7999);
    AudioSegment segment;
    AudioGenerator generator(2, 7999);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());
  }

  {
    // Test low sample rate bound
    OpusTrackEncoder encoder(8000);
    AudioSegment segment;
    AudioGenerator generator(2, 8000);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_TRUE(encoder.IsInitialized());
  }

  {
    // Test high sample rate bound
    OpusTrackEncoder encoder(192001);
    AudioSegment segment;
    AudioGenerator generator(2, 192001);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_FALSE(encoder.IsInitialized());
  }

  {
    // Test high sample rate bound
    OpusTrackEncoder encoder(192000);
    AudioSegment segment;
    AudioGenerator generator(2, 192000);
    generator.Generate(segment, 1);
    encoder.TryInit(segment, segment.GetDuration());
    EXPECT_TRUE(encoder.IsInitialized());
  }
}