Example #1
void
AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
                                  AudioSampleFormat aFormat, uint32_t aChannels,
                                  uint32_t aFrames, uint32_t aSampleRate)
{
  AutoTArray<nsTArray<AudioDataValue>, MONO> output;
  AutoTArray<const AudioDataValue*, MONO> bufferPtrs;
  output.SetLength(MONO);
  bufferPtrs.SetLength(MONO);

  uint32_t written = 0;
  // We need to copy here, because the mixer will reuse the storage and we
  // should not hold onto it. Buffers are in planar format.
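  // aMixedBuffer holds aChannels runs of aFrames samples laid end to end
  // (channel 0 first), so `written` advances by aFrames per channel.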
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    AudioDataValue* out = output[channel].AppendElements(aFrames);
    PodCopy(out, aMixedBuffer + written, aFrames);
    bufferPtrs[channel] = out;
    written += aFrames;
  }
  AudioChunk chunk;
  chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
  chunk.mDuration = aFrames;
  chunk.mBufferFormat = aFormat;
  chunk.mVolume = 1.0f;
  chunk.mChannelData.SetLength(MONO);
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    chunk.mChannelData[channel] = bufferPtrs[channel];
  }

  // Now we have mixed data, simply append it to our track.
  EnsureTrack(mTrackId)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
}
Example #2
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
  EnsureTrack(AUDIO_TRACK, mSampleRate);
  // No more tracks will be coming
  mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);

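  // An engine may report zero outputs; keep at least one chunk so that
  // AdvanceOutputSegment() can always read mLastChunks[0].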
  uint16_t outputCount = std::max(uint16_t(1), mEngine->OutputCount());
  mLastChunks.SetLength(outputCount);

  // Consider this stream blocked if it has already finished output. Normally
  // mBlocked would reflect this, but due to rounding errors our audio track may
  // appear to extend slightly beyond aFrom, so we might not be blocked yet.
  bool blocked = mFinished || mBlocked.GetAt(aFrom);
  // If the stream has finished at this time, it will be blocked.
  if (mMuted || blocked) {
    for (uint16_t i = 0; i < outputCount; ++i) {
      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    OutputChunks inputChunks;
    inputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(inputChunks[i], i);
    }
    bool finished = false;
    if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
      mEngine->ProcessBlock(this, inputChunks[0], &mLastChunks[0], &finished);
    } else {
      mEngine->ProcessBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
    }
    for (uint16_t i = 0; i < outputCount; ++i) {
      NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                   "Invalid WebAudio chunk size");
    }
    if (finished) {
      mMarkAsFinishedAfterThisBlock = true;
    }

    if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
      for (uint32_t i = 0; i < outputCount; ++i) {
        mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  if (!blocked) {
    // Don't output anything while blocked
    AdvanceOutputSegment();
    if (mMarkAsFinishedAfterThisBlock && (aFlags & ALLOW_FINISH)) {
      // This stream was finished the last time that we looked at it, and all
      // of the depending streams have finished their output as well, so now
      // it's time to mark this stream as finished.
      FinishOutput();
    }
  }
}
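For context on the dispatch above: the following is a minimal sketch of an engine-side override, assuming the four-argument ProcessBlock signature implied by the mEngine->ProcessBlock(...) call site; GainEngine and the fixed 0.5 gain are hypothetical names used only for illustration, not part of the code shown in these examples.

// Hypothetical engine sketch: pass the input block through and scale it,
// using the chunk's mVolume field so no per-sample work is needed.
void
GainEngine::ProcessBlock(AudioNodeStream* aStream, const AudioChunk& aInput,
                         AudioChunk* aOutput, bool* aFinished)
{
  if (aInput.IsNull()) {
    // Silence in, silence out; keep the block at WEBAUDIO_BLOCK_SIZE frames
    // so the caller's duration assertion holds.
    aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }
  *aOutput = aInput;        // share the input buffer
  aOutput->mVolume *= 0.5f; // apply the (hypothetical) fixed gain
  // A pure gain never finishes the stream on its own, so *aFinished is left
  // untouched (the caller initializes it to false).
}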
Example #3
  virtual void Run() override
  {
    auto ns = static_cast<AudioNodeStream*>(mStream);
    ns->mBufferStartTime -= mAdvance;

    StreamBuffer::Track* track = ns->EnsureTrack(AUDIO_TRACK);
    track->Get<AudioSegment>()->AppendNullData(mAdvance);

    ns->GraphImpl()->DecrementSuspendCount(mStream);
  }
Example #4
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  if (mMarkAsFinishedAfterThisBlock) {
    // This stream was finished the last time that we looked at it, and all
    // of the depending streams have finished their output as well, so now
    // it's time to mark this stream as finished.
    FinishOutput();
  }

  StreamBuffer::Track* track = EnsureTrack();

  AudioSegment* segment = track->Get<AudioSegment>();

  mLastChunks.SetLength(1);
  mLastChunks[0].SetNull(0);

  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    OutputChunks inputChunks;
    inputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(inputChunks[i], i);
    }
    bool finished = false;
    if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
      mEngine->ProduceAudioBlock(this, inputChunks[0], &mLastChunks[0], &finished);
    } else {
      mEngine->ProduceAudioBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
    }
    if (finished) {
      mMarkAsFinishedAfterThisBlock = true;
    }
  }

  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}
Example #5
void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                 uint32_t aFlags)
{
  uint32_t inputCount = mInputs.Length();
  StreamBuffer::Track* track = EnsureTrack(mTrackId);
  // Notify the DOM everything is in order.
  if (!mTrackCreated) {
    for (uint32_t i = 0; i < mListeners.Length(); i++) {
      MediaStreamListener* l = mListeners[i];
      AudioSegment tmp;
      l->NotifyQueuedTrackChanges(
        Graph(), mTrackId, 0, MediaStreamListener::TRACK_EVENT_CREATED, tmp);
      l->NotifyFinishedTrackCreation(Graph());
    }
    mTrackCreated = true;
  }

  // If the captured stream is connected back to an object on the page (be it
  // an HTMLMediaElement with a stream as source, or an AudioContext), a cycle
  // situation can occur. This can work if it's an AudioContext with at least
  // one DelayNode, but the MSG will mute the whole cycle otherwise.
  if (mFinished || InMutedCycle() || inputCount == 0) {
    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
  } else {
    // We mix down all the tracks of all inputs to a mono track. Everything
    // is {up,down}-mixed to mono.
    mMixer.StartMixing();
    AudioSegment output;
    for (uint32_t i = 0; i < inputCount; i++) {
      MediaStream* s = mInputs[i]->GetSource();
      StreamBuffer::TrackIter tracks(s->GetStreamBuffer(), MediaSegment::AUDIO);
      while (!tracks.IsEnded()) {
        AudioSegment* inputSegment = tracks->Get<AudioSegment>();
        StreamTime inputStart = s->GraphTimeToStreamTimeWithBlocking(aFrom);
        StreamTime inputEnd = s->GraphTimeToStreamTimeWithBlocking(aTo);
        AudioSegment toMix;
        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
        // Care for streams blocked in the [aFrom, aTo] range.
        if (inputEnd - inputStart < aTo - aFrom) {
          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
        }
        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
        tracks.Next();
      }
    }
    // This calls MixerCallback below
    mMixer.FinishMixing();
  }

  // Regardless of the status of the input tracks, we go forward.
  mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking(aTo));
}
Example #6
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  if (mMarkAsFinishedAfterThisBlock) {
    // This stream was finished the last time that we looked at it, and all
    // of the depending streams have finished their output as well, so now
    // it's time to mark this stream as finished.
    FinishOutput();
  }

  EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);

  uint16_t outputCount = std::max(uint16_t(1), mEngine->OutputCount());
  mLastChunks.SetLength(outputCount);

  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    for (uint16_t i = 0; i < outputCount; ++i) {
      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  } else {
    for (uint16_t i = 0; i < outputCount; ++i) {
      mLastChunks[i].SetNull(0);
    }

    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    OutputChunks inputChunks;
    inputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(inputChunks[i], i);
    }
    bool finished = false;
    if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
      mEngine->ProduceAudioBlock(this, inputChunks[0], &mLastChunks[0], &finished);
    } else {
      mEngine->ProduceAudioBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
    }
    if (finished) {
      mMarkAsFinishedAfterThisBlock = true;
    }
  }

  if (mDisabledTrackIDs.Contains(AUDIO_NODE_STREAM_TRACK_ID)) {
    for (uint32_t i = 0; i < mLastChunks.Length(); ++i) {
      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  }

  AdvanceOutputSegment();
}
Example #7
void
AudioNodeStream::FinishOutput()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  track->SetEnded();

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
Example #8
void
AudioNodeStream::FinishOutput()
{
  if (IsFinishedOnGraphThread()) {
    return;
  }

  StreamBuffer::Track* track = EnsureTrack();
  track->SetEnded();
  FinishOnGraphThread();

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(),
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
Example #9
void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  AudioSegment* segment = track->Get<AudioSegment>();

  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                segment->GetDuration(), 0, tmpSegment);
  }
}
Example #10
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  StreamBuffer::Track* track = EnsureTrack();

  AudioChunk outputChunk;
  AudioSegment* segment = track->Get<AudioSegment>();

  outputChunk.SetNull(0);

  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    outputChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    AudioChunk tmpChunk;
    AudioChunk* inputChunk = ObtainInputBlock(&tmpChunk);
    bool finished = false;
    mEngine->ProduceAudioBlock(this, *inputChunk, &outputChunk, &finished);
    if (finished) {
      FinishOutput();
    }
  }

  mLastChunk = outputChunk;
  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&outputChunk);
  } else {
    segment->AppendNullData(outputChunk.GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = outputChunk;
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}
Example #11
void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  // No more tracks will be coming
  mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);

  AudioSegment* segment = track->Get<AudioSegment>();

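  // A null chunk represents silence; it is appended as null data of the same
  // duration rather than as a copied buffer of zeros.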
  if (!mLastChunks[0].IsNull()) {
    segment->AppendAndConsumeChunk(mLastChunks[0].AsMutableChunk());
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0].AsAudioChunk();
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                segment->GetDuration(), 0, tmpSegment);
  }
}
Example #12
TrackTicks
AudioNodeStream::GetCurrentPosition()
{
  return EnsureTrack(AUDIO_TRACK, mSampleRate)->Get<AudioSegment>()->GetDuration();
}
Example #13
TrackTicks
AudioNodeStream::GetCurrentPosition()
{
  return EnsureTrack()->Get<AudioSegment>()->GetDuration();
}
Example #14
StreamTime
AudioNodeStream::GetCurrentPosition()
{
  NS_ASSERTION(!mFinished, "Don't create another track after finishing");
  return EnsureTrack(AUDIO_TRACK)->Get<AudioSegment>()->GetDuration();
}
Example #15
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
  if (!mFinished) {
    EnsureTrack(AUDIO_TRACK);
  }
  // No more tracks will be coming
  mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);

  uint16_t outputCount = mLastChunks.Length();
  MOZ_ASSERT(outputCount == std::max(uint16_t(1), mEngine->OutputCount()));

  // Consider this stream blocked if it has already finished output. Normally
  // mBlocked would reflect this, but due to rounding errors our audio track may
  // appear to extend slightly beyond aFrom, so we might not be blocked yet.
  bool blocked = mFinished || mBlocked.GetAt(aFrom);
  // If the stream has finished at this time, it will be blocked.
  if (blocked || InMutedCycle()) {
    mInputChunks.Clear();
    for (uint16_t i = 0; i < outputCount; ++i) {
      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    mInputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(mInputChunks[i], i);
    }
    bool finished = false;
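    // In pass-through mode the engine is bypassed: input port 0 is forwarded
    // unchanged as output port 0.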
    if (mPassThrough) {
      MOZ_ASSERT(outputCount == 1, "For now, we only support nodes that have one output port");
      mLastChunks[0] = mInputChunks[0];
    } else {
      if (maxInputs <= 1 && outputCount <= 1) {
        mEngine->ProcessBlock(this, mInputChunks[0], &mLastChunks[0], &finished);
      } else {
        mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks, &finished);
      }
    }
    for (auto& chunk : mInputChunks) {
      // If the buffer is shared then it won't be reused, so release the
      // reference now.  Keep the channel data array to save a free/alloc
      // pair.
      chunk.ReleaseBufferIfShared();
    }
    for (uint16_t i = 0; i < outputCount; ++i) {
      NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                   "Invalid WebAudio chunk size");
    }
    if (finished) {
      mMarkAsFinishedAfterThisBlock = true;
    }

    if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
      for (uint32_t i = 0; i < outputCount; ++i) {
        mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  if (!blocked) {
    // Don't output anything while blocked
    AdvanceOutputSegment();
    if (mMarkAsFinishedAfterThisBlock && (aFlags & ALLOW_FINISH)) {
      // This stream was finished the last time that we looked at it, and all
      // of the depending streams have finished their output as well, so now
      // it's time to mark this stream as finished.
      FinishOutput();
    }
  }
}