void
CameraPreviewMediaStream::AddListener(MediaStreamListener* aListener)
{
  MutexAutoLock lock(mMutex);

  MediaStreamGraph* gm = MediaStreamGraph::GetInstance();
  MediaStreamListener* listener = *mListeners.AppendElement() = aListener;
  listener->NotifyBlockingChanged(gm, MediaStreamListener::UNBLOCKED);
}
  void TrackUnionStream::CopyTrackData(StreamBuffer::Track* aInputTrack,
                     uint32_t aMapIndex, GraphTime aFrom, GraphTime aTo,
                     bool* aOutputTrackFinished)
  {
    TrackMapEntry* map = &mTrackMap[aMapIndex];
    StreamBuffer::Track* outputTrack = mBuffer.FindTrack(map->mOutputTrackID);
    MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(), "Can't copy to ended track");

    MediaSegment* segment = map->mSegment;
    MediaStream* source = map->mInputPort->GetSource();

    GraphTime next;
    *aOutputTrackFinished = false;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = map->mInputPort->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
      StreamTime inputTrackEndPoint = STREAM_TIME_MAX;

      if (aInputTrack->IsEnded() &&
          aInputTrack->GetEnd() <= inputEnd) {
        inputTrackEndPoint = aInputTrack->GetEnd();
        *aOutputTrackFinished = true;
      }

      if (interval.mStart >= interval.mEnd) {
        break;
      }
      StreamTime ticks = interval.mEnd - interval.mStart;
      next = interval.mEnd;

      StreamTime outputStart = outputTrack->GetEnd();

      if (interval.mInputIsBlocked) {
        // Maybe the input track ended?
        segment->AppendNullData(ticks);
        STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
                   this, (long long)ticks, outputTrack->GetID()));
      } else if (InMutedCycle()) {
        segment->AppendNullData(ticks);
      } else {
        MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTime(interval.mStart),
                   "Samples missing");
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        segment->AppendSlice(*aInputTrack->GetSegment(),
                             std::min(inputTrackEndPoint, inputStart),
                             std::min(inputTrackEndPoint, inputEnd));
      }
      ApplyTrackDisabling(outputTrack->GetID(), segment);
      for (uint32_t j = 0; j < mListeners.Length(); ++j) {
        MediaStreamListener* l = mListeners[j];
        l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
                                    outputStart, 0, *segment);
      }
      outputTrack->GetSegment()->AppendFrom(segment);
    }
  }
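The loop above drives everything by repeatedly asking the input port for its next input interval, clamping it to [aFrom, aTo), and appending either real samples or null data. The following standalone sketch (hypothetical InputInterval/NextInterval helpers, not the Gecko MediaInputPort API) illustrates that interval-walking pattern in isolation:

#include <algorithm>
#include <cstdint>
#include <vector>

using GraphTime = int64_t;

// Stand-in for MediaInputPort::InputInterval.
struct InputInterval {
  GraphTime mStart;
  GraphTime mEnd;
  bool mInputIsBlocked;
};

// Hypothetical replacement for GetNextInputInterval(): return the first interval
// that ends after aTime, clipped so it starts no earlier than aTime.
static InputInterval NextInterval(GraphTime aTime,
                                  const std::vector<InputInterval>& aIntervals)
{
  for (const InputInterval& i : aIntervals) {
    if (i.mEnd > aTime) {
      return { std::max(i.mStart, aTime), i.mEnd, i.mInputIsBlocked };
    }
  }
  return { aTime, INT64_MAX, true };  // past the last interval: treat as blocked
}

// Walk [aFrom, aTo) the same way CopyTrackData does, counting how many ticks
// would be copied as data versus appended as silence.
static void WalkIntervals(GraphTime aFrom, GraphTime aTo,
                          const std::vector<InputInterval>& aIntervals,
                          int64_t* aDataTicks, int64_t* aSilenceTicks)
{
  *aDataTicks = *aSilenceTicks = 0;
  GraphTime next;
  for (GraphTime t = aFrom; t < aTo; t = next) {
    InputInterval interval = NextInterval(t, aIntervals);
    interval.mEnd = std::min(interval.mEnd, aTo);
    if (interval.mStart >= interval.mEnd) {
      break;                      // nothing left before aTo
    }
    GraphTime ticks = interval.mEnd - interval.mStart;
    next = interval.mEnd;
    if (interval.mInputIsBlocked) {
      *aSilenceTicks += ticks;    // AppendNullData() in the real code
    } else {
      *aDataTicks += ticks;       // AppendSlice() in the real code
    }
  }
}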
void
CameraPreviewMediaStream::AddListener(MediaStreamListener* aListener)
{
    MutexAutoLock lock(mMutex);

    MediaStreamListener* listener = *mListeners.AppendElement() = aListener;
    listener->NotifyBlockingChanged(mFakeMediaStreamGraph, MediaStreamListener::UNBLOCKED);
    listener->NotifyHasCurrentData(mFakeMediaStreamGraph);
}
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  if (mMarkAsFinishedAfterThisBlock) {
    // This stream was finished the last time that we looked at it, and all
    // of the depending streams have finished their output as well, so now
    // it's time to mark this stream as finished.
    FinishOutput();
  }

  StreamBuffer::Track* track = EnsureTrack();

  AudioSegment* segment = track->Get<AudioSegment>();

  mLastChunks.SetLength(1);
  mLastChunks[0].SetNull(0);

  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    OutputChunks inputChunks;
    inputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(inputChunks[i], i);
    }
    bool finished = false;
    if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
      mEngine->ProduceAudioBlock(this, inputChunks[0], &mLastChunks[0], &finished);
    } else {
      mEngine->ProduceAudioBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
    }
    if (finished) {
      mMarkAsFinishedAfterThisBlock = true;
    }
  }

  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}
Example #5
  uint32_t TrackUnionStream::AddTrack(MediaInputPort* aPort, StreamBuffer::Track* aTrack,
                    GraphTime aFrom)
  {
    TrackID id = aTrack->GetID();
    if (id > mNextAvailableTrackID &&
       mUsedTracks.BinaryIndexOf(id) == mUsedTracks.NoIndex) {
      // Input id available. Mark it used in mUsedTracks.
      mUsedTracks.InsertElementSorted(id);
    } else {
      // Input id taken, allocate a new one.
      id = mNextAvailableTrackID;

      // Update mNextAvailableTrackID and prune any mUsedTracks members it now
      // covers.
      while (1) {
        if (!mUsedTracks.RemoveElementSorted(++mNextAvailableTrackID)) {
          // Not in use. We're done.
          break;
        }
      }
    }

    // Round up the track start time so the track, if anything, starts a
    // little later than the true time. This means we'll have enough
    // samples in our input stream to go just beyond the destination time.
    StreamTime outputStart = GraphTimeToStreamTimeWithBlocking(aFrom);

    nsAutoPtr<MediaSegment> segment;
    segment = aTrack->GetSegment()->CreateEmptyClone();
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      l->NotifyQueuedTrackChanges(Graph(), id, outputStart,
                                  MediaStreamListener::TRACK_EVENT_CREATED,
                                  *segment,
                                  aPort->GetSource(), aTrack->GetID());
    }
    segment->AppendNullData(outputStart);
    StreamBuffer::Track* track =
      &mBuffer.AddTrack(id, outputStart, segment.forget());
    STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p adding track %d for input stream %p track %d, start ticks %lld",
                              this, id, aPort->GetSource(), aTrack->GetID(),
                              (long long)outputStart));

    TrackMapEntry* map = mTrackMap.AppendElement();
    map->mEndOfConsumedInputTicks = 0;
    map->mEndOfLastInputIntervalInInputStream = -1;
    map->mEndOfLastInputIntervalInOutputStream = -1;
    map->mInputPort = aPort;
    map->mInputTrackID = aTrack->GetID();
    map->mOutputTrackID = track->GetID();
    map->mSegment = aTrack->GetSegment()->CreateEmptyClone();
    return mTrackMap.Length() - 1;
  }
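The ID handling at the top of AddTrack keeps a sorted list of IDs claimed above mNextAvailableTrackID and prunes that list as mNextAvailableTrackID catches up. Below is a minimal sketch of that allocation scheme, using std::set in place of the sorted nsTArray; the class and method names are illustrative only:

#include <cstdint>
#include <set>

using TrackID = int32_t;

class TrackIDAllocator {
public:
  explicit TrackIDAllocator(TrackID aFirstID) : mNextAvailable(aFirstID) {}

  // Prefer the caller's ID if it lies above mNextAvailable and is unused;
  // otherwise hand out mNextAvailable and advance it past any reserved IDs.
  TrackID Allocate(TrackID aPreferred)
  {
    if (aPreferred > mNextAvailable && mUsed.insert(aPreferred).second) {
      return aPreferred;               // "Input id available. Mark it used."
    }
    TrackID id = mNextAvailable;
    // Mirrors the RemoveElementSorted(++mNextAvailableTrackID) loop above:
    // keep advancing while the next candidate was already reserved.
    while (mUsed.erase(++mNextAvailable)) {
    }
    return id;
  }

private:
  TrackID mNextAvailable;       // next ID this allocator may hand out itself
  std::set<TrackID> mUsed;      // IDs claimed above mNextAvailable (kept sorted)
};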
Example #6
void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                 uint32_t aFlags)
{
  uint32_t inputCount = mInputs.Length();
  StreamBuffer::Track* track = EnsureTrack(mTrackId);
  // Notify the DOM everything is in order.
  if (!mTrackCreated) {
    for (uint32_t i = 0; i < mListeners.Length(); i++) {
      MediaStreamListener* l = mListeners[i];
      AudioSegment tmp;
      l->NotifyQueuedTrackChanges(
        Graph(), mTrackId, 0, MediaStreamListener::TRACK_EVENT_CREATED, tmp);
      l->NotifyFinishedTrackCreation(Graph());
    }
    mTrackCreated = true;
  }

  // If the captured stream is connected back to an object on the page (be it an
  // HTMLMediaElement with a stream as source, or an AudioContext), a cycle
  // situation occurs. This can work if it's an AudioContext with at least one
  // DelayNode, but the MSG will mute the whole cycle otherwise.
  if (mFinished || InMutedCycle() || inputCount == 0) {
    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
  } else {
    // We mix down all the tracks of all inputs, to a stereo track. Everything
    // is {up,down}-mixed to stereo.
    mMixer.StartMixing();
    AudioSegment output;
    for (uint32_t i = 0; i < inputCount; i++) {
      MediaStream* s = mInputs[i]->GetSource();
      StreamBuffer::TrackIter tracks(s->GetStreamBuffer(), MediaSegment::AUDIO);
      while (!tracks.IsEnded()) {
        AudioSegment* inputSegment = tracks->Get<AudioSegment>();
        StreamTime inputStart = s->GraphTimeToStreamTimeWithBlocking(aFrom);
        StreamTime inputEnd = s->GraphTimeToStreamTimeWithBlocking(aTo);
        AudioSegment toMix;
        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
        // Care for streams blocked in the [aFrom, aTo] range.
        if (inputEnd - inputStart < aTo - aFrom) {
          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
        }
        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
        tracks.Next();
      }
    }
    // This calls MixerCallback below
    mMixer.FinishMixing();
  }

  // Regardless of the status of the input tracks, we go forward.
  mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking((aTo)));
}
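ProcessInput pads every input with null data so that each one covers the full [aFrom, aTo) range, then hands all of them to the mixer to be {up,down}-mixed to stereo. The following self-contained sketch shows that mix-down on plain float buffers; it uses hypothetical names and is not the Gecko AudioSegment/AudioMixer API:

#include <cstddef>
#include <vector>

// Mix mono inputs of possibly different lengths into interleaved stereo of
// exactly aFrames frames; inputs shorter than aFrames contribute silence.
static std::vector<float> MixToStereo(const std::vector<std::vector<float>>& aInputs,
                                      size_t aFrames)
{
  std::vector<float> stereo(aFrames * 2, 0.0f);
  for (const std::vector<float>& input : aInputs) {
    for (size_t frame = 0; frame < aFrames; ++frame) {
      // Missing samples are treated as null data (silence), like AppendNullData().
      float sample = frame < input.size() ? input[frame] : 0.0f;
      stereo[frame * 2] += sample;       // left
      stereo[frame * 2 + 1] += sample;   // right (mono up-mixed to both channels)
    }
  }
  return stereo;
}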
  uint32_t TrackUnionStream::AddTrack(MediaInputPort* aPort, StreamBuffer::Track* aTrack,
                    GraphTime aFrom)
  {
    // Use the ID of the source track if it's not already assigned to a track,
    // otherwise allocate a new unique ID.
    TrackID id = aTrack->GetID();
    TrackID maxTrackID = 0;
    for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
      TrackID outID = mTrackMap[i].mOutputTrackID;
      maxTrackID = std::max(maxTrackID, outID);
    }
    // Note: we might have removed it here, but it might still be in the
    // StreamBuffer if the TrackUnionStream sees its input stream flip from
    // A to B, where both A and B have a track with the same ID
    while (1) {
      // search until we find one not in use here, and not in mBuffer
      if (!mBuffer.FindTrack(id)) {
        break;
      }
      id = ++maxTrackID;
    }

    // Round up the track start time so the track, if anything, starts a
    // little later than the true time. This means we'll have enough
    // samples in our input stream to go just beyond the destination time.
    StreamTime outputStart = GraphTimeToStreamTime(aFrom);

    nsAutoPtr<MediaSegment> segment;
    segment = aTrack->GetSegment()->CreateEmptyClone();
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      l->NotifyQueuedTrackChanges(Graph(), id, outputStart,
                                  MediaStreamListener::TRACK_EVENT_CREATED,
                                  *segment);
    }
    segment->AppendNullData(outputStart);
    StreamBuffer::Track* track =
      &mBuffer.AddTrack(id, outputStart, segment.forget());
    STREAM_LOG(PR_LOG_DEBUG, ("TrackUnionStream %p adding track %d for input stream %p track %d, start ticks %lld",
                              this, id, aPort->GetSource(), aTrack->GetID(),
                              (long long)outputStart));

    TrackMapEntry* map = mTrackMap.AppendElement();
    map->mEndOfConsumedInputTicks = 0;
    map->mEndOfLastInputIntervalInInputStream = -1;
    map->mEndOfLastInputIntervalInOutputStream = -1;
    map->mInputPort = aPort;
    map->mInputTrackID = aTrack->GetID();
    map->mOutputTrackID = track->GetID();
    map->mSegment = aTrack->GetSegment()->CreateEmptyClone();
    return mTrackMap.Length() - 1;
  }
Example #8
void
AudioNodeStream::FinishOutput()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  track->SetEnded();

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
void
CameraPreviewMediaStream::RemoveVideoOutput(VideoFrameContainer* aContainer)
{
  MutexAutoLock lock(mMutex);
  RemoveVideoOutputImpl(aContainer);

  if (!mVideoOutputs.IsEmpty()) {
    return;
  }
  mIsConsumed = false;
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    l->NotifyConsumptionChanged(mFakeMediaStreamGraph, MediaStreamListener::NOT_CONSUMED);
  }
}
StreamBuffer::Track*
AudioNodeStream::EnsureTrack()
{
  StreamBuffer::Track* track = mBuffer.FindTrack(AUDIO_NODE_STREAM_TRACK_ID);
  if (!track) {
    nsAutoPtr<MediaSegment> segment(new AudioSegment());
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0,
                                  MediaStreamListener::TRACK_EVENT_CREATED,
                                  *segment);
    }
    track = &mBuffer.AddTrack(AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0, segment.forget());
  }
  return track;
}
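EnsureTrack follows a find-or-create pattern: the first caller creates the track, and every registered listener is told about the new, still-empty track via TRACK_EVENT_CREATED. A hypothetical standalone version of the same pattern (names and callback shape are illustrative only):

#include <functional>
#include <map>
#include <utility>
#include <vector>

struct Track { int mID; };

class TrackOwner {
public:
  using CreatedCallback = std::function<void(int /*trackId*/)>;

  // Return the existing track, or create it on first use and notify listeners.
  Track* EnsureTrack(int aID)
  {
    auto it = mTracks.find(aID);
    if (it != mTracks.end()) {
      return &it->second;               // already created on an earlier call
    }
    for (const CreatedCallback& cb : mListeners) {
      cb(aID);                          // mirrors the TRACK_EVENT_CREATED notification
    }
    return &mTracks.emplace(aID, Track{aID}).first->second;
  }

  void AddListener(CreatedCallback aCb) { mListeners.push_back(std::move(aCb)); }

private:
  std::map<int, Track> mTracks;
  std::vector<CreatedCallback> mListeners;
};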
 void TrackUnionStream::EndTrack(uint32_t aIndex)
 {
   StreamBuffer::Track* outputTrack = mBuffer.FindTrack(mTrackMap[aIndex].mOutputTrackID);
   if (!outputTrack || outputTrack->IsEnded())
     return;
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
     StreamTime offset = outputTrack->GetSegment()->GetDuration();
     nsAutoPtr<MediaSegment> segment;
     segment = outputTrack->GetSegment()->CreateEmptyClone();
     l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(), offset,
                                 MediaStreamListener::TRACK_EVENT_ENDED,
                                 *segment);
   }
   outputTrack->SetEnded();
 }
void
CameraPreviewMediaStream::AddVideoOutput(VideoFrameContainer* aContainer)
{
  MutexAutoLock lock(mMutex);
  nsRefPtr<VideoFrameContainer> container = aContainer;
  AddVideoOutputImpl(container.forget());

  if (mVideoOutputs.Length() > 1) {
    return;
  }
  mIsConsumed = true;
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    l->NotifyConsumptionChanged(mFakeMediaStreamGraph, MediaStreamListener::CONSUMED);
  }
}
void
CameraPreviewMediaStream::OnPreviewStateChange(bool aActive)
{
  if (aActive) {
    MutexAutoLock lock(mMutex);
    if (!mTrackCreated) {
      mTrackCreated = true;
      VideoSegment tmpSegment;
      for (uint32_t j = 0; j < mListeners.Length(); ++j) {
        MediaStreamListener* l = mListeners[j];
        l->NotifyQueuedTrackChanges(mFakeMediaStreamGraph, TRACK_VIDEO, 0,
                                    MediaStreamListener::TRACK_EVENT_CREATED,
                                    tmpSegment);
      }
    }
  }
}
void
AudioNodeStream::FinishOutput()
{
  if (IsFinishedOnGraphThread()) {
    return;
  }

  StreamBuffer::Track* track = EnsureTrack();
  track->SetEnded();
  FinishOnGraphThread();

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(),
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
Example #15
void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  AudioSegment* segment = track->Get<AudioSegment>();

  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                segment->GetDuration(), 0, tmpSegment);
  }
}
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  StreamBuffer::Track* track = EnsureTrack();

  AudioChunk outputChunk;
  AudioSegment* segment = track->Get<AudioSegment>();

  outputChunk.SetNull(0);

  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    outputChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    AudioChunk tmpChunk;
    AudioChunk* inputChunk = ObtainInputBlock(&tmpChunk);
    bool finished = false;
    mEngine->ProduceAudioBlock(this, *inputChunk, &outputChunk, &finished);
    if (finished) {
      FinishOutput();
    }
  }

  mLastChunk = outputChunk;
  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    segment->AppendAndConsumeChunk(&outputChunk);
  } else {
    segment->AppendNullData(outputChunk.GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = outputChunk;
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}
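Both versions of ProduceOutput rely on the guarantee noted in the comment above: the graph always asks an AudioNodeStream for exactly one WebAudio block, so an engine only ever fills fixed-size buffers (WEBAUDIO_BLOCK_SIZE is 128 frames in Gecko). A minimal sketch of an engine built around that assumption; GainEngine is hypothetical and not a real AudioNodeEngine subclass:

#include <array>
#include <cstddef>

constexpr size_t kWebAudioBlockSize = 128;   // same value as WEBAUDIO_BLOCK_SIZE

using AudioBlock = std::array<float, kWebAudioBlockSize>;

// A trivial gain "engine": consumes one input block, produces one output block,
// and reports whether it has finished, like the aFinished out-parameter above.
struct GainEngine {
  float mGain = 0.5f;

  bool ProduceBlock(const AudioBlock& aInput, AudioBlock* aOutput)
  {
    for (size_t i = 0; i < kWebAudioBlockSize; ++i) {
      (*aOutput)[i] = aInput[i] * mGain;
    }
    return false;   // never finishes on its own
  }
};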
 void TrackUnionStream::EndTrack(uint32_t aIndex)
 {
   StreamTracks::Track* outputTrack = mTracks.FindTrack(mTrackMap[aIndex].mOutputTrackID);
   if (!outputTrack || outputTrack->IsEnded())
     return;
   STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p ending track %d", this, outputTrack->GetID()));
   for (uint32_t j = 0; j < mListeners.Length(); ++j) {
     MediaStreamListener* l = mListeners[j];
     StreamTime offset = outputTrack->GetSegment()->GetDuration();
     nsAutoPtr<MediaSegment> segment;
     segment = outputTrack->GetSegment()->CreateEmptyClone();
     l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(), offset,
                                 TrackEventCommand::TRACK_EVENT_ENDED,
                                 *segment,
                                 mTrackMap[aIndex].mInputPort->GetSource(),
                                 mTrackMap[aIndex].mInputTrackID);
   }
   for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
     if (b.mTrackID == outputTrack->GetID()) {
       b.mListener->NotifyEnded();
     }
   }
   outputTrack->SetEnded();
 }
Example #18
void
AudioNodeStream::AdvanceOutputSegment()
{
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  // No more tracks will be coming
  mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);

  AudioSegment* segment = track->Get<AudioSegment>();

  if (!mLastChunks[0].IsNull()) {
    segment->AppendAndConsumeChunk(mLastChunks[0].AsMutableChunk());
  } else {
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }

  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0].AsAudioChunk();
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
                                segment->GetDuration(), 0, tmpSegment);
  }
}
  uint32_t TrackUnionStream::AddTrack(MediaInputPort* aPort, StreamTracks::Track* aTrack,
                    GraphTime aFrom)
  {
    STREAM_LOG(LogLevel::Verbose, ("TrackUnionStream %p adding track %d for "
                                   "input stream %p track %d, desired id %d",
                                   this, aTrack->GetID(), aPort->GetSource(),
                                   aTrack->GetID(),
                                   aPort->GetDestinationTrackId()));

    TrackID id;
    if (IsTrackIDExplicit(id = aPort->GetDestinationTrackId())) {
      MOZ_ASSERT(id >= mNextAvailableTrackID &&
                 mUsedTracks.BinaryIndexOf(id) == mUsedTracks.NoIndex,
                 "Desired destination id taken. Only provide a destination ID "
                 "if you can assure its availability, or we may not be able "
                 "to bind to the correct DOM-side track.");
#ifdef DEBUG
      for (size_t i = 0; mInputs[i] != aPort; ++i) {
        MOZ_ASSERT(mInputs[i]->GetSourceTrackId() != TRACK_ANY,
                   "You are adding a MediaInputPort with a track mapping "
                   "while there already exist generic MediaInputPorts for this "
                   "destination stream. This can lead to TrackID collisions!");
      }
#endif
      mUsedTracks.InsertElementSorted(id);
    } else if ((id = aTrack->GetID()) &&
               id > mNextAvailableTrackID &&
               mUsedTracks.BinaryIndexOf(id) == mUsedTracks.NoIndex) {
      // Input id available. Mark it used in mUsedTracks.
      mUsedTracks.InsertElementSorted(id);
    } else {
      // No desired destination id and Input id taken, allocate a new one.
      id = mNextAvailableTrackID;

      // Update mNextAvailableTrackID and prune any mUsedTracks members it now
      // covers.
      while (1) {
        if (!mUsedTracks.RemoveElementSorted(++mNextAvailableTrackID)) {
          // Not in use. We're done.
          break;
        }
      }
    }

    // Round up the track start time so the track, if anything, starts a
    // little later than the true time. This means we'll have enough
    // samples in our input stream to go just beyond the destination time.
    StreamTime outputStart = GraphTimeToStreamTimeWithBlocking(aFrom);

    nsAutoPtr<MediaSegment> segment;
    segment = aTrack->GetSegment()->CreateEmptyClone();
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      l->NotifyQueuedTrackChanges(Graph(), id, outputStart,
                                  TrackEventCommand::TRACK_EVENT_CREATED,
                                  *segment,
                                  aPort->GetSource(), aTrack->GetID());
    }
    segment->AppendNullData(outputStart);
    StreamTracks::Track* track =
      &mTracks.AddTrack(id, outputStart, segment.forget());
    STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p added track %d for input stream %p track %d, start ticks %lld",
                                 this, track->GetID(), aPort->GetSource(), aTrack->GetID(),
                                 (long long)outputStart));

    TrackMapEntry* map = mTrackMap.AppendElement();
    map->mEndOfConsumedInputTicks = 0;
    map->mEndOfLastInputIntervalInInputStream = -1;
    map->mEndOfLastInputIntervalInOutputStream = -1;
    map->mInputPort = aPort;
    map->mInputTrackID = aTrack->GetID();
    map->mOutputTrackID = track->GetID();
    map->mSegment = aTrack->GetSegment()->CreateEmptyClone();

    for (int32_t i = mPendingDirectTrackListeners.Length() - 1; i >= 0; --i) {
      TrackBound<DirectMediaStreamTrackListener>& bound =
        mPendingDirectTrackListeners[i];
      if (bound.mTrackID != map->mOutputTrackID) {
        continue;
      }
      MediaStream* source = map->mInputPort->GetSource();
      map->mOwnedDirectListeners.AppendElement(bound.mListener);
      DisabledTrackMode currentMode = GetDisabledTrackMode(bound.mTrackID);
      if (currentMode != DisabledTrackMode::ENABLED) {
        bound.mListener->IncreaseDisabled(currentMode);
      }
      STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p adding direct listener "
                                   "%p for track %d. Forwarding to input "
                                   "stream %p track %d.",
                                   this, bound.mListener.get(), bound.mTrackID,
                                   source, map->mInputTrackID));
      source->AddDirectTrackListenerImpl(bound.mListener.forget(),
                                         map->mInputTrackID);
      mPendingDirectTrackListeners.RemoveElementAt(i);
    }

    return mTrackMap.Length() - 1;
  }
  void TrackUnionStream::CopyTrackData(StreamTracks::Track* aInputTrack,
                     uint32_t aMapIndex, GraphTime aFrom, GraphTime aTo,
                     bool* aOutputTrackFinished)
  {
    TrackMapEntry* map = &mTrackMap[aMapIndex];
    StreamTracks::Track* outputTrack = mTracks.FindTrack(map->mOutputTrackID);
    MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(), "Can't copy to ended track");

    MediaSegment* segment = map->mSegment;
    MediaStream* source = map->mInputPort->GetSource();

    GraphTime next;
    *aOutputTrackFinished = false;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = map->mInputPort->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      StreamTime inputEnd = source->GraphTimeToStreamTimeWithBlocking(interval.mEnd);
      StreamTime inputTrackEndPoint = STREAM_TIME_MAX;

      if (aInputTrack->IsEnded() &&
          aInputTrack->GetEnd() <= inputEnd) {
        inputTrackEndPoint = aInputTrack->GetEnd();
        *aOutputTrackFinished = true;
      }

      if (interval.mStart >= interval.mEnd) {
        break;
      }
      StreamTime ticks = interval.mEnd - interval.mStart;
      next = interval.mEnd;

      StreamTime outputStart = outputTrack->GetEnd();

      if (interval.mInputIsBlocked) {
        // Maybe the input track ended?
        segment->AppendNullData(ticks);
        STREAM_LOG(LogLevel::Verbose, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
                   this, (long long)ticks, outputTrack->GetID()));
      } else if (InMutedCycle()) {
        segment->AppendNullData(ticks);
      } else {
        if (source->IsSuspended()) {
          segment->AppendNullData(aTo - aFrom);
        } else {
          MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTimeWithBlocking(interval.mStart),
                     "Samples missing");
          StreamTime inputStart = source->GraphTimeToStreamTimeWithBlocking(interval.mStart);
          segment->AppendSlice(*aInputTrack->GetSegment(),
                               std::min(inputTrackEndPoint, inputStart),
                               std::min(inputTrackEndPoint, inputEnd));
        }
      }
      ApplyTrackDisabling(outputTrack->GetID(), segment);
      for (uint32_t j = 0; j < mListeners.Length(); ++j) {
        MediaStreamListener* l = mListeners[j];
        // Separate Audio and Video.
        if (segment->GetType() == MediaSegment::AUDIO) {
          l->NotifyQueuedAudioData(Graph(), outputTrack->GetID(),
                                   outputStart,
                                   *static_cast<AudioSegment*>(segment),
                                   map->mInputPort->GetSource(),
                                   map->mInputTrackID);
        }
      }
      for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
        if (b.mTrackID != outputTrack->GetID()) {
          continue;
        }
        b.mListener->NotifyQueuedChanges(Graph(), outputStart, *segment);
      }
      outputTrack->GetSegment()->AppendFrom(segment);
    }
  }