Example #1
double
AudioContext::CurrentTime() const
{
  MediaStream* stream = Destination()->Stream();
  return stream->StreamTimeToSeconds(stream->GetCurrentTime());
}
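For orientation, StreamTimeToSeconds amounts to dividing a tick count by the graph's sample rate. A minimal standalone sketch of that reading — the names below are illustrative stand-ins, not the Mozilla API, and it assumes stream time is counted in ticks at a fixed rate:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for MediaStream::StreamTimeToSeconds: stream time
// is assumed to be a tick count at the graph's sample rate.
double StreamTimeToSeconds(int64_t ticks, int64_t sampleRate) {
  return static_cast<double>(ticks) / static_cast<double>(sampleRate);
}

int main() {
  // Two seconds' worth of ticks at 48 kHz.
  std::printf("%.3f s\n", StreamTimeToSeconds(96000, 48000));
  return 0;
}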
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    const AudioSegment& inputSegment =
        *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded(); iter.Next()) {
      inputChannels = GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }

  // Use AudioNodeStream's AdvanceOutputSegment to push the media stream graph along, even when the output is null data.
  AdvanceOutputSegment();
}
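The inner loop above is a recurring pattern in this code: walk [aFrom, aTo) one input interval at a time, clamping each interval to the processing window and stopping on an empty interval. A self-contained sketch of just that control flow, with a toy GetNextInputInterval standing in for MediaInputPort's (the 10-tick blocked/unblocked alternation is purely illustrative):

#include <algorithm>
#include <cstdint>
#include <cstdio>

typedef int64_t GraphTime;
struct InputInterval { GraphTime mStart, mEnd; bool mInputIsBlocked; };

// Toy stand-in for MediaInputPort::GetNextInputInterval: alternates
// 10-tick unblocked/blocked spans forever.
InputInterval GetNextInputInterval(GraphTime t) {
  GraphTime start = t, end = (t / 10 + 1) * 10;
  return {start, end, (t / 10) % 2 == 1};
}

int main() {
  const GraphTime aFrom = 0, aTo = 35;
  GraphTime next;
  for (GraphTime t = aFrom; t < aTo; t = next) {
    InputInterval interval = GetNextInputInterval(t);
    interval.mEnd = std::min(interval.mEnd, aTo); // clamp to the window
    if (interval.mStart >= interval.mEnd) break;
    next = interval.mEnd;
    std::printf("[%lld,%lld) %s\n", (long long)interval.mStart,
                (long long)interval.mEnd,
                interval.mInputIsBlocked ? "blocked" : "copy");
  }
  return 0;
}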
Example #3
 void TrackUnionStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
 {
   if (IsFinishedOnGraphThread()) {
     return;
   }
   nsAutoTArray<bool,8> mappedTracksFinished;
   nsAutoTArray<bool,8> mappedTracksWithMatchingInputTracks;
   for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
     mappedTracksFinished.AppendElement(true);
     mappedTracksWithMatchingInputTracks.AppendElement(false);
   }
   bool allFinished = !mInputs.IsEmpty();
   bool allHaveCurrentData = !mInputs.IsEmpty();
   for (uint32_t i = 0; i < mInputs.Length(); ++i) {
     MediaStream* stream = mInputs[i]->GetSource();
     if (!stream->IsFinishedOnGraphThread()) {
       // XXX we really should check whether 'stream' has finished within time aTo,
       // not just that it's finishing when all its queued data eventually runs
       // out.
       allFinished = false;
     }
     if (!stream->HasCurrentData()) {
       allHaveCurrentData = false;
     }
     bool trackAdded = false;
     for (StreamBuffer::TrackIter tracks(stream->GetStreamBuffer());
          !tracks.IsEnded(); tracks.Next()) {
       bool found = false;
       for (uint32_t j = 0; j < mTrackMap.Length(); ++j) {
         TrackMapEntry* map = &mTrackMap[j];
         if (map->mInputPort == mInputs[i] && map->mInputTrackID == tracks->GetID()) {
           bool trackFinished;
           StreamBuffer::Track* outputTrack = mBuffer.FindTrack(map->mOutputTrackID);
           if (!outputTrack || outputTrack->IsEnded()) {
             trackFinished = true;
           } else {
             CopyTrackData(tracks.get(), j, aFrom, aTo, &trackFinished);
           }
           mappedTracksFinished[j] = trackFinished;
           mappedTracksWithMatchingInputTracks[j] = true;
           found = true;
           break;
         }
       }
       if (!found && (!mFilterCallback || mFilterCallback(tracks.get()))) {
         bool trackFinished = false;
         trackAdded = true;
         uint32_t mapIndex = AddTrack(mInputs[i], tracks.get(), aFrom);
         CopyTrackData(tracks.get(), mapIndex, aFrom, aTo, &trackFinished);
         mappedTracksFinished.AppendElement(trackFinished);
         mappedTracksWithMatchingInputTracks.AppendElement(true);
       }
     }
     if (trackAdded) {
       for (MediaStreamListener* l : mListeners) {
         l->NotifyFinishedTrackCreation(Graph());
       }
     }
   }
   for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
     if (mappedTracksFinished[i]) {
       EndTrack(i);
     } else {
       allFinished = false;
     }
     if (!mappedTracksWithMatchingInputTracks[i]) {
       mTrackMap.RemoveElementAt(i);
     }
   }
   if (allFinished && mAutofinish && (aFlags & ALLOW_FINISH)) {
     // All streams have finished and won't add any more tracks, and
     // all our tracks have actually finished and been removed from our map,
     // so we're finished now.
     FinishOnGraphThread();
   } else {
     mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTime(aTo));
   }
   if (allHaveCurrentData) {
     // We can make progress if we're not blocked
     mHasCurrentData = true;
   }
 }
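The final cleanup loop iterates the track map backwards; this is what lets it call RemoveElementAt(i) without disturbing the indices it has yet to visit. A minimal sketch of the same idiom with std::vector:

#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal sketch of the backwards sweep above: iterating from the end lets
// us erase element i without shifting the indices we have yet to visit.
int main() {
  std::vector<int> trackMap = {10, 11, 12, 13};
  std::vector<bool> matched = {true, false, true, false};
  for (int32_t i = int32_t(trackMap.size()) - 1; i >= 0; --i) {
    if (!matched[i]) {
      trackMap.erase(trackMap.begin() + i); // mTrackMap.RemoveElementAt(i)
    }
  }
  for (int t : trackMap) std::printf("%d ", t); // prints: 10 12
  std::printf("\n");
  return 0;
}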
Example #4
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread() ||
        a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  switch (mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the output channel count that we've calculated, and just use
    // mNumberOfInputChannels.
    outputChannelCount = mNumberOfInputChannels;
    break;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
    break;
  case ChannelCountMode::Max:
    // Nothing to do here, just shut up the compiler warning.
    break;
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
        NS_ASSERTION(outputChannelCount == channels.Length(),
                     "We called GetAudioChannelsSuperset to avoid this");
      } else {
        // Fill up the remaining channels by zeros
        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
          channels.AppendElement(silenceChannel);
        }
      }
    } else if (channels.Length() > outputChannelCount) {
      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
        outputChannels.SetLength(outputChannelCount);
        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
        for (uint32_t j = 0; j < outputChannelCount; ++j) {
          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
        }

        AudioChannelsDownMix(channels, outputChannels.Elements(),
                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);

        channels.SetLength(outputChannelCount);
        for (uint32_t j = 0; j < channels.Length(); ++j) {
          channels[j] = outputChannels[j];
        }
      } else {
        // Drop the remaining channels
        channels.RemoveElementsAt(outputChannelCount,
                                  channels.Length() - outputChannelCount);
      }
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }
}
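When the interpretation is not "speakers", the code above up-mixes by pointing every missing channel at one shared block of silence rather than allocating zeroed buffers per channel. A standalone sketch of that trick (kBlockSize mirrors WEBAUDIO_BLOCK_SIZE; everything else is illustrative):

#include <cstddef>
#include <cstdio>
#include <vector>

const size_t kBlockSize = 128; // mirrors WEBAUDIO_BLOCK_SIZE

int main() {
  static const float silence[kBlockSize] = {}; // one shared zero buffer
  float mono[kBlockSize] = {0.25f};
  std::vector<const float*> channels = {mono}; // a mono input chunk
  const size_t outputChannelCount = 2;
  // Fill up the remaining channels with the shared silence, no copies.
  while (channels.size() < outputChannelCount) {
    channels.push_back(silence);
  }
  std::printf("%zu channels, channel 1 sample 0 = %f\n",
              channels.size(), channels[1][0]);
  return 0;
}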
Example #5
void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                 uint32_t aFlags)
{
    if (!mStarted) {
        return;
    }

    uint32_t inputCount = mInputs.Length();
    StreamTracks::Track* track = EnsureTrack(mTrackId);
    // Notify the DOM everything is in order.
    if (!mTrackCreated) {
        for (uint32_t i = 0; i < mListeners.Length(); i++) {
            MediaStreamListener* l = mListeners[i];
            AudioSegment tmp;
            l->NotifyQueuedTrackChanges(
                Graph(), mTrackId, 0, TrackEventCommand::TRACK_EVENT_CREATED, tmp);
            l->NotifyFinishedTrackCreation(Graph());
        }
        mTrackCreated = true;
    }

    if (IsFinishedOnGraphThread()) {
        return;
    }

    // If the captured stream is connected back to an object on the page (be it
    // an HTMLMediaElement with a stream as source, or an AudioContext), a cycle
    // can occur. This can work if it's an AudioContext with at least one
    // DelayNode, but the MSG will mute the whole cycle otherwise.
    if (InMutedCycle() || inputCount == 0) {
        track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
    } else {
        // We mix down all the tracks of all inputs to a stereo track. Everything
        // is {up,down}-mixed to stereo.
        mMixer.StartMixing();
        AudioSegment output;
        for (uint32_t i = 0; i < inputCount; i++) {
            MediaStream* s = mInputs[i]->GetSource();
            StreamTracks::TrackIter tracks(s->GetStreamTracks(), MediaSegment::AUDIO);
            while (!tracks.IsEnded()) {
                AudioSegment* inputSegment = tracks->Get<AudioSegment>();
                StreamTime inputStart = s->GraphTimeToStreamTimeWithBlocking(aFrom);
                StreamTime inputEnd = s->GraphTimeToStreamTimeWithBlocking(aTo);
                if (tracks->IsEnded() && inputSegment->GetDuration() <= inputEnd) {
                    // If the input track has ended and we have consumed all
                    // its data, it can be ignored.
                    continue;
                }
                AudioSegment toMix;
                toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
                // Care for streams blocked in the [aFrom, aTo] range.
                if (inputEnd - inputStart < aTo - aFrom) {
                    toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
                }
                toMix.Mix(mMixer, MONO, Graph()->GraphRate());
                tracks.Next();
            }
        }
        // This calls MixerCallback below
        mMixer.FinishMixing();
    }

    // Regardless of the status of the input tracks, we go forward.
    mTracks.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking(aTo));
}
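The padding step above is easy to get wrong: the output must always advance by exactly aTo - aFrom ticks, so whatever the input cannot supply is filled with silence. A minimal numeric sketch of that invariant, with a toy Segment standing in for AudioSegment:

#include <algorithm>
#include <cstdint>
#include <cstdio>

typedef int64_t StreamTime;

// Toy stand-in for AudioSegment: we only track the duration, which is all
// the padding invariant cares about.
struct Segment {
  StreamTime duration = 0;
  void AppendSlice(StreamTime start, StreamTime end) { duration += end - start; }
  void AppendNullData(StreamTime ticks) { duration += ticks; }
};

int main() {
  const StreamTime ticks = 128;          // aTo - aFrom for this interval
  const StreamTime inputAvailable = 100; // the input track ends early
  Segment toMix;
  StreamTime inputStart = 0;
  StreamTime inputEnd = std::min(inputAvailable, ticks);
  toMix.AppendSlice(inputStart, inputEnd);
  // Pad with silence so the output still advances by exactly `ticks`.
  toMix.AppendNullData(ticks - (inputEnd - inputStart));
  std::printf("%lld\n", (long long)toMix.duration); // 128
  return 0;
}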
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  AutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamTracks::TrackIter tracks(source->mTracks);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamTracks::Track& inputTrack = *tracks;
    if (!mInputs[0]->PassTrackThrough(tracks->GetID())) {
      continue;
    }

    if (inputTrack.GetSegment()->GetType() == MediaSegment::VIDEO) {
      MOZ_ASSERT(false, "AudioNodeExternalInputStream shouldn't have video tracks");
      continue;
    }

    const AudioSegment& inputSegment =
        *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // We know this stream does not block during the processing interval ---
      // we're not finished, we don't underrun, and we're not suspended.
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // The input stream is not blocked in this interval, so no need to call
        // GraphTimeToStreamTimeWithBlocking.
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded(); iter.Next()) {
      inputChannels = GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    DownmixBufferType downmixBuffer;
    ASSERT_ALIGNED16(downmixBuffer.Elements());
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioBlock tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          mLastChunks[0].AllocateChannels(inputChannels);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
}
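GetAudioChannelsSuperset, used above to settle on inputChannels, appears to reduce to taking the larger of the two channel counts. A hedged sketch under that assumption (the helper name is illustrative):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// Assumed behavior of GetAudioChannelsSuperset: the superset of two
// channel layouts is simply the one with more channels.
uint32_t ChannelsSuperset(uint32_t a, uint32_t b) {
  return std::max(a, b);
}

int main() {
  uint32_t channels = 0;
  for (uint32_t c : {1u, 2u, 6u, 2u}) { // channel counts seen per chunk
    channels = ChannelsSuperset(channels, c);
  }
  std::printf("%u\n", channels); // 6
  return 0;
}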
Example #7
int MediaStream::ProcessBusMessageWrapper(GstBus* bus, GstMessage* message,
		gpointer user_data)
{
	MediaStream* instance = (MediaStream*)user_data;
	return instance->ProcessBusMessage(bus, message, user_data);
}
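ProcessBusMessageWrapper is the classic trampoline for handing a C++ member function to a C callback API: the library stores an opaque user_data pointer, and a static wrapper casts it back to this. A self-contained sketch of the pattern (the callback signature below is a stand-in, not GStreamer's GstBusFunc):

#include <cstdio>

// A stand-in C callback signature; GStreamer's bus callbacks similarly
// pass an opaque user_data pointer as the last argument.
typedef int (*BusCallback)(void* bus, void* message, void* user_data);

class MediaStreamWatcher {
public:
  // The static trampoline: recover `this` from user_data and forward to
  // the member function, exactly as ProcessBusMessageWrapper does.
  static int Trampoline(void* bus, void* message, void* user_data) {
    return static_cast<MediaStreamWatcher*>(user_data)->OnBusMessage(bus, message);
  }

private:
  int OnBusMessage(void* /*bus*/, void* /*message*/) {
    std::puts("bus message received");
    return 1; // non-zero keeps the watch installed in GLib-style APIs
  }
};

int main() {
  MediaStreamWatcher watcher;
  BusCallback cb = &MediaStreamWatcher::Trampoline;
  cb(nullptr, nullptr, &watcher); // the C library would invoke it like this
  return 0;
}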
Example #8
void TrackUnionStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                    uint32_t aFlags) {
  TRACE_AUDIO_CALLBACK_COMMENT("TrackUnionStream %p", this);
  if (IsFinishedOnGraphThread()) {
    return;
  }
  AutoTArray<bool, 8> mappedTracksFinished;
  AutoTArray<bool, 8> mappedTracksWithMatchingInputTracks;
  for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
    mappedTracksFinished.AppendElement(true);
    mappedTracksWithMatchingInputTracks.AppendElement(false);
  }

  AutoTArray<MediaInputPort*, 32> inputs(mInputs);
  inputs.AppendElements(mSuspendedInputs);

  bool allFinished = !inputs.IsEmpty();
  bool allHaveCurrentData = !inputs.IsEmpty();
  for (uint32_t i = 0; i < inputs.Length(); ++i) {
    MediaStream* stream = inputs[i]->GetSource();
    if (!stream->IsFinishedOnGraphThread()) {
      // XXX we really should check whether 'stream' has finished within time
      // aTo, not just that it's finishing when all its queued data eventually
      // runs out.
      allFinished = false;
    }
    if (!stream->HasCurrentData()) {
      allHaveCurrentData = false;
    }
    for (StreamTracks::TrackIter tracks(stream->GetStreamTracks());
         !tracks.IsEnded(); tracks.Next()) {
      bool found = false;
      for (uint32_t j = 0; j < mTrackMap.Length(); ++j) {
        TrackMapEntry* map = &mTrackMap[j];
        if (map->mInputPort == inputs[i] &&
            map->mInputTrackID == tracks->GetID()) {
          bool trackFinished = false;
          StreamTracks::Track* outputTrack =
              mTracks.FindTrack(map->mOutputTrackID);
          found = true;
          if (!outputTrack || outputTrack->IsEnded() ||
              !inputs[i]->PassTrackThrough(tracks->GetID())) {
            trackFinished = true;
          } else {
            CopyTrackData(tracks.get(), j, aFrom, aTo, &trackFinished);
          }
          mappedTracksFinished[j] = trackFinished;
          mappedTracksWithMatchingInputTracks[j] = true;
          break;
        }
      }
      if (!found && inputs[i]->AllowCreationOf(tracks->GetID())) {
        bool trackFinished = false;
        uint32_t mapIndex = AddTrack(inputs[i], tracks.get(), aFrom);
        CopyTrackData(tracks.get(), mapIndex, aFrom, aTo, &trackFinished);
        mappedTracksFinished.AppendElement(trackFinished);
        mappedTracksWithMatchingInputTracks.AppendElement(true);
      }
    }
  }
  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (mappedTracksFinished[i]) {
      EndTrack(i);
    } else {
      allFinished = false;
    }
    if (!mappedTracksWithMatchingInputTracks[i]) {
      for (auto listener : mTrackMap[i].mOwnedDirectListeners) {
        // Remove listeners while the entry still exists.
        RemoveDirectTrackListenerImpl(listener, mTrackMap[i].mOutputTrackID);
      }
      mTrackMap.RemoveElementAt(i);
    }
  }
  if (allFinished && mAutofinish && (aFlags & ALLOW_FINISH)) {
    // All streams have finished and won't add any more tracks, and
    // all our tracks have actually finished and been removed from our map,
    // so we're finished now.
    FinishOnGraphThread();
  }
  if (allHaveCurrentData) {
    // We can make progress if we're not blocked
    mHasCurrentData = true;
  }
}
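One subtle detail above: allFinished and allHaveCurrentData are seeded with !inputs.IsEmpty() rather than true. An all-of over an empty set is vacuously true, which would make an input-less stream "finish" immediately; seeding with false when there are no inputs avoids that. A tiny sketch:

#include <cstdio>
#include <vector>

int main() {
  std::vector<bool> inputFinished; // a stream with no inputs at all
  // Seeding with !empty() instead of true prevents the vacuous
  // "all finished" conclusion over an empty input set.
  bool allFinished = !inputFinished.empty();
  for (bool f : inputFinished) {
    allFinished = allFinished && f;
  }
  std::printf("%s\n", allFinished ? "finish" : "keep running"); // keep running
  return 0;
}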
Example #9
  uint32_t TrackUnionStream::AddTrack(MediaInputPort* aPort, StreamTracks::Track* aTrack,
                    GraphTime aFrom)
  {
    STREAM_LOG(LogLevel::Verbose, ("TrackUnionStream %p adding track %d for "
                                   "input stream %p track %d, desired id %d",
                                   this, aTrack->GetID(), aPort->GetSource(),
                                   aTrack->GetID(),
                                   aPort->GetDestinationTrackId()));

    TrackID id;
    if (IsTrackIDExplicit(id = aPort->GetDestinationTrackId())) {
      MOZ_ASSERT(id >= mNextAvailableTrackID &&
                 mUsedTracks.BinaryIndexOf(id) == mUsedTracks.NoIndex,
                 "Desired destination id taken. Only provide a destination ID "
                 "if you can assure its availability, or we may not be able "
                 "to bind to the correct DOM-side track.");
#ifdef DEBUG
      for (size_t i = 0; mInputs[i] != aPort; ++i) {
        MOZ_ASSERT(mInputs[i]->GetSourceTrackId() != TRACK_ANY,
                   "You are adding a MediaInputPort with a track mapping "
                   "while there already exist generic MediaInputPorts for this "
                   "destination stream. This can lead to TrackID collisions!");
      }
#endif
      mUsedTracks.InsertElementSorted(id);
    } else if ((id = aTrack->GetID()) &&
               id > mNextAvailableTrackID &&
               mUsedTracks.BinaryIndexOf(id) == mUsedTracks.NoIndex) {
      // Input id available. Mark it used in mUsedTracks.
      mUsedTracks.InsertElementSorted(id);
    } else {
      // No desired destination id, and the input id is taken; allocate a new one.
      id = mNextAvailableTrackID;

      // Update mNextAvailableTrackID and prune any mUsedTracks members it now
      // covers.
      while (1) {
        if (!mUsedTracks.RemoveElementSorted(++mNextAvailableTrackID)) {
          // Not in use. We're done.
          break;
        }
      }
    }

    // Round up the track start time so the track, if anything, starts a
    // little later than the true time. This means we'll have enough
    // samples in our input stream to go just beyond the destination time.
    StreamTime outputStart = GraphTimeToStreamTimeWithBlocking(aFrom);

    nsAutoPtr<MediaSegment> segment;
    segment = aTrack->GetSegment()->CreateEmptyClone();
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      l->NotifyQueuedTrackChanges(Graph(), id, outputStart,
                                  TrackEventCommand::TRACK_EVENT_CREATED,
                                  *segment,
                                  aPort->GetSource(), aTrack->GetID());
    }
    segment->AppendNullData(outputStart);
    StreamTracks::Track* track =
      &mTracks.AddTrack(id, outputStart, segment.forget());
    STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p added track %d for input stream %p track %d, start ticks %lld",
                                 this, track->GetID(), aPort->GetSource(), aTrack->GetID(),
                                 (long long)outputStart));

    TrackMapEntry* map = mTrackMap.AppendElement();
    map->mEndOfConsumedInputTicks = 0;
    map->mEndOfLastInputIntervalInInputStream = -1;
    map->mEndOfLastInputIntervalInOutputStream = -1;
    map->mInputPort = aPort;
    map->mInputTrackID = aTrack->GetID();
    map->mOutputTrackID = track->GetID();
    map->mSegment = aTrack->GetSegment()->CreateEmptyClone();

    for (int32_t i = mPendingDirectTrackListeners.Length() - 1; i >= 0; --i) {
      TrackBound<DirectMediaStreamTrackListener>& bound =
        mPendingDirectTrackListeners[i];
      if (bound.mTrackID != map->mOutputTrackID) {
        continue;
      }
      MediaStream* source = map->mInputPort->GetSource();
      map->mOwnedDirectListeners.AppendElement(bound.mListener);
      DisabledTrackMode currentMode = GetDisabledTrackMode(bound.mTrackID);
      if (currentMode != DisabledTrackMode::ENABLED) {
        bound.mListener->IncreaseDisabled(currentMode);
      }
      STREAM_LOG(LogLevel::Debug, ("TrackUnionStream %p adding direct listener "
                                   "%p for track %d. Forwarding to input "
                                   "stream %p track %d.",
                                   this, bound.mListener.get(), bound.mTrackID,
                                   source, map->mInputTrackID));
      source->AddDirectTrackListenerImpl(bound.mListener.forget(),
                                         map->mInputTrackID);
      mPendingDirectTrackListeners.RemoveElementAt(i);
    }

    return mTrackMap.Length() - 1;
  }
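The ID bookkeeping in AddTrack — a low-water mark (mNextAvailableTrackID) plus a sorted set of IDs reserved above it (mUsedTracks) — can be isolated into a few lines. A sketch with illustrative names, where InsertElementSorted/RemoveElementSorted become binary-search operations on a std::vector:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct IdAllocator {
  int32_t nextAvailable = 1;
  std::vector<int32_t> used; // sorted, every element >= nextAvailable

  bool TryReserve(int32_t id) {
    if (id < nextAvailable ||
        std::binary_search(used.begin(), used.end(), id)) {
      return false; // taken
    }
    used.insert(std::lower_bound(used.begin(), used.end(), id), id);
    return true;
  }

  int32_t Allocate() {
    int32_t id = nextAvailable;
    // Advance the mark, pruning every consecutive ID it now covers
    // (mirrors the RemoveElementSorted loop above).
    while (true) {
      auto it = std::lower_bound(used.begin(), used.end(), ++nextAvailable);
      if (it == used.end() || *it != nextAvailable) break;
      used.erase(it);
    }
    return id;
  }
};

int main() {
  IdAllocator alloc;
  alloc.TryReserve(2);                   // an input track already owns ID 2
  std::printf("%d\n", alloc.Allocate()); // 1
  std::printf("%d\n", alloc.Allocate()); // 3, since 2 is reserved
  return 0;
}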
Example #10
  void TrackUnionStream::CopyTrackData(StreamTracks::Track* aInputTrack,
                     uint32_t aMapIndex, GraphTime aFrom, GraphTime aTo,
                     bool* aOutputTrackFinished)
  {
    TrackMapEntry* map = &mTrackMap[aMapIndex];
    StreamTracks::Track* outputTrack = mTracks.FindTrack(map->mOutputTrackID);
    MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(), "Can't copy to ended track");

    MediaSegment* segment = map->mSegment;
    MediaStream* source = map->mInputPort->GetSource();

    GraphTime next;
    *aOutputTrackFinished = false;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = map->mInputPort->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      StreamTime inputEnd = source->GraphTimeToStreamTimeWithBlocking(interval.mEnd);
      StreamTime inputTrackEndPoint = STREAM_TIME_MAX;

      if (aInputTrack->IsEnded() &&
          aInputTrack->GetEnd() <= inputEnd) {
        inputTrackEndPoint = aInputTrack->GetEnd();
        *aOutputTrackFinished = true;
      }

      if (interval.mStart >= interval.mEnd) {
        break;
      }
      StreamTime ticks = interval.mEnd - interval.mStart;
      next = interval.mEnd;

      StreamTime outputStart = outputTrack->GetEnd();

      if (interval.mInputIsBlocked) {
        // Maybe the input track ended?
        segment->AppendNullData(ticks);
        STREAM_LOG(LogLevel::Verbose, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
                   this, (long long)ticks, outputTrack->GetID()));
      } else if (InMutedCycle()) {
        segment->AppendNullData(ticks);
      } else {
        if (source->IsSuspended()) {
          segment->AppendNullData(aTo - aFrom);
        } else {
          MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTimeWithBlocking(interval.mStart),
                     "Samples missing");
          StreamTime inputStart = source->GraphTimeToStreamTimeWithBlocking(interval.mStart);
          segment->AppendSlice(*aInputTrack->GetSegment(),
                               std::min(inputTrackEndPoint, inputStart),
                               std::min(inputTrackEndPoint, inputEnd));
        }
      }
      ApplyTrackDisabling(outputTrack->GetID(), segment);
      for (uint32_t j = 0; j < mListeners.Length(); ++j) {
        MediaStreamListener* l = mListeners[j];
        // Separate Audio and Video.
        if (segment->GetType() == MediaSegment::AUDIO) {
          l->NotifyQueuedAudioData(Graph(), outputTrack->GetID(),
                                   outputStart,
                                   *static_cast<AudioSegment*>(segment),
                                   map->mInputPort->GetSource(),
                                   map->mInputTrackID);
        }
      }
      for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
        if (b.mTrackID != outputTrack->GetID()) {
          continue;
        }
        b.mListener->NotifyQueuedChanges(Graph(), outputStart, *segment);
      }
      outputTrack->GetSegment()->AppendFrom(segment);
    }
  }
Example #11
int
readDoneCallback( void* data, const char* cookie, size_t bufferSize, void* buffer )
{
    MediaStream* mediaStream = static_cast< MediaStream * >( data );
    return mediaStream->readDoneCallback( cookie, bufferSize, buffer );
}
Example #12
int readCallback( void* data, const char* cookie, int64_t* dts, int64_t* pts, unsigned* flags, size_t* bufferSize, void** buffer )
{
    MediaStream* mediaStream = static_cast< MediaStream * >( data );
    return mediaStream->readCallback( cookie, dts, pts, flags, bufferSize, buffer );
}
void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  mLastChunks.SetLength(1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (mInputs.IsEmpty()) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    AdvanceOutputSegment();
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  nsAutoTArray<AudioSegment,1> audioSegments;
  nsAutoTArray<bool,1> trackMapEntriesUsed;
  uint32_t inputChannels = 0;
  for (StreamBuffer::TrackIter tracks(source->mBuffer, MediaSegment::AUDIO);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamBuffer::Track& inputTrack = *tracks;
    // Create a TrackMapEntry if necessary.
    size_t trackMapIndex = GetTrackMapEntry(inputTrack, aFrom);
    // Maybe there's nothing in this track yet. If so, ignore it. (While the
    // track is only playing silence, we may not be able to determine the
    // correct number of channels to start resampling.)
    if (trackMapIndex == nsTArray<TrackMapEntry>::NoIndex) {
      continue;
    }

    while (trackMapEntriesUsed.Length() <= trackMapIndex) {
      trackMapEntriesUsed.AppendElement(false);
    }
    trackMapEntriesUsed[trackMapIndex] = true;

    TrackMapEntry* trackMap = &mTrackMap[trackMapIndex];
    AudioSegment segment;
    GraphTime next;
    TrackRate inputTrackRate = inputTrack.GetRate();
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // Ticks >= startTicks and < endTicks are in the interval
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      TrackTicks startTicks = trackMap->mSamplesPassedToResampler + segment.GetDuration();
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      NS_ASSERTION(startTicks == TimeToTicksRoundUp(inputTrackRate, outputStart),
                   "Samples missing");
      TrackTicks endTicks = TimeToTicksRoundUp(inputTrackRate, outputEnd);
      TrackTicks ticks = endTicks - startTicks;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // See comments in TrackUnionStream::CopyTrackData
        StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
        StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
        TrackTicks inputTrackEndPoint =
            inputTrack.IsEnded() ? inputTrack.GetEnd() : TRACK_TICKS_MAX;

        if (trackMap->mEndOfLastInputIntervalInInputStream != inputStart ||
            trackMap->mEndOfLastInputIntervalInOutputStream != outputStart) {
          // Start of a new series of intervals where neither stream is blocked.
          trackMap->mEndOfConsumedInputTicks = TimeToTicksRoundDown(inputTrackRate, inputStart) - 1;
        }
        TrackTicks inputStartTicks = trackMap->mEndOfConsumedInputTicks;
        TrackTicks inputEndTicks = inputStartTicks + ticks;
        trackMap->mEndOfConsumedInputTicks = inputEndTicks;
        trackMap->mEndOfLastInputIntervalInInputStream = inputEnd;
        trackMap->mEndOfLastInputIntervalInOutputStream = outputEnd;

        if (inputStartTicks < 0) {
          // Data before the start of the track is just null.
          segment.AppendNullData(-inputStartTicks);
          inputStartTicks = 0;
        }
        if (inputEndTicks > inputStartTicks) {
          segment.AppendSlice(*inputTrack.GetSegment(),
                              std::min(inputTrackEndPoint, inputStartTicks),
                              std::min(inputTrackEndPoint, inputEndTicks));
        }
        // Pad if we're looking past the end of the track
        segment.AppendNullData(ticks - segment.GetDuration());
      }
    }

    trackMap->mSamplesPassedToResampler += segment.GetDuration();
    trackMap->ResampleInputData(&segment);

    if (trackMap->mResampledData.GetDuration() < mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE) {
      // We don't have enough data. Delay it.
      trackMap->mResampledData.InsertNullDataAtStart(
        mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE - trackMap->mResampledData.GetDuration());
    }
    audioSegments.AppendElement()->AppendSlice(trackMap->mResampledData,
      mCurrentOutputPosition, mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    trackMap->mResampledData.ForgetUpTo(mCurrentOutputPosition + WEBAUDIO_BLOCK_SIZE);
    inputChannels = GetAudioChannelsSuperset(inputChannels, trackMap->mResamplerChannelCount);
  }

  for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
    if (i >= int32_t(trackMapEntriesUsed.Length()) || !trackMapEntriesUsed[i]) {
      mTrackMap.RemoveElementAt(i);
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    nsAutoTArray<float,GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioChunk tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          AllocateAudioBlock(inputChannels, &mLastChunks[0]);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  mCurrentOutputPosition += WEBAUDIO_BLOCK_SIZE;

  // Use AudioNodeStream's AdvanceOutputSegment to push the media stream graph along, even when the output is null data.
  AdvanceOutputSegment();
}
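The delay step near the end — InsertNullDataAtStart when the resampler hasn't yet produced a full block past the current output position — guarantees that the following 128-tick AppendSlice always has data to cut. A minimal numeric sketch of that invariant (names illustrative):

#include <cstdint>
#include <cstdio>

typedef int64_t TrackTicks;
const TrackTicks kBlock = 128; // WEBAUDIO_BLOCK_SIZE

int main() {
  TrackTicks resampledDuration = 300;     // what the resampler has produced
  TrackTicks currentOutputPosition = 256; // where the next block starts
  TrackTicks needed = currentOutputPosition + kBlock; // 384
  if (resampledDuration < needed) {
    // Stands in for InsertNullDataAtStart: pre-pad with silence so the
    // next 128-tick slice always exists.
    TrackTicks pad = needed - resampledDuration;
    resampledDuration += pad;
    std::printf("padded %lld ticks of silence\n", (long long)pad); // 84
  }
  return 0;
}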
AudioChunk*
AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 0;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunk;
    // XXX when we implement DelayNode, this will no longer be true and we'll
    // need to treat a null chunk (when the DelayNode hasn't had a chance
    // to produce data yet) as silence here.
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0) {
    aTmpChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
    return aTmpChunk;
  }

  if (inputChunkCount == 1) {
    return inputChunks[0];
  }

  AllocateAudioBlock(outputChannelCount, aTmpChunk);

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
      NS_ASSERTION(outputChannelCount == channels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    }

    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk->mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }

  return aTmpChunk;
}
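The i == 0 branching at the bottom implements a common mixing micro-optimization: the first input is copied (with gain) into the output, so no separate zeroing pass is needed, and every later input is added. A standalone sketch with illustrative stand-ins for AudioBlockCopyChannelWithScale and AudioBlockAddChannelWithScale:

#include <cstddef>
#include <cstdio>

const size_t kBlockSize = 128; // mirrors WEBAUDIO_BLOCK_SIZE

void CopyWithScale(const float* in, float gain, float* out) {
  for (size_t i = 0; i < kBlockSize; ++i) out[i] = in[i] * gain;
}
void AddWithScale(const float* in, float gain, float* out) {
  for (size_t i = 0; i < kBlockSize; ++i) out[i] += in[i] * gain;
}

int main() {
  float a[kBlockSize], b[kBlockSize], out[kBlockSize];
  for (size_t i = 0; i < kBlockSize; ++i) { a[i] = 1.0f; b[i] = 0.5f; }
  CopyWithScale(a, 1.0f, out); // first input: copy, no memset needed
  AddWithScale(b, 2.0f, out);  // later inputs: accumulate
  std::printf("%f\n", out[0]); // 2.000000
  return 0;
}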
/* changed by [email protected] (see relaod.txt for info) */
int QTFileBroadcaster::Play(char *mTimeFile)
/* ***************************************************** */
{
    SInt16  err = 0;
    Float64 transmitTime = 0;
    MediaStream *theStreamPtr = NULL;   
    RTpPacket   rtpPacket;
    unsigned int sleptTime;
    SInt32 movieStartOffset = 0; //z
    Bool16      negativeTime = false;
    fMovieDuration = fRTPFilePtr->GetMovieDuration();
    fSendTimeOffset = 0.0;
    fMovieStart = true;
    fNumMoviesPlayed ++;
    
    if (fMovieEndTime > 0) // take into account the movie load time as well as the last movie's early end.
    {   UInt64 timeNow = PlayListUtils::Milliseconds();
        fMovieIntervalTime = timeNow - fMovieEndTime;

        SInt32 earlySleepTimeMilli = (SInt32)(fMovieTimeDiffMilli - fMovieIntervalTime);
        earlySleepTimeMilli -= 40; // Don't sleep the entire time; we need some time to execute or else we will be late
        if (earlySleepTimeMilli > 0)
        {   OSThread::Sleep( earlySleepTimeMilli);
        }
    }
    
    fMovieStartTime = PlayListUtils::Milliseconds();    
    fMediaStreamList.MovieStarted(fMovieStartTime); 
    
/* changed by [email protected] (see relaod.txt for info) */
    if(mTimeFile!=NULL)
    {
        FILE *fTimeFile = NULL;
        struct timeval start, dur, end;
        struct tm tm_start, tm_dur, tm_end, timeResult;

        memset (&start,0, sizeof(start));

        SInt64 timenow = OS::Milliseconds();
        start.tv_sec = (long) OS::TimeMilli_To_UnixTimeSecs(timenow);
        start.tv_usec = (long) ((OS::TimeMilli_To_UnixTimeMilli(timenow) - (start.tv_sec * 1000)) * 1000);

        dur.tv_sec = (long)fMovieDuration;
        dur.tv_usec = (long)((fMovieDuration - dur.tv_sec) * 1000000);
        
        end.tv_sec = start.tv_sec + dur.tv_sec + (long)((start.tv_usec + dur.tv_usec) / 1000000);
        end.tv_usec = (start.tv_usec + dur.tv_usec) % 1000000;
                time_t startSecs = start.tv_sec;
                time_t endSecs = end.tv_sec;
        memcpy(&tm_start, qtss_localtime(&startSecs, &timeResult), sizeof(struct tm));
        memcpy(&tm_end, qtss_localtime(&endSecs, &timeResult), sizeof(struct tm));

        tm_dur.tm_hour = dur.tv_sec / 3600;
        tm_dur.tm_min = (dur.tv_sec % 3600) / 60;
        tm_dur.tm_sec = (dur.tv_sec % 3600) % 60;
        
        // initialize all current movie parameters to unknown ("-").
        
        ::strcpy(fCurrentMovieName, "-");
        ::strcpy(fCurrentMovieCopyright, "-");
        ::strcpy(fCurrentMovieComment, "-");
        ::strcpy(fCurrentMovieAuthor, "-");
        ::strcpy(fCurrentMovieArtist, "-");
        ::strcpy(fCurrentMovieAlbum, "-");

        /* save start time, stop time and length of currently playing song to .current file */
        fTimeFile = fopen(mTimeFile, "a");
        if(fTimeFile)
        {   
            SimpleString *theQTTextPtr = fMovieSDPParser->fQTTextLines.Begin();
            while (theQTTextPtr != NULL)
            {
                char tmp[256];
                ::memcpy(tmp, theQTTextPtr->fTheString, theQTTextPtr->fLen);
                tmp[theQTTextPtr->fLen] = 0;
                // if this SDP parameter is needed for logging then cache it here so
                // we can log it later.
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-nam:")!=NULL)
                    ::strcpy(fCurrentMovieName, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-cpy:")!=NULL)
                    ::strcpy(fCurrentMovieCopyright, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-cmt:")!=NULL)
                    ::strcpy(fCurrentMovieComment, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-aut:")!=NULL)
                    ::strcpy(fCurrentMovieAuthor, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-ART:")!=NULL)
                    ::strcpy(fCurrentMovieArtist, &tmp[16]);
                if (::strstr(theQTTextPtr->fTheString, "a=x-qt-text-alb:")!=NULL)
                    ::strcpy(fCurrentMovieAlbum, &tmp[16]);
                fwrite(theQTTextPtr->fTheString,theQTTextPtr->fLen, sizeof(char),fTimeFile);
                qtss_fprintf(fTimeFile,"\n");
                theQTTextPtr = fMovieSDPParser->fQTTextLines.Next();
            }

            time_t startTime = (time_t) start.tv_sec;
            time_t endTime = (time_t) end.tv_sec;
            char buffer[kTimeStrSize];
            char *timestringStart = qtss_ctime(&startTime, buffer, sizeof(buffer));
            qtss_fprintf(fTimeFile,"b=%02d:%02d:%02d:%06d %ld %s", (int) tm_start.tm_hour, (int) tm_start.tm_min, (int) tm_start.tm_sec, (int)start.tv_usec, (long int) startTime, timestringStart);
            char *timestringEnd = qtss_ctime(&endTime, buffer, sizeof(buffer));
            qtss_fprintf(fTimeFile,"e=%02d:%02d:%02d:%06d %ld %s", (int)tm_end.tm_hour, (int) tm_end.tm_min,(int)  tm_end.tm_sec, (int) end.tv_usec,(long int) endTime, timestringEnd);
            qtss_fprintf(fTimeFile,"d=%02d:%02d:%02d:%06d %d \n", (int) tm_dur.tm_hour, (int) tm_dur.tm_min,(int)  tm_dur.tm_sec, (int) dur.tv_usec, (int)dur.tv_sec);

            fclose(fTimeFile);
        }   
    }

    while (true) 
    {
        if (fQuitImmediatePtr && *fQuitImmediatePtr){err = 0; break; } // quit now not an error
        
        if (fBroadcastDefPtr->mTheSession)
        {   UInt32 thePacketQLen = 0;
            thePacketQLen = fBroadcastDefPtr->mTheSession->GetPacketQLen();
            SInt64 maxSleep = PlayListUtils::Milliseconds() + 1000; 
            if (thePacketQLen > eMaxPacketQLen)
            {   //qtss_printf("PacketQ too big = %lu \n", (UInt32) thePacketQLen);
                while ( (eMaxPacketQLen/2) < fBroadcastDefPtr->mTheSession->GetPacketQLen())
                {   this->SleepInterval(100.0);
                    if (maxSleep < PlayListUtils::Milliseconds())
                        break;
                }
                //qtss_printf("PacketQ after sleep = %lu \n", (UInt32) fBroadcastDefPtr->mTheSession->GetPacketQLen());
                continue;
            }
        }
        
        
        transmitTime = fRTPFilePtr->GetNextPacket(&rtpPacket.fThePacket, &rtpPacket.fLength);
        theStreamPtr = (MediaStream*)fRTPFilePtr->GetLastPacketTrack()->Cookie1;
        err = fRTPFilePtr->Error();
        if (err != QTRTPFile::errNoError)   {err = eMovieFileInvalid; break; } // error getting packet
        if (NULL == rtpPacket.fThePacket)   {err = 0; break; } // end of movie not an error
        if (NULL == theStreamPtr)           {err = eMovieFileInvalid; break; }// an error

        
        transmitTime *= (Float64) PlayListUtils::eMilli; // convert to milliseconds
        if (transmitTime < 0.0 && negativeTime == false) // Deal with negative transmission times
        {   movieStartOffset += (SInt32) (transmitTime / 15.0);
            negativeTime = true;
        }
        sleptTime = (unsigned int) Sleep(transmitTime);
        
        err = theStreamPtr->Send(&rtpPacket);
            
        if (err != 0)  { break; } 
        err = fMediaStreamList.UpdateStreams();
        if (err != 0)  { break; } 
        
        if (    (fBroadcastDefPtr != NULL)
            &&  (fBroadcastDefPtr->mTheSession != NULL)
            &&  (fBroadcastDefPtr->mTheSession->GetReasonForDying() != BroadcasterSession::kDiedNormally)   
            )   
         { break; } 
    };
    
    fMovieEndTime = (SInt64) PlayListUtils::Milliseconds(); 
    fMediaStreamList.MovieEnded(fMovieEndTime);

    // See if the movie duration is greater than the time it took to send the packets.
    // The difference is a delay that we insert before playing the next movie.
    SInt64 playDurationMilli = (SInt64) fMovieEndTime - (SInt64) fMovieStartTime;
    fMovieTimeDiffMilli =  ((SInt64) ( (Float64) fMovieDuration * (Float64) PlayListUtils::eMilli)) - (SInt64) playDurationMilli;
    fMovieTimeDiffMilli-= (movieStartOffset/2);

    return err;
}
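The heart of Play() is a pacing loop: each packet carries a transmit time relative to movie start, and the broadcaster sleeps until that moment before sending. A rough standalone sketch of the idea with std::chrono (packet times are illustrative, not from the QTRTPFile API):

#include <chrono>
#include <cstdio>
#include <thread>

int main() {
  using Clock = std::chrono::steady_clock;
  const double transmitTimesMs[] = {0.0, 20.0, 40.0};
  const auto movieStart = Clock::now();
  for (double t : transmitTimesMs) {
    auto due = movieStart + std::chrono::duration<double, std::milli>(t);
    std::this_thread::sleep_until(due); // returns immediately if already late
    std::printf("sending packet due at %.0f ms\n", t);
  }
  return 0;
}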
Example #16
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    // It is possible for mLastChunks to be empty here, because `a` might be an
    // AudioNodeStream that has not been scheduled yet: it is further down the
    // graph _but_ has a connection back to this node. Because we enforce the
    // presence of at least one DelayNode, with at least one block of delay, and
    // because the output of a DelayNode that has been fed less than
    // `delayTime` worth of audio is silence, we can simply continue here:
    // this input cannot influence the output of this node. On the next
    // iteration, a->mLastChunks.IsEmpty() will be false, and everything will
    // work as usual.
    if (a->mLastChunks.IsEmpty()) {
      continue;
    }

    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->mChannelData.Length() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
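ComputedNumberOfChannels wraps the same channelCountMode switch spelled out in Example #4. A sketch of how the three Web Audio modes resolve the channel count computed from the inputs (names illustrative, not the Mozilla helper):

#include <algorithm>
#include <cstdint>
#include <cstdio>

enum class ChannelCountMode { Max, ClampedMax, Explicit };

uint32_t ResolveChannelCount(ChannelCountMode mode, uint32_t computed,
                             uint32_t channelCount) {
  switch (mode) {
    case ChannelCountMode::Explicit:
      return channelCount;                     // ignore the computed value
    case ChannelCountMode::ClampedMax:
      return std::min(computed, channelCount); // cap at channelCount
    case ChannelCountMode::Max:
      return computed;                         // keep the computed superset
  }
  return computed; // unreachable; silences compiler warnings
}

int main() {
  std::printf("%u\n", ResolveChannelCount(ChannelCountMode::ClampedMax, 6, 2)); // 2
  return 0;
}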
Example #17
void TrackUnionStream::CopyTrackData(StreamTracks::Track* aInputTrack,
                                     uint32_t aMapIndex, GraphTime aFrom,
                                     GraphTime aTo,
                                     bool* aOutputTrackFinished) {
  TrackMapEntry* map = &mTrackMap[aMapIndex];
  TRACE_AUDIO_CALLBACK_COMMENT(
      "Input stream %p track %i -> TrackUnionStream %p track %i",
      map->mInputPort->GetSource(), map->mInputTrackID, this,
      map->mOutputTrackID);
  StreamTracks::Track* outputTrack = mTracks.FindTrack(map->mOutputTrackID);
  MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(),
             "Can't copy to ended track");

  MediaSegment* segment = map->mSegment;
  MediaStream* source = map->mInputPort->GetSource();

  GraphTime next;
  *aOutputTrackFinished = false;
  for (GraphTime t = aFrom; t < aTo; t = next) {
    MediaInputPort::InputInterval interval =
        map->mInputPort->GetNextInputInterval(t);
    interval.mEnd = std::min(interval.mEnd, aTo);
    StreamTime inputEnd =
        source->GraphTimeToStreamTimeWithBlocking(interval.mEnd);

    if (aInputTrack->IsEnded() && aInputTrack->GetEnd() <= inputEnd) {
      *aOutputTrackFinished = true;
      break;
    }

    if (interval.mStart >= interval.mEnd) {
      break;
    }
    StreamTime ticks = interval.mEnd - interval.mStart;
    next = interval.mEnd;

    StreamTime outputStart = outputTrack->GetEnd();

    if (interval.mInputIsBlocked) {
      segment->AppendNullData(ticks);
      STREAM_LOG(
          LogLevel::Verbose,
          ("TrackUnionStream %p appending %lld ticks of null data to track %d",
           this, (long long)ticks, outputTrack->GetID()));
    } else if (InMutedCycle()) {
      segment->AppendNullData(ticks);
    } else {
      if (source->IsSuspended()) {
        segment->AppendNullData(aTo - aFrom);
      } else {
        MOZ_ASSERT(outputTrack->GetEnd() ==
                       GraphTimeToStreamTimeWithBlocking(interval.mStart),
                   "Samples missing");
        StreamTime inputStart =
            source->GraphTimeToStreamTimeWithBlocking(interval.mStart);
        segment->AppendSlice(*aInputTrack->GetSegment(), inputStart, inputEnd);
      }
    }
    ApplyTrackDisabling(outputTrack->GetID(), segment);
    for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
      if (b.mTrackID != outputTrack->GetID()) {
        continue;
      }
      b.mListener->NotifyQueuedChanges(Graph(), outputStart, *segment);
    }
    outputTrack->GetSegment()->AppendFrom(segment);
  }
}