Example #1
bool
AudioCallbackDriver::OSXDeviceSwitchingWorkaround()
{
    MonitorAutoLock mon(GraphImpl()->GetMonitor());
    if (mSelfReference) {
        // Apparently, depending on the OSX version, the callback is called
        // "some" number of times on device switch, then stops being called, and
        // then starts being called again. Waiting for 10 callbacks is on the
        // safe side, and still amounts to a short delay (< 100ms).
        //STREAM_LOG(LogLevel::Debug, ("Callbacks during switch: %d", mCallbackReceivedWhileSwitching+1));
        if (mCallbackReceivedWhileSwitching++ >= 10) {
            STREAM_LOG(LogLevel::Debug, ("Got %d callbacks, switching back to CallbackDriver", mCallbackReceivedWhileSwitching));
            // If we have a self reference, we have temporarily fallen back to a
            // system clock driver, but we just got called back, which means the
            // OSX audio backend has switched to the new device.
            // Ask the graph to switch back to the previous AudioCallbackDriver
            // (`this`); once the graph has effectively switched, we can drop the
            // self reference and unref the SystemClockDriver we fell back on.
            if (GraphImpl()->CurrentDriver() == this) {
                mSelfReference.Drop(this);
                mNextDriver = nullptr;
            } else {
                GraphImpl()->CurrentDriver()->SwitchAtNextIteration(this);
            }

        }
        return true;
    }

    return false;
}
Example #2
void AudioCallbackDriver::CompleteAudioContextOperations(AsyncCubebOperation aOperation)
{
    nsAutoTArray<StreamAndPromiseForOperation, 1> array;

    // We can't lock for the whole function because AudioContextOperationCompleted
    // will grab the monitor
    {
        MonitorAutoLock mon(GraphImpl()->GetMonitor());
        array.SwapElements(mPromisesForOperation);
    }

    for (uint32_t i = 0; i < array.Length(); i++) {
        StreamAndPromiseForOperation& s = array[i];
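        // INIT completes pending Resume operations; SHUTDOWN completes all the
        // other operations.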
        if ((aOperation == AsyncCubebOperation::INIT &&
                s.mOperation == dom::AudioContextOperation::Resume) ||
                (aOperation == AsyncCubebOperation::SHUTDOWN &&
                 s.mOperation != dom::AudioContextOperation::Resume)) {

            GraphImpl()->AudioContextOperationCompleted(s.mStream,
                    s.mPromise,
                    s.mOperation);
            array.RemoveElementAt(i);
            i--;
        }
    }

    if (!array.IsEmpty()) {
        MonitorAutoLock mon(GraphImpl()->GetMonitor());
        mPromisesForOperation.AppendElements(array);
    }
}
Example #3
void
AudioCallbackDriver::DeviceChangedCallback() {
    MonitorAutoLock mon(mGraphImpl->GetMonitor());
    PanOutputIfNeeded(mMicrophoneActive);
    // On OSX, changing the output device causes the audio thread to stop
    // calling the audio callback, so we're unable to process real-time input
    // data, and latency builds up.
    // We switch to a system clock driver until the audio callbacks start coming
    // in again, so we keep pulling from the input stream and everything keeps
    // working apart from the audio output.
#ifdef XP_MACOSX
    // Don't bother doing the device switching dance if the graph is not RUNNING
    // (starting up, shutting down), because we haven't started pulling from the
    // SourceMediaStream.
    if (!GraphImpl()->Running()) {
        return;
    }

    if (mSelfReference) {
        return;
    }
    STREAM_LOG(LogLevel::Error, ("Switching to SystemClockDriver during output switch"));
    mSelfReference.Take(this);
    mCallbackReceivedWhileSwitching = 0;
    mNextDriver = new SystemClockDriver(GraphImpl());
    mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
    mGraphImpl->SetCurrentDriver(mNextDriver);
    mNextDriver->Start();
#endif
}
Example #4
void
AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                            ChannelCountMode aChannelCountMode,
                                            ChannelInterpretation aChannelInterpretation)
{
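  // Forward the new mixing parameters to the graph as a ControlMessage; Run()
  // applies them on the AudioNodeStream when the message is processed.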
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            uint32_t aNumberOfChannels,
            ChannelCountMode aChannelCountMode,
            ChannelInterpretation aChannelInterpretation)
      : ControlMessage(aStream),
        mNumberOfChannels(aNumberOfChannels),
        mChannelCountMode(aChannelCountMode),
        mChannelInterpretation(aChannelInterpretation)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->
        SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
                                       mChannelInterpretation);
    }
    uint32_t mNumberOfChannels;
    ChannelCountMode mChannelCountMode;
    ChannelInterpretation mChannelInterpretation;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aNumberOfChannels,
                                                 aChannelCountMode,
                                                 aChannelInterpretation));
}
Example #5
void
AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                                        double aStreamTime)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
            double aStreamTime)
      : ControlMessage(aStream), mStreamTime(aStreamTime),
        mRelativeToStream(aRelativeToStream), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
    }
    double mStreamTime;
    MediaStream* mRelativeToStream;
    uint32_t mIndex;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex,
      aContext->DestinationStream(),
      aContext->DOMTimeToStreamTime(aStreamTime)));
}
Example #6
void
AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
                                        double aStreamTime)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
            double aStreamTime)
      : ControlMessage(aStream), mStreamTime(aStreamTime),
        mRelativeToStream(aRelativeToStream), mIndex(aIndex)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
    }
    double mStreamTime;
    MediaStream* mRelativeToStream;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex,
                                                 aContext->DestinationStream(),
                                                 aStreamTime));
}
Example #7
void
AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                            ChannelCountMode aChannelCountMode,
                                            ChannelInterpretation aChannelInterpretation)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            uint32_t aNumberOfChannels,
            ChannelCountMode aChannelCountMode,
            ChannelInterpretation aChannelInterpretation)
      : ControlMessage(aStream),
        mNumberOfChannels(aNumberOfChannels),
        mChannelCountMode(aChannelCountMode),
        mChannelInterpretation(aChannelInterpretation)
    {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->
        SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
                                       mChannelInterpretation);
    }
    uint32_t mNumberOfChannels;
    ChannelCountMode mChannelCountMode;
    ChannelInterpretation mChannelInterpretation;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aNumberOfChannels,
                                         aChannelCountMode,
                                         aChannelInterpretation));
}
Example #8
void
AudioNodeStream::SendTimelineEvent(uint32_t aIndex,
                                   const AudioTimelineEvent& aEvent)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioTimelineEvent& aEvent)
      : ControlMessage(aStream),
        mEvent(aEvent),
        mSampleRate(aStream->SampleRate()),
        mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          RecvTimelineEvent(mIndex, mEvent);
    }
    AudioTimelineEvent mEvent;
    TrackRate mSampleRate;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(new Message(this, aIndex, aEvent));
}
Example #9
void
AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
                                      const AudioParamTimeline& aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioParamTimeline& aValue)
      : ControlMessage(aStream),
        mValue(aValue),
        mSampleRate(aStream->SampleRate()),
        mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetTimelineParameter(mIndex, mValue, mSampleRate);
    }
    AudioParamTimeline mValue;
    TrackRate mSampleRate;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
Example #10
void
AudioNodeStream::CheckForInactive()
{
  if (((mActiveInputCount > 0 || mEngine->IsActive()) &&
       !mMarkAsFinishedAfterThisBlock) ||
      !mIsActive) {
    return;
  }

  mIsActive = false;
  mInputChunks.Clear(); // not required for foreseeable future
  for (auto& chunk : mLastChunks) {
    chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  if (!(mFlags & EXTERNAL_OUTPUT)) {
    GraphImpl()->IncrementSuspendCount(this);
  }
  if (IsAudioParamStream()) {
    return;
  }

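  // Notify the streams we feed into that one of their inputs went inactive.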
  for (const auto& consumer : mConsumers) {
    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
    if (ns) {
      ns->DecrementActiveInputCount();
    }
  }
}
Example #11
void
AudioNodeStream::ScheduleCheckForInactive()
{
  if (mActiveInputCount > 0 && !mMarkAsFinishedAfterThisBlock) {
    return;
  }

  nsAutoPtr<CheckForInactiveMessage> message(new CheckForInactiveMessage(this));
  GraphImpl()->RunMessageAfterProcessing(Move(message));
}
Example #12
void
AudioNodeStream::ScheduleCheckForInactive()
{
  if (mActiveInputCount > 0 && !mMarkAsFinishedAfterThisBlock) {
    return;
  }

  auto message = MakeUnique<CheckForInactiveMessage>(this);
  GraphImpl()->RunMessageAfterProcessing(Move(message));
}
Example #13
  virtual void Run() override
  {
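    // Move the stream's start time back by mAdvance, pad the audio track with
    // the same amount of silence, and decrement the suspend count so the
    // stream can resume.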
    auto ns = static_cast<AudioNodeStream*>(mStream);
    ns->mBufferStartTime -= mAdvance;

    StreamBuffer::Track* track = ns->EnsureTrack(AUDIO_TRACK);
    track->Get<AudioSegment>()->AppendNullData(mAdvance);

    ns->GraphImpl()->DecrementSuspendCount(mStream);
  }
Example #14
void
AudioCaptureStream::Start()
{
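    // Set mStarted from a ControlMessage so that the change happens on the
    // graph thread.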
    class Message : public ControlMessage {
    public:
        explicit Message(AudioCaptureStream* aStream)
            : ControlMessage(aStream), mStream(aStream) {}

        virtual void Run()
        {
            mStream->mStarted = true;
        }

    protected:
        AudioCaptureStream* mStream;
    };
    GraphImpl()->AppendMessage(MakeUnique<Message>(this));
}
Example #15
void
AudioNodeStream::SetPassThrough(bool aPassThrough)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, bool aPassThrough)
      : ControlMessage(aStream), mPassThrough(aPassThrough)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->mPassThrough = mPassThrough;
    }
    bool mPassThrough;
  };

  GraphImpl()->AppendMessage(new Message(this, aPassThrough));
}
Example #16
void
AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
                                      const AudioEventTimeline<ErrorResult>& aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioEventTimeline<ErrorResult>& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetTimelineParameter(mIndex, mValue);
    }
    AudioEventTimeline<ErrorResult> mValue;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
Example #17
void
AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetThreeDPointParameter(mIndex, mValue);
    }
    ThreeDPoint mValue;
    uint32_t mIndex;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
Example #18
void
AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            already_AddRefed<ThreadSharedFloatArrayBufferList>& aBuffer)
      : ControlMessage(aStream), mBuffer(aBuffer) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetBuffer(mBuffer.forget());
    }
    nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aBuffer));
}
Example #19
void
AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetThreeDPointParameter(mIndex, mValue);
    }
    ThreeDPoint mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue));
}
Example #20
void
AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            already_AddRefed<ThreadSharedFloatArrayBufferList>& aBuffer)
      : ControlMessage(aStream), mBuffer(aBuffer)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetBuffer(mBuffer.forget());
    }
    RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aBuffer));
}
Example #21
void
AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
    {}
    virtual void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetInt32Parameter(mIndex, mValue);
    }
    int32_t mValue;
    uint32_t mIndex;
  };

  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
Example #22
void
AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            nsTArray<float>& aData)
      : ControlMessage(aStream)
    {
      mData.SwapElements(aData);
    }
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->SetRawArrayData(mData);
    }
    nsTArray<float> mData;
  };

  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aData));
}
Example #23
void
AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
{
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream,
            nsTArray<float>& aData)
      : ControlMessage(aStream)
    {
      mData.SwapElements(aData);
    }
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->SetRawArrayData(mData);
    }
    nsTArray<float> mData;
  };

  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aData));
}
Example #24
void
AudioNodeStream::SetActive()
{
  if (mIsActive || mMarkAsFinishedAfterThisBlock) {
    return;
  }

  mIsActive = true;
  if (!(mFlags & EXTERNAL_OUTPUT)) {
    GraphImpl()->DecrementSuspendCount(this);
  }
  if (IsAudioParamStream()) {
    // Consumers merely influence stream order.
    // They do not read from the stream.
    return;
  }

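  // Notify the streams we feed into that they have gained an active input.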
  for (const auto& consumer : mConsumers) {
    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
    if (ns) {
      ns->IncrementActiveInputCount();
    }
  }
}
Example #25
  void TrackUnionStream::CopyTrackData(StreamBuffer::Track* aInputTrack,
                     uint32_t aMapIndex, GraphTime aFrom, GraphTime aTo,
                     bool* aOutputTrackFinished)
  {
    TrackMapEntry* map = &mTrackMap[aMapIndex];
    StreamBuffer::Track* outputTrack = mBuffer.FindTrack(map->mOutputTrackID);
    MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(), "Can't copy to ended track");

    MediaSegment* segment = map->mSegment;
    MediaStream* source = map->mInputPort->GetSource();

    GraphTime next;
    *aOutputTrackFinished = false;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = map->mInputPort->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
      StreamTime inputTrackEndPoint = STREAM_TIME_MAX;

      if (aInputTrack->IsEnded() &&
          aInputTrack->GetEnd() <= inputEnd) {
        inputTrackEndPoint = aInputTrack->GetEnd();
        *aOutputTrackFinished = true;
      }

      if (interval.mStart >= interval.mEnd) {
        break;
      }
      StreamTime ticks = interval.mEnd - interval.mStart;
      next = interval.mEnd;

      StreamTime outputStart = outputTrack->GetEnd();

      if (interval.mInputIsBlocked) {
        // Maybe the input track ended?
        segment->AppendNullData(ticks);
        STREAM_LOG(LogLevel::Verbose, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
                   this, (long long)ticks, outputTrack->GetID()));
      } else if (InMutedCycle()) {
        segment->AppendNullData(ticks);
      } else {
        if (GraphImpl()->StreamSuspended(source)) {
          segment->AppendNullData(aTo - aFrom);
        } else {
          MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTime(interval.mStart),
                     "Samples missing");
          StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
          segment->AppendSlice(*aInputTrack->GetSegment(),
                               std::min(inputTrackEndPoint, inputStart),
                               std::min(inputTrackEndPoint, inputEnd));
        }
      }
      ApplyTrackDisabling(outputTrack->GetID(), segment);
      for (uint32_t j = 0; j < mListeners.Length(); ++j) {
        MediaStreamListener* l = mListeners[j];
        l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
                                    outputStart, 0, *segment);
      }
      outputTrack->GetSegment()->AppendFrom(segment);
    }
  }
Example #26
void
AudioCallbackDriver::Init()
{
    cubeb_stream_params params;
    uint32_t latency;

    MOZ_ASSERT(!NS_IsMainThread(),
               "This is blocking and should never run on the main thread.");

    mSampleRate = params.rate = CubebUtils::PreferredSampleRate();

#if defined(__ANDROID__)
#if defined(MOZ_B2G)
    params.stream_type = CubebUtils::ConvertChannelToCubebType(mAudioChannel);
#else
    params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
#endif
    if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
        NS_WARNING("Bad stream type");
        return;
    }
#else
    (void)mAudioChannel;
#endif

    params.channels = mGraphImpl->AudioChannelCount();
    if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
        params.format = CUBEB_SAMPLE_S16NE;
    } else {
        params.format = CUBEB_SAMPLE_FLOAT32NE;
    }

    if (cubeb_get_min_latency(CubebUtils::GetCubebContext(), params, &latency) != CUBEB_OK) {
        NS_WARNING("Could not get minimal latency from cubeb.");
        return;
    }

    cubeb_stream* stream;
    if (cubeb_stream_init(CubebUtils::GetCubebContext(), &stream,
                          "AudioCallbackDriver", params, latency,
                          DataCallback_s, StateCallback_s, this) == CUBEB_OK) {
        mAudioStream.own(stream);
    } else {
        NS_WARNING("Could not create a cubeb stream for MediaStreamGraph, falling back to a SystemClockDriver");
        // Fall back to a driver using a normal thread.
        mNextDriver = new SystemClockDriver(GraphImpl());
        mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
        mGraphImpl->SetCurrentDriver(mNextDriver);
        DebugOnly<bool> found = mGraphImpl->RemoveMixerCallback(this);
        NS_WARN_IF_FALSE(!found, "Mixer callback not added when switching?");
        mNextDriver->Start();
        return;
    }

    cubeb_stream_register_device_changed_callback(mAudioStream,
            AudioCallbackDriver::DeviceChangedCallback_s);

    StartStream();

    STREAM_LOG(LogLevel::Debug, ("AudioCallbackDriver started."));
}
Example #27
void
AudioNodeStream::AdvanceAndResume(StreamTime aAdvance)
{
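  // Bump the main-thread view of the current time right away; the queued
  // AdvanceAndResumeMessage pads the stream and resumes it on the graph thread.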
  mMainThreadCurrentTime += aAdvance;
  GraphImpl()->AppendMessage(new AdvanceAndResumeMessage(this, aAdvance));
}