Esempio n. 1
0
MediaStream::MediaStream(ExecutionContext* context,
                         MediaStreamDescriptor* streamDescriptor)
    : m_descriptor(streamDescriptor),
      m_scheduledEventTimer(this, &MediaStream::scheduledEventTimerFired),
      m_executionContext(context) {
  m_descriptor->setClient(this);

  // Wrap every audio component of the descriptor in a MediaStreamTrack.
  const size_t audioCount = m_descriptor->numberOfAudioComponents();
  m_audioTracks.reserveCapacity(audioCount);
  for (size_t index = 0; index < audioCount; ++index) {
    MediaStreamTrack* track =
        MediaStreamTrack::create(context, m_descriptor->audioComponent(index));
    track->registerMediaStream(this);
    m_audioTracks.append(track);
  }

  // Likewise for the video components.
  const size_t videoCount = m_descriptor->numberOfVideoComponents();
  m_videoTracks.reserveCapacity(videoCount);
  for (size_t index = 0; index < videoCount; ++index) {
    MediaStreamTrack* track =
        MediaStreamTrack::create(context, m_descriptor->videoComponent(index));
    track->registerMediaStream(this);
    m_videoTracks.append(track);
  }

  // A stream with no tracks, or only ended ones, starts out inactive.
  if (emptyOrOnlyEndedTracks()) {
    m_descriptor->setActive(false);
  }
}
Esempio n. 2
0
void MediaStream::addRemoteTrack(MediaStreamComponent* component) {
  DCHECK(component);
  // Nothing to do once the owning execution context is gone.
  if (m_executionContext->isContextDestroyed())
    return;

  MediaStreamTrack* newTrack =
      MediaStreamTrack::create(m_executionContext, component);

  // File the new track under the list matching its source kind.
  if (component->source()->type() == MediaStreamSource::TypeAudio)
    m_audioTracks.append(newTrack);
  else if (component->source()->type() == MediaStreamSource::TypeVideo)
    m_videoTracks.append(newTrack);

  newTrack->registerMediaStream(this);
  m_descriptor->addComponent(component);

  scheduleDispatchEvent(
      MediaStreamTrackEvent::create(EventTypeNames::addtrack, newTrack));

  // Gaining a live track re-activates an inactive stream.
  if (!active() && !newTrack->ended()) {
    m_descriptor->setActive(true);
    scheduleDispatchEvent(Event::create(EventTypeNames::active));
  }
}
Esempio n. 3
0
MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    // A closed context cannot create new nodes.
    if (isContextClosed()) {
        throwExceptionForClosedState(exceptionState);
        return nullptr;
    }

    if (!mediaStream) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "invalid MediaStream source");
        return nullptr;
    }

    MediaStreamTrackVector tracks = mediaStream->getAudioTracks();
    if (tracks.isEmpty()) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "MediaStream has no audio track");
        return nullptr;
    }

    // Use the first audio track in the media stream.
    MediaStreamTrack* firstAudioTrack = tracks[0];
    OwnPtr<AudioSourceProvider> audioProvider = firstAudioTrack->createWebAudioSource();
    MediaStreamAudioSourceNode* sourceNode = MediaStreamAudioSourceNode::create(this, mediaStream, firstAudioTrack, audioProvider.release());

    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    sourceNode->mediaStreamAudioSourceHandler().setFormat(2, sampleRate());

    refNode(sourceNode); // context keeps reference until node is disconnected
    return sourceNode;
}
Esempio n. 4
0
  already_AddRefed<MediaStreamTrackSource>
  GetMediaStreamTrackSource(TrackID aInputTrackID) override
  {
    // Look the track up in the owned stream; it must exist for this input id.
    MediaStreamTrack* const ownedTrack =
      mStream->FindOwnedDOMTrack(mStream->GetOwnedStream(), aInputTrackID);
    MOZ_RELEASE_ASSERT(ownedTrack);

    return do_AddRef(&ownedTrack->GetSource());
  }
// Cloning constructor: creates a new wrapper around a clone of |other|'s
// private track, sharing the same script execution context.
MediaStreamTrack::MediaStreamTrack(MediaStreamTrack& other)
    : RefCounted()
    , ActiveDOMObject(other.scriptExecutionContext())
    // Clone the underlying private track so the two wrappers evolve
    // independently from here on.
    , m_privateTrack(*other.privateTrack().clone())
    , m_eventDispatchScheduled(false)
    , m_stoppingTrack(false)
{
    suspendIfNeeded();

    // Route callbacks from the cloned private track to this wrapper.
    m_privateTrack->setClient(this);
}
Esempio n. 6
0
void
DOMMediaStream::AddTrack(MediaStreamTrack& aTrack)
{
  MOZ_RELEASE_ASSERT(mPlaybackStream);

  RefPtr<ProcessedMediaStream> playbackDest = mPlaybackStream->AsProcessedStream();
  MOZ_ASSERT(playbackDest);
  if (!playbackDest) {
    return;
  }

  LOG(LogLevel::Info, ("DOMMediaStream %p Adding track %p (from stream %p with ID %d)",
                       this, &aTrack, aTrack.mOwningStream.get(), aTrack.mTrackID));

  if (mPlaybackStream->Graph() != aTrack.Graph()) {
    // Tracks from another MediaStreamGraph cannot be mixed in here; report
    // the problem to the console and bail out.
    NS_ASSERTION(false, "Cannot combine tracks from different MediaStreamGraphs");
    LOG(LogLevel::Error, ("DOMMediaStream %p Own MSG %p != aTrack's MSG %p",
                         this, mPlaybackStream->Graph(), aTrack.Graph()));

    nsAutoString idString;
    aTrack.GetId(idString);
    const char16_t* errorParams[] = { idString.get() };
    nsCOMPtr<nsPIDOMWindowInner> window = GetParentObject();
    nsIDocument* doc = window ? window->GetExtantDoc() : nullptr;
    nsContentUtils::ReportToConsole(nsIScriptError::errorFlag,
                                    NS_LITERAL_CSTRING("Media"),
                                    doc,
                                    nsContentUtils::eDOM_PROPERTIES,
                                    "MediaStreamAddTrackDifferentAudioChannel",
                                    errorParams, ArrayLength(errorParams));
    return;
  }

  if (HasTrack(aTrack)) {
    LOG(LogLevel::Debug, ("DOMMediaStream %p already contains track %p", this, &aTrack));
    return;
  }

  // Hook up the underlying track with our underlying playback stream.
  RefPtr<MediaInputPort> port =
    GetPlaybackStream()->AllocateInputPort(aTrack.GetOwnedStream(),
                                           aTrack.mTrackID);
  RefPtr<TrackPort> entry =
    new TrackPort(port, &aTrack, TrackPort::InputPortOwnership::OWNED);
  mTracks.AppendElement(entry.forget());
  NotifyTrackAdded(&aTrack);

  LOG(LogLevel::Debug, ("DOMMediaStream %p Added track %p", this, &aTrack));
}
void
DOMMediaStream::RemoveTrack(MediaStreamTrack& aTrack)
{
  LOG(LogLevel::Info, ("DOMMediaStream %p Removing track %p (from stream %p with ID %d)",
                       this, &aTrack, aTrack.GetStream(), aTrack.GetTrackID()));

  RefPtr<TrackPort> trackPort = FindPlaybackTrackPort(aTrack);
  if (!trackPort) {
    LOG(LogLevel::Debug, ("DOMMediaStream %p does not contain track %p", this, &aTrack));
    return;
  }

  // If the track comes from a TRACK_ANY input port (i.e., mOwnedPort), we need
  // to block it in the port. Doing this for a locked track is still OK as it
  // will first block the track, then destroy the port. Both cause the track to
  // end.
  trackPort->BlockTrackId(aTrack.GetTrackID());

  DebugOnly<bool> wasRemoved = mTracks.RemoveElement(trackPort);
  MOZ_ASSERT(wasRemoved);
  LOG(LogLevel::Debug, ("DOMMediaStream %p Removed track %p", this, &aTrack));
}
Esempio n. 8
0
// Removes the track wrapping |component| from this stream, mirroring a
// removal performed on the remote side.  Dispatches "removetrack", and
// "inactive" when the stream is left without live tracks.
void MediaStream::removeRemoteTrack(MediaStreamComponent* component) {
  DCHECK(component);
  if (m_executionContext->isContextDestroyed())
    return;

  // Pick the track list that matches the component's source kind.
  // Use nullptr (not 0) for pointer init, and DCHECK the result so a future
  // enum value cannot silently lead to a null dereference below.
  MediaStreamTrackVector* tracks = nullptr;
  switch (component->source()->type()) {
    case MediaStreamSource::TypeAudio:
      tracks = &m_audioTracks;
      break;
    case MediaStreamSource::TypeVideo:
      tracks = &m_videoTracks;
      break;
  }
  DCHECK(tracks);

  // Find the wrapper track for this component, if we still hold one.
  size_t index = kNotFound;
  for (size_t i = 0; i < tracks->size(); ++i) {
    if ((*tracks)[i]->component() == component) {
      index = i;
      break;
    }
  }
  if (index == kNotFound)
    return;

  m_descriptor->removeComponent(component);

  MediaStreamTrack* track = (*tracks)[index];
  track->unregisterMediaStream(this);
  tracks->remove(index);
  scheduleDispatchEvent(
      MediaStreamTrackEvent::create(EventTypeNames::removetrack, track));

  // Losing the last live track deactivates the stream.
  if (active() && emptyOrOnlyEndedTracks()) {
    m_descriptor->setActive(false);
    scheduleDispatchEvent(Event::create(EventTypeNames::inactive));
  }
}
void MediaEndpointPeerConnection::createOfferTask(RTCOfferOptions&, SessionDescriptionPromise& promise)
{
    ASSERT(!m_dtlsFingerprint.isEmpty());

    // Snapshot the session configuration for this offer, bumping the version.
    RefPtr<MediaEndpointSessionConfiguration> sessionConfig = MediaEndpointSessionConfiguration::create();
    sessionConfig->setSessionVersion(m_sdpSessionVersion++);

    RtpSenderVector currentSenders = m_client->getSenders();

    // Add one media description per sender.
    for (auto& sender : currentSenders) {
        MediaStreamTrack* track = sender->track();
        RefPtr<PeerMediaDescription> description = PeerMediaDescription::create();

        description->setMediaStreamId(sender->mediaStreamIds()[0]);
        description->setMediaStreamTrackId(track->id());
        description->setType(track->kind());
        description->setPayloads(track->kind() == "audio" ? m_defaultAudioPayloads : m_defaultVideoPayloads);
        description->setDtlsFingerprintHashFunction(m_dtlsFingerprintFunction);
        description->setDtlsFingerprint(m_dtlsFingerprint);
        description->setCname(m_cname);
        description->addSsrc(cryptographicallyRandomNumber());
        description->setIceUfrag(m_iceUfrag);
        description->setIcePassword(m_icePassword);

        sessionConfig->addMediaDescription(WTFMove(description));
    }

    String sdpString;
    SDPProcessor::Result result = m_sdpProcessor->generate(*sessionConfig, sdpString);
    if (result != SDPProcessor::Result::Success) {
        // NOTE(review): the promise is left unsettled on generator failure, so
        // callers awaiting it may hang — consider rejecting here; confirm
        // against the promise's contract.
        LOG_ERROR("SDPProcessor internal error");
        return;
    }

    promise.resolve(RTCSessionDescription::create("offer", sdpString));
}
MediaStreamTrack* MediaStreamTrack::create(ExecutionContext* context, MediaStreamComponent* component)
{
    // Wrap the component in a new track, then let it hook into the context's
    // suspension machinery before handing it out.
    MediaStreamTrack* newTrack = adoptRefCountedGarbageCollectedWillBeNoop(new MediaStreamTrack(context, component));
    newTrack->suspendIfNeeded();
    return newTrack;
}
MediaStreamTrack* MediaStreamTrack::create(ExecutionContext* context, MediaStreamComponent* component)
{
    // Construct the track and call suspendIfNeeded() before exposing it.
    auto* newTrack = new MediaStreamTrack(context, component);
    newTrack->suspendIfNeeded();
    return newTrack;
}
Esempio n. 12
0
bool
DOMMediaStream::OwnsTrack(const MediaStreamTrack& aTrack) const
{
  // A track is owned only if this stream is its origin AND it is registered
  // in our track set.
  if (aTrack.GetStream() != this) {
    return false;
  }
  return HasTrack(aTrack);
}