Example #1
// Start
void CAVMediaFlow::Start (bool startvideo, 
			  bool startaudio,
			  bool starttext)
{
  if (m_pConfig == NULL) {
    return;
  }
  
  if (!startvideo && !startaudio && !starttext) return;

  // Create the audio, text, and video sources
  if (m_pConfig->GetBoolValue(CONFIG_AUDIO_ENABLE) &&
      m_audioSource == NULL &&
      startaudio) {
    m_audioSource = CreateAudioSource(m_pConfig, m_videoSource);
  }
  if (m_pConfig->GetBoolValue(CONFIG_TEXT_ENABLE) &&
      m_textSource == NULL &&
      starttext) {
    m_textSource = CreateTextSource(m_pConfig);
    debug_message("Created text source %p", m_textSource);
  }

  if (m_pConfig->GetBoolValue(CONFIG_VIDEO_ENABLE) && startvideo) {
    if (m_videoSource == NULL) {
      debug_message("start - creating video source");
      m_videoSource = CreateVideoSource(m_pConfig);
    }
    if (m_audioSource != NULL) {
      m_audioSource->SetVideoSource(m_videoSource);
    }
  }

  m_maxAudioSamplesPerFrame = 0;

  CMediaStream *s;
  s = m_stream_list->GetHead();

  // Create the components for each stream.  This makes sure
  // that no more than one instance of a profile is being encoded.
  while (s != NULL) {
    CAudioEncoder *ae_ptr = NULL;
    CVideoEncoder *ve_ptr = NULL;
    CTextEncoder *te_ptr = NULL;
    if (s->GetBoolValue(STREAM_VIDEO_ENABLED) && startvideo) {
      // see if profile has already been started
      ve_ptr = FindOrCreateVideoEncoder(s->GetVideoProfile());
      s->SetVideoEncoder(ve_ptr);
      m_pConfig->SetBoolValue(CONFIG_VIDEO_ENABLE, true);
    }
    if (s->GetBoolValue(STREAM_AUDIO_ENABLED) && startaudio) {
      // see if profile has already been started
      ae_ptr = FindOrCreateAudioEncoder(s->GetAudioProfile());
      s->SetAudioEncoder(ae_ptr);
      m_pConfig->SetBoolValue(CONFIG_AUDIO_ENABLE, true);
      // when we start the encoder, we will have to pass the configured
      // channels, as well as the initial sample rate (basically,
      // replicate SetAudioSrc here)...
    }
    if (s->GetBoolValue(STREAM_TEXT_ENABLED) && starttext) {
      // see if profile has already been started
      te_ptr = FindOrCreateTextEncoder(s->GetTextProfile());
      s->SetTextEncoder(te_ptr);
      m_pConfig->SetBoolValue(CONFIG_TEXT_ENABLE, true);
    }
    if (s->GetBoolValue(STREAM_TRANSMIT)) {
      // check if transmitter has been started on encoder
      // create rtp destination, add to transmitter
      if (ve_ptr != NULL) {
	ve_ptr->AddRtpDestination(s,
				  m_pConfig->GetBoolValue(CONFIG_RTP_DISABLE_TS_OFFSET),
				  m_pConfig->GetIntegerValue(CONFIG_RTP_MCAST_TTL),
				  s->GetIntegerValue(STREAM_VIDEO_SRC_PORT));
      }
      if (ae_ptr != NULL) {
	ae_ptr->AddRtpDestination(s,
				  m_pConfig->GetBoolValue(CONFIG_RTP_DISABLE_TS_OFFSET),
				  m_pConfig->GetIntegerValue(CONFIG_RTP_MCAST_TTL),
				  s->GetIntegerValue(STREAM_AUDIO_SRC_PORT));
      }
      if (te_ptr != NULL) {
	te_ptr->AddRtpDestination(s,
				  m_pConfig->GetBoolValue(CONFIG_RTP_DISABLE_TS_OFFSET),
				  m_pConfig->GetIntegerValue(CONFIG_RTP_MCAST_TTL),
				  s->GetIntegerValue(STREAM_TEXT_SRC_PORT));
      }
      createStreamSdp(m_pConfig, s);
    }

    if (s->GetBoolValue(STREAM_RECORD)) {
      // create file sink, add to above encoders.
      CMediaSink *recorder = s->CreateFileRecorder(m_pConfig);
      if (ve_ptr != NULL) {
	ve_ptr->AddSink(recorder);
      }
      if (ae_ptr != NULL) {
	ae_ptr->AddSink(recorder);
      }
      if (te_ptr != NULL) {
	te_ptr->AddSink(recorder);
      }
    }
    s = s->GetNext();
  }
  
  if (m_audioSource) {
    m_audioSource->SetAudioSrcSamplesPerFrame(m_maxAudioSamplesPerFrame);
    debug_message("Setting source sample per frame %u", m_maxAudioSamplesPerFrame);
  }
  // If a raw (uncompressed) file sink is needed, set it up here
  bool createdRaw = false;
  if (m_pConfig->GetBoolValue(CONFIG_RAW_ENABLE)) {
    if (m_rawSink == NULL) {
      m_rawSink = new CRawFileSink();
      m_rawSink->SetConfig(m_pConfig);
      createdRaw = true;
    }
    if (m_audioSource != NULL) {
      m_audioSource->AddSink(m_rawSink);
    }
    if (m_videoSource != NULL) {
      m_videoSource->AddSink(m_rawSink);
    }
    if (createdRaw) {
      m_rawSink->StartThread();
      m_rawSink->Start();
    }
  }

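  // Optionally record the raw (unencoded) audio and/or video into an MP4 file.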
  if (m_pConfig->GetBoolValue(CONFIG_RECORD_RAW_IN_MP4)) {
    if (m_pConfig->GetBoolValue(CONFIG_RECORD_RAW_IN_MP4_VIDEO) ||
	m_pConfig->GetBoolValue(CONFIG_RECORD_RAW_IN_MP4_AUDIO)) {
      bool createMp4Raw = false;
      if (m_mp4RawRecorder == NULL) {
	m_mp4RawRecorder = new CMp4Recorder(NULL);
	m_mp4RawRecorder->SetConfig(m_pConfig);
	createMp4Raw = true;
      }
      if (m_audioSource != NULL &&
	  m_pConfig->GetBoolValue(CONFIG_RECORD_RAW_IN_MP4_AUDIO)) {
	m_audioSource->AddSink(m_mp4RawRecorder);
      }
      if (m_videoSource != NULL &&
	  m_pConfig->GetBoolValue(CONFIG_RECORD_RAW_IN_MP4_VIDEO)) {
	m_videoSource->AddSink(m_mp4RawRecorder);
      }
      if (createMp4Raw) {
	m_mp4RawRecorder->StartThread();
	m_mp4RawRecorder->Start();
      }
    }
  }
  // Start the encoders and any sinks.  This may result in some sinks
  // (file sinks, in particular) receiving multiple starts.
  CMediaCodec *mc = m_video_encoder_list;
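  // No video encoders were created; unless the video source is shared
  // with the audio source, it is no longer needed and can be freed.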
  if (mc == NULL && m_videoSource != m_audioSource) {
    delete m_videoSource;
    m_videoSource = NULL;
  }
  while (mc != NULL) {
    mc->Start();
    mc->StartSinks();
    mc = mc->GetNext();
  }
  mc = m_audio_encoder_list;
  while (mc != NULL) {
    mc->Start();
    mc->StartSinks();
    mc = mc->GetNext();
  }
  mc = m_text_encoder_list;
  while (mc != NULL) {
    mc->Start();
    mc->StartSinks();
    mc = mc->GetNext();
  }
  // finally, start sources...
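  // a source shared between audio and video only needs to be started once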
  if (m_videoSource && m_videoSource == m_audioSource) {
    m_videoSource->Start();
  } else {
    if (m_audioSource) {
      m_audioSource->Start();
    }
    if (m_videoSource) {
      m_videoSource->Start();
    }
  }
  
  if (m_textSource != NULL) {
    m_textSource->Start();
  }

  if (m_videoSource && startaudio) {
    // force video source to generate a key frame
    // so that sinks can quickly sync up
    m_videoSource->RequestKeyFrame(0);
  }
  
  m_started = true;
}
Example #2
bool OmxDecoder::Init()
{
#if defined(MOZ_WIDGET_ANDROID)
  // OMXClient::connect() always returns OK and aborts fatally if
  // it can't connect. We may need to implement the connect functionality
  // ourselves if this proves to be an issue.
  if (!sClientInstance.IsValid()) {
    LOG("OMXClient failed to connect");
    return false;
  }
#endif

  // Register sniffers, if they are not already registered in this process.
  DataSource::RegisterDefaultSniffers();

  sp<DataSource> dataSource =
    DataSource::CreateFromURI(static_cast<char*>(mDecoder->mResource));
  if (!dataSource.get() || dataSource->initCheck()) {
    return false;
  }

  sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
  if (extractor == nullptr) {
    return false;
  }

  ssize_t audioTrackIndex = -1;
  ssize_t videoTrackIndex = -1;
  const char *audioMime = nullptr;
  const char *videoMime = nullptr;

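  // Walk the container's tracks and remember the first video track and the
  // first audio track, selected by MIME type prefix.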
  for (size_t i = 0; i < extractor->countTracks(); ++i) {
    sp<MetaData> meta = extractor->getTrackMetaData(i);

    const char *mime;
    if (!meta->findCString(kKeyMIMEType, &mime)) {
      continue;
    }

    if (videoTrackIndex == -1 && !strncasecmp(mime, "video/", 6)) {
      videoTrackIndex = i;
      videoMime = mime;
    } else if (audioTrackIndex == -1 && !strncasecmp(mime, "audio/", 6)) {
      audioTrackIndex = i;
      audioMime = mime;
    }
  }

  if (videoTrackIndex == -1 && audioTrackIndex == -1) {
    return false;
  }

  int64_t totalDurationUs = 0;

#ifdef MOZ_WIDGET_GONK
  sp<IOMX> omx = GetOMX();
#else
  sp<IOMX> omx = sClientInstance.get()->interface();
#endif

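  // Create and start the video decoder for the selected video track (if any)
  // and fold its duration into the total.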
  sp<MediaSource> videoTrack;
  sp<MediaSource> videoSource;
  if (videoTrackIndex != -1 && (videoTrack = extractor->getTrack(videoTrackIndex)) != nullptr) {
    videoSource = CreateVideoSource(mPluginHost, omx, videoTrack);
    if (videoSource == nullptr) {
      LOG("OMXCodec failed to initialize video decoder for \"%s\"", videoMime);
      return false;
    }
    status_t status = videoSource->start();
    if (status != OK) {
      LOG("videoSource->start() failed with status %#x", status);
      return false;
    }
    int64_t durationUs;
    if (videoTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
      if (durationUs < 0)
        LOG("video duration %lld should be nonnegative", durationUs);
      if (durationUs > totalDurationUs)
        totalDurationUs = durationUs;
    }
  }

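  // Create and start the audio decoder; a raw PCM ("audio/raw") track is used
  // directly, anything else is decoded through OMXCodec.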
  sp<MediaSource> audioTrack;
  sp<MediaSource> audioSource;
  if (audioTrackIndex != -1 && (audioTrack = extractor->getTrack(audioTrackIndex)) != nullptr) {
    if (!strcasecmp(audioMime, "audio/raw")) {
      audioSource = audioTrack;
    } else {
      audioSource = OMXCodec::Create(omx,
                                     audioTrack->getFormat(),
                                     false, // createEncoder: we want a decoder
                                     audioTrack);
    }

    if (audioSource == nullptr) {
      LOG("OMXCodec failed to initialize audio decoder for \"%s\"", audioMime);
      return false;
    }

    status_t status = audioSource->start();
    if (status != OK) {
      LOG("audioSource->start() failed with status %#x", status);
      return false;
    }

    int64_t durationUs;
    if (audioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
      if (durationUs < 0)
        LOG("audio duration %lld should be nonnegative", durationUs);
      if (durationUs > totalDurationUs)
        totalDurationUs = durationUs;
    }
  }

  // set decoder state
  mVideoTrack = videoTrack;
  mVideoSource = videoSource;
  mAudioTrack = audioTrack;
  mAudioSource = audioSource;
  mDurationUs = totalDurationUs;

  if (mVideoSource.get() && !SetVideoFormat())
    return false;

  // To reliably get the channel and sample rate data we need to read from the
  // audio source until we get an INFO_FORMAT_CHANGED status.
  if (mAudioSource.get()) {
    if (mAudioSource->read(&mAudioBuffer) != INFO_FORMAT_CHANGED) {
      sp<MetaData> meta = mAudioSource->getFormat();
      if (!meta->findInt32(kKeyChannelCount, &mAudioChannels) ||
          !meta->findInt32(kKeySampleRate, &mAudioSampleRate)) {
        return false;
      }
      mAudioMetadataRead = true;

      if (mAudioChannels < 0) {
        LOG("audio channel count %d must be nonnegative", mAudioChannels);
        return false;
      }

      if (mAudioSampleRate < 0) {
        LOG("audio sample rate %d must be nonnegative", mAudioSampleRate);
        return false;
      }
    } else if (!SetAudioFormat()) {
      return false;
    }
  }
  return true;
}