Example #1
static demuxer_t* demux_open_hack_avi(demuxer_t *demuxer)
{
   struct MPOpts *opts = demuxer->opts;
   sh_audio_t* sh_a;

   demuxer = demux_open_avi(demuxer);
   if(!demuxer) return NULL; // failed to open
   sh_a = demuxer->audio->sh;
   if(demuxer->audio->id != -2 && sh_a) {
#ifdef CONFIG_OGGVORBIS
    // support for Ogg-in-AVI:
    if(sh_a->format == 0xFFFE)
      demuxer = init_avi_with_ogg(demuxer);
    else if(sh_a->format == 0x674F) {
      stream_t* s;
      demuxer_t  *od;
      s = new_ds_stream(demuxer->audio);
      od = new_demuxer(opts, s,DEMUXER_TYPE_OGG,-1,-2,-2,NULL);
      if(!demux_ogg_open(od)) {
        mp_tmsg( MSGT_DEMUXER,MSGL_ERR,"Unable to open the Ogg demuxer.\n");
        free_stream(s);
        demuxer->audio->id = -2;
      } else
        demuxer = new_demuxers_demuxer(demuxer,od,demuxer);
   }
#endif
   }

   return demuxer;
}
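
The two magic numbers tested above are easier to follow with the WAVE format-tag conventions spelled out: 0xFFFE is the WAVE_FORMAT_EXTENSIBLE tag, which init_avi_with_ogg() probes further, and 0x674F stores the ASCII bytes "Og" in little-endian order, a tag commonly associated with Vorbis audio muxed into AVI, which is why the example layers the Ogg demuxer on top of the AVI audio stream. The standalone sketch below only illustrates those constants; it is not MPlayer code, and the tag descriptions are the usual conventions rather than anything the example itself declares.

/* Standalone illustration (not MPlayer code) of the format tags checked
 * in the example above.  Descriptions follow common WAVE/OggDS usage. */
#include <stdint.h>
#include <stdio.h>

static const char *describe_format_tag(uint16_t tag)
{
    switch (tag) {
    case 0xFFFE: return "WAVE_FORMAT_EXTENSIBLE - probe further (init_avi_with_ogg)";
    case 0x674F: return "\"Og\" - Vorbis-in-AVI, hand the audio to the Ogg demuxer";
    case 0x0055: return "MPEG layer 3 - ordinary audio, no special handling";
    default:     return "other audio format";
    }
}

int main(void)
{
    const uint16_t tags[] = { 0xFFFE, 0x674F, 0x0055 };
    for (size_t i = 0; i < sizeof(tags) / sizeof(tags[0]); i++)
        printf("0x%04X: %s\n", (unsigned)tags[i], describe_format_tag(tags[i]));
    /* 0x674F split into little-endian bytes is 0x4F 0x67, i.e. "Og". */
    printf("0x674F as bytes: '%c%c'\n", 0x674F & 0xFF, 0x674F >> 8);
    return 0;
}
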
Example #2
static demuxer_t* demux_open_hack_avi(demuxer_t *demuxer)
{
   sh_audio_t* sh_a;

   demuxer = demux_open_avi(demuxer);
   if(!demuxer) return NULL; // failed to open
   sh_a = demuxer->audio->sh;

    /* demux_avi does not parse the audio stream itself, so tell the
       decoder to run a parser on the data before decoding. */
   if(sh_a)
     sh_a->need_parsing = 1; // added by gysun

   if(demuxer->audio->id != -2 && sh_a) {
#ifdef CONFIG_OGGVORBIS
    // support for Ogg-in-AVI:
    if(sh_a->format == 0xFFFE)
      demuxer = init_avi_with_ogg(demuxer);
    else if(sh_a->format == 0x674F) {
      stream_t* s;
      demuxer_t  *od;
      s = new_ds_stream(demuxer->audio);
      od = new_demuxer(s,DEMUXER_TYPE_OGG,-1,-2,-2,NULL);
      if(!demux_ogg_open(od)) {
        mp_msg( MSGT_DEMUXER,MSGL_ERR,MSGTR_ErrorOpeningOGGDemuxer);
        free_stream(s);
        demuxer->audio->id = -2;
      } else
        demuxer = new_demuxers_demuxer(demuxer,od,demuxer);
   }
#endif
   }

   return demuxer;
}
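
Both AVI examples rely on the same layering trick: new_ds_stream() re-exposes the AVI demuxer's audio packet queue as an ordinary byte stream, an Ogg demuxer is opened on that stream, and new_demuxers_demuxer() combines the two, with video and subtitles served by the AVI demuxer and audio by the Ogg one (judging from the argument order here and in Example #3). The sketch below is a deliberately simplified model of that idea; the types and functions in it are hypothetical stand-ins, not MPlayer's API.

/* Hypothetical, simplified model of the demuxer layering used in both
 * examples above.  None of these names are MPlayer's; they only show
 * "demuxer A's output becomes demuxer B's input, then combine the two". */
#include <stdio.h>

typedef struct source {          /* stands in for demuxer_t              */
    const char *name;
    struct source *video_from;   /* which source serves video packets    */
    struct source *audio_from;   /* which source serves audio packets    */
    struct source *subs_from;    /* which source serves subtitle packets */
} source;

/* Mirrors the shape of new_demuxers_demuxer(vd, ad, sd): the combined
 * front-end forwards each packet type to the chosen underlying source. */
static source combine(source *vd, source *ad, source *sd)
{
    source d = { "combined", vd, ad, sd };
    return d;
}

int main(void)
{
    source avi = { "avi", NULL, NULL, NULL };
    source ogg = { "ogg-on-avi-audio", NULL, NULL, NULL };

    /* As in the examples: video and subtitles stay with the AVI demuxer,
     * audio comes from the Ogg demuxer opened on the AVI audio stream. */
    source front = combine(&avi, &ogg, &avi);
    printf("video <- %s, audio <- %s, subs <- %s\n",
           front.video_from->name, front.audio_from->name,
           front.subs_from->name);
    return 0;
}
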
Example #3
extern "C" demuxer_t* demux_open_rtp(demuxer_t* demuxer) {
  struct MPOpts *opts = demuxer->opts;
  Boolean success = False;
  do {
    TaskScheduler* scheduler = BasicTaskScheduler::createNew();
    if (scheduler == NULL) break;
    UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
    if (env == NULL) break;

    RTSPClient* rtspClient = NULL;
    SIPClient* sipClient = NULL;

    if (demuxer == NULL || demuxer->stream == NULL) break;  // shouldn't happen
    demuxer->stream->eof = 0; // just in case

    // Look at the stream's 'priv' field to see if we were initiated
    // via a SDP description:
    char* sdpDescription = (char*)(demuxer->stream->priv);
    if (sdpDescription == NULL) {
      // We weren't given a SDP description directly, so assume that
      // we were given a RTSP or SIP URL:
      char const* protocol = demuxer->stream->streaming_ctrl->url->protocol;
      char const* url = demuxer->stream->streaming_ctrl->url->url;
      extern int verbose;
      if (strcmp(protocol, "rtsp") == 0) {
	rtspClient = RTSPClient::createNew(*env, verbose, "MPlayer");
	if (rtspClient == NULL) {
	  fprintf(stderr, "Failed to create RTSP client: %s\n",
		  env->getResultMsg());
	  break;
	}
	sdpDescription = openURL_rtsp(rtspClient, url);
      } else { // SIP
	unsigned char desiredAudioType = 0; // PCMU (use 3 for GSM)
	sipClient = SIPClient::createNew(*env, desiredAudioType, NULL,
					 verbose, "MPlayer");
	if (sipClient == NULL) {
	  fprintf(stderr, "Failed to create SIP client: %s\n",
		  env->getResultMsg());
	  break;
	}
	sipClient->setClientStartPortNum(8000);
	sdpDescription = openURL_sip(sipClient, url);
      }

      if (sdpDescription == NULL) {
	fprintf(stderr, "Failed to get a SDP description from URL \"%s\": %s\n",
		url, env->getResultMsg());
	break;
      }
    }

    // Now that we have a SDP description, create a MediaSession from it:
    MediaSession* mediaSession = MediaSession::createNew(*env, sdpDescription);
    if (mediaSession == NULL) break;


    // Create a 'RTPState' structure containing the state that we just created,
    // and store it in the demuxer's 'priv' field, for future reference:
    RTPState* rtpState = new RTPState;
    rtpState->sdpDescription = sdpDescription;
    rtpState->rtspClient = rtspClient;
    rtpState->sipClient = sipClient;
    rtpState->mediaSession = mediaSession;
    rtpState->audioBufferQueue = rtpState->videoBufferQueue = NULL;
    rtpState->flags = 0;
    rtpState->firstSyncTime.tv_sec = rtpState->firstSyncTime.tv_usec = 0;
    demuxer->priv = rtpState;

    int audiofound = 0, videofound = 0;
    // Create RTP receivers (sources) for each subsession:
    MediaSubsessionIterator iter(*mediaSession);
    MediaSubsession* subsession;
    unsigned desiredReceiveBufferSize;
    while ((subsession = iter.next()) != NULL) {
      // Ignore any subsession that's not audio or video:
      if (strcmp(subsession->mediumName(), "audio") == 0) {
	if (audiofound) {
	  fprintf(stderr, "Additional subsession \"audio/%s\" skipped\n", subsession->codecName());
	  continue;
	}
	desiredReceiveBufferSize = 100000;
      } else if (strcmp(subsession->mediumName(), "video") == 0) {
	if (videofound) {
	  fprintf(stderr, "Additional subsession \"video/%s\" skipped\n", subsession->codecName());
	  continue;
	}
	desiredReceiveBufferSize = 2000000;
      } else {
	continue;
      }

      // If a specific client port was requested (the global rtsp_port
      // setting), apply it before initiating the subsession:
      if (rtsp_port)
          subsession->setClientPortNum (rtsp_port);

      if (!subsession->initiate()) {
	fprintf(stderr, "Failed to initiate \"%s/%s\" RTP subsession: %s\n", subsession->mediumName(), subsession->codecName(), env->getResultMsg());
      } else {
	fprintf(stderr, "Initiated \"%s/%s\" RTP subsession on port %d\n", subsession->mediumName(), subsession->codecName(), subsession->clientPortNum());

	// Set the OS's socket receive buffer sufficiently large to avoid
	// incoming packets getting dropped between successive reads from this
	// subsession's demuxer.  Depending on the bitrate(s) that you expect,
	// you may wish to tweak the "desiredReceiveBufferSize" values above.
	int rtpSocketNum = subsession->rtpSource()->RTPgs()->socketNum();
	int receiveBufferSize
	  = increaseReceiveBufferTo(*env, rtpSocketNum,
				    desiredReceiveBufferSize);
	if (verbose > 0) {
	  fprintf(stderr, "Increased %s socket receive buffer to %d bytes \n",
		  subsession->mediumName(), receiveBufferSize);
	}

	if (rtspClient != NULL) {
	  // Issue a RTSP "SETUP" command on the chosen subsession:
	  if (!rtspClient->setupMediaSubsession(*subsession, False,
						rtsp_transport_tcp)) break;
	  if (!strcmp(subsession->mediumName(), "audio"))
	    audiofound = 1;
	  if (!strcmp(subsession->mediumName(), "video"))
            videofound = 1;
	}
      }
    }

    if (rtspClient != NULL) {
      // Issue a RTSP aggregate "PLAY" command on the whole session:
      if (!rtspClient->playMediaSession(*mediaSession)) break;
    } else if (sipClient != NULL) {
      sipClient->sendACK(); // to start the stream flowing
    }

    // Now that the session is ready to be read, do additional
    // MPlayer codec-specific initialization on each subsession:
    iter.reset();
    while ((subsession = iter.next()) != NULL) {
      if (subsession->readSource() == NULL) continue; // not reading this

      unsigned flags = 0;
      if (strcmp(subsession->mediumName(), "audio") == 0) {
	rtpState->audioBufferQueue
	  = new ReadBufferQueue(subsession, demuxer, "audio");
	rtpState->audioBufferQueue->otherQueue = &(rtpState->videoBufferQueue);
	rtpCodecInitialize_audio(demuxer, subsession, flags);
      } else if (strcmp(subsession->mediumName(), "video") == 0) {
	rtpState->videoBufferQueue
	  = new ReadBufferQueue(subsession, demuxer, "video");
	rtpState->videoBufferQueue->otherQueue = &(rtpState->audioBufferQueue);
	rtpCodecInitialize_video(demuxer, subsession, flags);
      }
      rtpState->flags |= flags;
    }
    success = True;
  } while (0);
  if (!success) return NULL; // an error occurred

  // Hack: If audio and video are demuxed together on a single RTP stream,
  // then create a new "demuxer_t" structure to allow the higher-level
  // code to recognize this:
  if (demux_is_multiplexed_rtp_stream(demuxer)) {
    stream_t* s = new_ds_stream(demuxer->video);
    demuxer_t* od = demux_open(opts, s, DEMUXER_TYPE_UNKNOWN,
			       opts->audio_id, opts->video_id, opts->sub_id,
                               NULL);
    demuxer = new_demuxers_demuxer(od, od, od);
  }

  return demuxer;
}
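
The RTPState structure is used here only through its fields; its real definition lives in MPlayer's LIVE555 glue code and is not part of this example. Purely as a reading aid, the reconstruction below is inferred from the assignments made above; the field types are guesses based on how each member is used, not the actual declaration.

/* Reconstructed for readability from the assignments in the example above;
 * the real definition in MPlayer's demux_rtp code may differ. */
#include <sys/time.h>             /* struct timeval */

struct RTSPClient;                /* LIVE555 classes, opaque here */
struct SIPClient;
struct MediaSession;
struct ReadBufferQueue;           /* MPlayer-side packet queue, opaque here */

typedef struct RTPState {
    char *sdpDescription;                     /* SDP text the session was built from */
    struct RTSPClient *rtspClient;            /* set for rtsp:// URLs, else NULL      */
    struct SIPClient *sipClient;              /* set for sip:// URLs, else NULL       */
    struct MediaSession *mediaSession;        /* the LIVE555 session object           */
    struct ReadBufferQueue *audioBufferQueue; /* filled in per subsession later       */
    struct ReadBufferQueue *videoBufferQueue;
    unsigned flags;                           /* codec-specific flags OR'ed together  */
    struct timeval firstSyncTime;             /* sync bookkeeping, zeroed at open     */
} RTPState;

int main(void)
{
    /* Mirrors the zero/NULL initialisation the example performs before the
     * subsession loop fills the queues in. */
    RTPState state = { 0 };
    (void)state;
    return 0;
}
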