MPEG4ESVideoRTPSink
::MPEG4ESVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency,
		      u_int8_t profileAndLevelIndication, char const* configStr)
  : VideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "MP4V-ES"),
    fVOPIsPresent(False), fProfileAndLevelIndication(profileAndLevelIndication), fFmtpSDPLine(NULL) {
  fConfigBytes = parseGeneralConfigStr(configStr, fNumConfigBytes);
}
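The profile/level byte and parsed config bytes stored here are what the sink later uses to synthesize its "a=fmtp:" SDP attribute (fFmtpSDPLine). As a rough sketch (not the library's actual code), an RFC 3016-style fmtp line for MP4V-ES could be assembled like this, assuming the payload type and a hex config string are already known:

#include <cstdio>

// Sketch: build an RFC 3016 "a=fmtp:" line for an MP4V-ES stream.
// payloadType, profileAndLevel and configHex are assumed inputs; the
// real sink derives them from its constructor arguments.
int makeMP4VESFmtpLine(char* buf, std::size_t bufSize, unsigned payloadType,
                       unsigned profileAndLevel, char const* configHex) {
  return std::snprintf(buf, bufSize,
                       "a=fmtp:%u profile-level-id=%u;config=%s\r\n",
                       payloadType, profileAndLevel, configHex);
}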
void MPEG4VideoStreamFramer
::setConfigInfo(u_int8_t profileAndLevelIndication, char const* configStr) {
  fProfileAndLevelIndication = profileAndLevelIndication;

  delete[] fConfigBytes;
  fConfigBytes = parseGeneralConfigStr(configStr, fNumConfigBytes);
}
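Both fragments above lean on live555's parseGeneralConfigStr(), which turns the hexadecimal "config" value from an SDP fmtp line into raw bytes that the caller releases with delete[]. A minimal sketch of that conversion (assuming well-formed hex input; the real function also handles malformed strings):

#include <cstring>

// Sketch of a parseGeneralConfigStr()-style helper: turn a hex string
// such as "000001B003..." into binary bytes.  Assumes valid hex input.
static unsigned char hexNibble(char c) {
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'a' && c <= 'f') return c - 'a' + 10;
  return c - 'A' + 10; // assume 'A'..'F'
}

unsigned char* parseHexConfigStr(char const* configStr, unsigned& configSize) {
  configSize = std::strlen(configStr) / 2;     // two hex digits per byte
  unsigned char* result = new unsigned char[configSize];
  for (unsigned i = 0; i < configSize; ++i)
    result[i] = (hexNibble(configStr[2*i]) << 4) | hexNibble(configStr[2*i + 1]);
  return result;                               // caller: delete[] result
}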
Example #3
void setupStreams() {
  static MediaSubsessionIterator* setupIter = NULL;
  if (setupIter == NULL) setupIter = new MediaSubsessionIterator(*session);
  while ((subsession = setupIter->next()) != NULL) {
    // We have another subsession left to set up:
    if (subsession->clientPortNum() == 0) continue; // port # was not set

    setupSubsession(subsession, streamUsingTCP, continueAfterSETUP);
    return;
  }

  // We're done setting up subsessions.
  delete setupIter; setupIter = NULL; // reset the static iterator, so it isn't reused after deletion
  if (!madeProgress) shutdown();

  // Create output files:
  if (createReceivers) {
#if 0 /*wayde*/
    if (outputQuickTimeFile) {
      // Create a "QuickTimeFileSink", to write to 'stdout':
      qtOut = QuickTimeFileSink::createNew(*env, *session, "stdout",
					   fileSinkBufferSize,
					   movieWidth, movieHeight,
					   movieFPS,
					   packetLossCompensate,
					   syncStreams,
					   generateHintTracks,
					   generateMP4Format);
      if (qtOut == NULL) {
	*env << "Failed to create QuickTime file sink for stdout: " << env->getResultMsg();
	shutdown();
      }

      qtOut->startPlaying(sessionAfterPlaying, NULL);
    } else if (outputAVIFile) {
      // Create an "AVIFileSink", to write to 'stdout':
      aviOut = AVIFileSink::createNew(*env, *session, "stdout",
				      fileSinkBufferSize,
				      movieWidth, movieHeight,
				      movieFPS,
				      packetLossCompensate);
      if (aviOut == NULL) {
	*env << "Failed to create AVI file sink for stdout: " << env->getResultMsg();
	shutdown();
      }

      aviOut->startPlaying(sessionAfterPlaying, NULL);
    } else
#endif /*wayde*/
    {
      // Create and start "FileSink"s for each subsession:
      madeProgress = False;
      MediaSubsessionIterator iter(*session);
      while ((subsession = iter.next()) != NULL) {
	if (subsession->readSource() == NULL) continue; // was not initiated

	// Create an output file for each desired stream:
	char outFileName[1000];
	if (singleMedium == NULL) {
	  // Output file name is
	  //     "<filename-prefix><medium_name>-<codec_name>-<counter>"
	  static unsigned streamCounter = 0;
	  snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d",
		   fileNamePrefix, subsession->mediumName(),
		   subsession->codecName(), ++streamCounter);
	} else {
	  sprintf(outFileName, "stdout");
	}
	FileSink* fileSink;
	if (strcmp(subsession->mediumName(), "audio") == 0 &&
	    (strcmp(subsession->codecName(), "AMR") == 0 ||
	     strcmp(subsession->codecName(), "AMR-WB") == 0)) {
	  // For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
	  fileSink = AMRAudioFileSink::createNew(*env, outFileName,
						 fileSinkBufferSize, oneFilePerFrame);
	} else if (strcmp(subsession->mediumName(), "video") == 0 &&
	    (strcmp(subsession->codecName(), "H264") == 0)) {
	  // For H.264 video streams, we use a special sink that inserts start codes:
	  fileSink = H264VideoFileSink::createNew(*env, outFileName,
						 fileSinkBufferSize, oneFilePerFrame);
	} else {
	  // Normal case:
	  fileSink = FileSink::createNew(*env, outFileName,
					 fileSinkBufferSize, oneFilePerFrame);
	}
	subsession->sink = fileSink;
	if (subsession->sink == NULL) {
	  *env << "Failed to create FileSink for \"" << outFileName
		  << "\": " << env->getResultMsg() << "\n";
	} else {
	  if (singleMedium == NULL) {
	    *env << "Created output file: \"" << outFileName << "\"\n";
	  } else {
	    *env << "Outputting data from the \"" << subsession->mediumName()
			<< "/" << subsession->codecName()
			<< "\" subsession to 'stdout'\n";
	  }

	  if (strcmp(subsession->mediumName(), "video") == 0 &&
	      strcmp(subsession->codecName(), "MP4V-ES") == 0 &&
	      subsession->fmtp_config() != NULL) {
	    // For MPEG-4 video RTP streams, the 'config' information
	    // from the SDP description contains useful VOL etc. headers.
	    // Insert this data at the front of the output file:
	    unsigned configLen;
	    unsigned char* configData
	      = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
	    struct timeval timeNow;
	    gettimeofday(&timeNow, NULL);
	    fileSink->addData(configData, configLen, timeNow);
	    delete[] configData;
	  }

	  subsession->sink->startPlaying(*(subsession->readSource()),
					 subsessionAfterPlaying,
					 subsession);

	  // Also set a handler to be called if a RTCP "BYE" arrives
	  // for this subsession:
	  if (subsession->rtcpInstance() != NULL) {
	    subsession->rtcpInstance()->setByeHandler(subsessionByeHandler,
						      subsession);
	  }

	  madeProgress = True;
	}
      }
      if (!madeProgress) shutdown();
    }
  }

  // Finally, start playing each subsession, to start the data flow:
  if (duration == 0) {
    if (scale > 0) duration = session->playEndTime() - initialSeekTime; // use SDP end time
    else if (scale < 0) duration = initialSeekTime;
  }
  if (duration < 0) duration = 0.0;

  endTime = initialSeekTime;
  if (scale > 0) {
    if (duration <= 0) endTime = -1.0f;
    else endTime = initialSeekTime + duration;
  } else {
    endTime = initialSeekTime - duration;
    if (endTime < 0) endTime = 0.0f;
  }

  startPlayingSession(session, initialSeekTime, endTime, scale, continueAfterPLAY);
}
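The duration/endTime arithmetic at the end of setupStreams() is easier to follow in isolation. This hypothetical helper restates the same logic: for forward play (scale > 0) the end point is the seek time plus the duration (defaulting to the SDP end time), with -1.0 meaning "play to the end"; for reverse play the stream runs from the seek time back toward zero:

// Restatement of the trick-play range computation above (assumes the
// same variable meanings as in setupStreams()).
double computeEndTime(double initialSeekTime, double duration,
                      float scale, double sdpEndTime) {
  if (duration == 0) {
    if (scale > 0) duration = sdpEndTime - initialSeekTime;
    else if (scale < 0) duration = initialSeekTime;
  }
  if (duration < 0) duration = 0.0;

  if (scale > 0) {
    return duration <= 0 ? -1.0 /*play to the end*/
                         : initialSeekTime + duration;
  } else {
    double endTime = initialSeekTime - duration;
    return endTime < 0 ? 0.0 : endTime;
  }
}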
void rtpCodecInitialize_video(demuxer_t* demuxer,
			      MediaSubsession* subsession,
			      unsigned& flags) {
  flags = 0;
  // Create a dummy video stream header
  // to make the main MPlayer code happy:
  sh_video_t* sh_video = new_sh_video(demuxer,0);
  BITMAPINFOHEADER* bih
    = (BITMAPINFOHEADER*)calloc(1,sizeof(BITMAPINFOHEADER));
  bih->biSize = sizeof(BITMAPINFOHEADER);
  sh_video->bih = bih;
  demux_stream_t* d_video = demuxer->video;
  d_video->sh = sh_video; sh_video->ds = d_video;
  
  // Map known video MIME types to the BITMAPINFOHEADER parameters
  // that this program uses.  (Note that not all types need all
  // of the parameters to be set.)
  if (strcmp(subsession->codecName(), "MPV") == 0) {
    flags |= RTPSTATE_IS_MPEG12_VIDEO;
  } else if (strcmp(subsession->codecName(), "MP1S") == 0 ||
	     strcmp(subsession->codecName(), "MP2T") == 0) {
    flags |= RTPSTATE_IS_MPEG12_VIDEO|RTPSTATE_IS_MULTIPLEXED;
  } else if (strcmp(subsession->codecName(), "H263") == 0 ||
	     strcmp(subsession->codecName(), "H263-2000") == 0 ||
	     strcmp(subsession->codecName(), "H263-1998") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('H','2','6','3');
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "H264") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('H','2','6','4');
    unsigned int configLen = 0;
    unsigned char* configData
      = parseH264ConfigStr(subsession->fmtp_spropparametersets(), configLen);
    sh_video->bih = bih = insertVideoExtradata(bih, configData, configLen);
    delete[] configData;
#ifdef USE_LIBAVCODEC
    av_register_codec_parser(&h264_parser);
    h264parserctx = av_parser_init(CODEC_ID_H264);
#endif
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "H261") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('H','2','6','1');
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "JPEG") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('M','J','P','G');
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "MP4V-ES") == 0) {
    bih->biCompression = sh_video->format
      = mmioFOURCC('m','p','4','v');
    // For the codec to work correctly, it may need a 'VOL Header' to be
    // inserted at the front of the data stream.  Construct this from the
    // "config" MIME parameter, which was present (hopefully) in the
    // session's SDP description:
    unsigned configLen;
    unsigned char* configData
      = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
    sh_video->bih = bih = insertVideoExtradata(bih, configData, configLen);
    delete[] configData; // insertVideoExtradata() copies the data (cf. the H264 branch above)
    needVideoFrameRate(demuxer, subsession);
  } else if (strcmp(subsession->codecName(), "X-QT") == 0 ||
	     strcmp(subsession->codecName(), "X-QUICKTIME") == 0) {
    // QuickTime generic RTP format, as described in
    // http://developer.apple.com/quicktime/icefloe/dispatch026.html

    // We can't initialize this stream until we've received the first packet
    // that has QuickTime "sdAtom" information in the header.  So, keep
    // reading packets until we get one:
    unsigned char* packetData; unsigned packetDataLen; float pts;
    QuickTimeGenericRTPSource* qtRTPSource
      = (QuickTimeGenericRTPSource*)(subsession->rtpSource());
    unsigned fourcc;
    do {
      if (!awaitRTPPacket(demuxer, demuxer->video,
			  packetData, packetDataLen, pts)) {
	return;
      }
    } while (!parseQTState_video(qtRTPSource->qtState, fourcc));

    bih->biCompression = sh_video->format = fourcc;
    bih->biWidth = qtRTPSource->qtState.width;
    bih->biHeight = qtRTPSource->qtState.height;
    uint8_t *pos = (uint8_t*)qtRTPSource->qtState.sdAtom + 86;
    uint8_t *endpos = (uint8_t*)qtRTPSource->qtState.sdAtom
                      + qtRTPSource->qtState.sdAtomSize;
    while (pos+8 < endpos) {
      unsigned atomLength = pos[0]<<24 | pos[1]<<16 | pos[2]<<8 | pos[3];
      if (atomLength == 0 || atomLength > endpos-pos) break;
      if (((!memcmp(pos+4, "avcC", 4) && fourcc==mmioFOURCC('a','v','c','1')) ||
           !memcmp(pos+4, "esds", 4) ||
           (!memcmp(pos+4, "SMI ", 4) && fourcc==mmioFOURCC('S','V','Q','3'))) &&
          atomLength > 8) {
        sh_video->bih = bih =
            insertVideoExtradata(bih, pos+8, atomLength-8);
        break;
      }
      pos += atomLength;
    }
    needVideoFrameRate(demuxer, subsession);
  } else {
    fprintf(stderr,
	    "Unknown MPlayer format code for MIME type \"video/%s\"\n",
	    subsession->codecName());
  }
}
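The sdAtom loop above walks QuickTime sample-description atoms: each atom is a 4-byte big-endian length followed by a 4-byte type code and its payload (the 86-byte offset skips the fixed video sample-description header; the audio path below uses 52). The same walk, as a standalone hypothetical helper:

#include <cstdint>
#include <cstring>

// Sketch of the atom scan used above.  Returns a pointer to the payload
// of the first atom whose type matches, or NULL if none is found.
const uint8_t* findAtomPayload(const uint8_t* pos, const uint8_t* end,
                               const char type[4], unsigned& payloadLen) {
  while (pos + 8 < end) {
    unsigned atomLength = pos[0] << 24 | pos[1] << 16 | pos[2] << 8 | pos[3];
    if (atomLength == 0 || atomLength > (unsigned)(end - pos)) break;
    if (memcmp(pos + 4, type, 4) == 0 && atomLength > 8) {
      payloadLen = atomLength - 8;
      return pos + 8;
    }
    pos += atomLength;
  }
  payloadLen = 0;
  return NULL;
}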
void rtpCodecInitialize_audio(demuxer_t* demuxer,
			      MediaSubsession* subsession,
			      unsigned& flags) {
  flags = 0;
  // Create a dummy audio stream header
  // to make the main MPlayer code happy:
  sh_audio_t* sh_audio = new_sh_audio(demuxer,0);
  WAVEFORMATEX* wf = (WAVEFORMATEX*)calloc(1,sizeof(WAVEFORMATEX));
  sh_audio->wf = wf;
  demux_stream_t* d_audio = demuxer->audio;
  d_audio->sh = sh_audio; sh_audio->ds = d_audio;
  
  wf->nChannels = subsession->numChannels();

  // Map known audio MIME types to the WAVEFORMATEX parameters
  // that this program uses.  (Note that not all types need all
  // of the parameters to be set.)
  wf->nSamplesPerSec
    = subsession->rtpSource()->timestampFrequency(); // by default
  if (strcmp(subsession->codecName(), "MPA") == 0 ||
      strcmp(subsession->codecName(), "MPA-ROBUST") == 0 ||
      strcmp(subsession->codecName(), "X-MP3-DRAFT-00") == 0) {
    wf->wFormatTag = sh_audio->format = 0x55;
    // Note: 0x55 is for layer III, but should work for I,II also
    wf->nSamplesPerSec = 0; // sample rate is deduced from the data
  } else if (strcmp(subsession->codecName(), "AC3") == 0) {
    wf->wFormatTag = sh_audio->format = 0x2000;
    wf->nSamplesPerSec = 0; // sample rate is deduced from the data
  } else if (strcmp(subsession->codecName(), "L16") == 0) {
    wf->wFormatTag = sh_audio->format = 0x736f7774; // "twos"
    wf->nBlockAlign = 1;
    wf->wBitsPerSample = 16;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "L8") == 0) {
    wf->wFormatTag = sh_audio->format = 0x20776172; // "raw "
    wf->nBlockAlign = 1;
    wf->wBitsPerSample = 8;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "PCMU") == 0) {
    wf->wFormatTag = sh_audio->format = 0x7;
    wf->nAvgBytesPerSec = 8000;
    wf->nBlockAlign = 1;
    wf->wBitsPerSample = 8;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "PCMA") == 0) {
    wf->wFormatTag = sh_audio->format = 0x6;
    wf->nAvgBytesPerSec = 8000;
    wf->nBlockAlign = 1;
    wf->wBitsPerSample = 8;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "AMR") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('s','a','m','r');
  } else if (strcmp(subsession->codecName(), "AMR-WB") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('s','a','w','b');
  } else if (strcmp(subsession->codecName(), "GSM") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('a','g','s','m');
    wf->nAvgBytesPerSec = 1650;
    wf->nBlockAlign = 33;
    wf->wBitsPerSample = 16;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "QCELP") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('Q','c','l','p');
    wf->nAvgBytesPerSec = 1750;
    wf->nBlockAlign = 35;
    wf->wBitsPerSample = 16;
    wf->cbSize = 0;
  } else if (strcmp(subsession->codecName(), "MP4A-LATM") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('m','p','4','a');
    // For the codec to work correctly, it needs "AudioSpecificConfig"
    // data, which is parsed from the "StreamMuxConfig" string that
    // was present (hopefully) in the SDP description:
    unsigned codecdata_len;
    sh_audio->codecdata
      = parseStreamMuxConfigStr(subsession->fmtp_config(),
				codecdata_len);
    sh_audio->codecdata_len = codecdata_len;
    //faad doesn't understand LATM's data length field, so omit it
    ((MPEG4LATMAudioRTPSource*)subsession->rtpSource())->omitLATMDataLengthField();
  } else if (strcmp(subsession->codecName(), "MPEG4-GENERIC") == 0) {
    wf->wFormatTag = sh_audio->format = mmioFOURCC('m','p','4','a');
    // For the codec to work correctly, it needs "AudioSpecificConfig"
    // data, which was present (hopefully) in the SDP description:
    unsigned codecdata_len;
    sh_audio->codecdata
      = parseGeneralConfigStr(subsession->fmtp_config(),
			      codecdata_len);
    sh_audio->codecdata_len = codecdata_len;
  } else if (strcmp(subsession->codecName(), "X-QT") == 0 ||
	     strcmp(subsession->codecName(), "X-QUICKTIME") == 0) {
    // QuickTime generic RTP format, as described in
    // http://developer.apple.com/quicktime/icefloe/dispatch026.html

    // We can't initialize this stream until we've received the first packet
    // that has QuickTime "sdAtom" information in the header.  So, keep
    // reading packets until we get one:
    unsigned char* packetData; unsigned packetDataLen; float pts;
    QuickTimeGenericRTPSource* qtRTPSource
      = (QuickTimeGenericRTPSource*)(subsession->rtpSource());
    unsigned fourcc, numChannels;
    do {
      if (!awaitRTPPacket(demuxer, demuxer->audio,
			  packetData, packetDataLen, pts)) {
	return;
      }
    } while (!parseQTState_audio(qtRTPSource->qtState, fourcc, numChannels));

    wf->wFormatTag = sh_audio->format = fourcc;
    wf->nChannels = numChannels;

    uint8_t *pos = (uint8_t*)qtRTPSource->qtState.sdAtom + 52;
    uint8_t *endpos = (uint8_t*)qtRTPSource->qtState.sdAtom
                      + qtRTPSource->qtState.sdAtomSize;
    while (pos+8 < endpos) {
      unsigned atomLength = pos[0]<<24 | pos[1]<<16 | pos[2]<<8 | pos[3];
      if (atomLength == 0 || atomLength > endpos-pos) break;
      if (!memcmp(pos+4, "wave", 4) && fourcc==mmioFOURCC('Q','D','M','2') &&
          atomLength > 8 &&
          atomLength <= INT_MAX) {
        sh_audio->codecdata = (unsigned char*) malloc(atomLength-8);
        if (sh_audio->codecdata) {
          memcpy(sh_audio->codecdata, pos+8, atomLength-8);
          sh_audio->codecdata_len = atomLength-8;
        }
        break;
      }
      pos += atomLength;
    }
  } else {
    fprintf(stderr,
	    "Unknown MPlayer format code for MIME type \"audio/%s\"\n",
	    subsession->codecName());
  }
}
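Both of these initializers tag streams with FOURCC codes via mmioFOURCC, the Windows macro that packs four characters into a 32-bit value, least-significant byte first. A sketch of an equivalent, in case the platform headers don't supply it:

#include <cstdint>

// Sketch of the mmioFOURCC macro used throughout these examples: four
// characters packed little-endian into a 32-bit code, so
// fourcc('H','2','6','4') == 0x34363248.
constexpr uint32_t fourcc(char a, char b, char c, char d) {
  return (uint32_t)(uint8_t)a | ((uint32_t)(uint8_t)b << 8) |
         ((uint32_t)(uint8_t)c << 16) | ((uint32_t)(uint8_t)d << 24);
}
static_assert(fourcc('H','2','6','4') == 0x34363248, "little-endian packing");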
bool MtkRTSPClient::handSetup(char* resultString)
{
	CHECK_NULL_COND(session, false); 
	CHECK_NULL_COND(rtsp::env, false);

	bool bSuccess = false;
	
	// Then, setup the "RTPSource"s for the session:
	MediaSubsessionIterator iter(*(session));
	MediaSubsession *subsession = NULL;
	while ((subsession = iter.next()) != NULL) 
	{					
		if (subsession->readSource() == NULL) 
		{
			LOG_ERR("warning");
			continue; // was not initiated
		}

		if (subsession->sink != NULL)/*already be set*/
		{
			continue;
		}

		unsigned int type = getBufType(subsession);
		if (type == 0)
		{
			LOG_ERR("error type=%d", type);
			continue;
		}
		
		{
			iSetupCount--;
			/*set media info*/
			setMediaInfo(subsession, type);
		}

		CmpbSink *sink = NULL;
		if ((type != mediatype_audio) && (strcmp(subsession->codecName(), "H264") == 0))
		{
			sink = CmpbH264Sink::createNew(*env, *subsession, type, fileSinkBufferSize);
		}
        else if ((type == mediatype_audio) && 
                    ((stMediaInfo.audioCodec == MEDIACODEC_AC3) || 
                     (stMediaInfo.audioCodec == MEDIACODEC_EAC3) ||
                     (stMediaInfo.audioCodec == MEDIACODEC_MPEG4_GENERIC)))
		{
			sink = CmpbAACSink::createNew(*env, *subsession, type, fileSinkBufferSize);
		}
        else if ((type == mediatype_audio) && (stMediaInfo.audioCodec == MEDIACODEC_MP4A_LATM))
		{
			sink = CmpbLATMSink::createNew(*env, *subsession, type, fileSinkBufferSize);
		}
		else
		{
			sink = CmpbSink::createNew(*env, *subsession, type, fileSinkBufferSize);
		}
		subsession->sink = sink;
		if (subsession->sink == NULL) 
		{
			LOG_ERR("error!"); 
		} 
		else 
		{		
#if 0 /*this should be moved to the cmpb sink*/
			if ((type != mediatype_audio) && (strcmp(subsession->codecName(), "MP4V-ES") == 0)
				&& (subsession->fmtp_config() != NULL)) 
			{
			    // For MPEG-4 video RTP streams, the 'config' information
			    // from the SDP description contains useful VOL etc. headers.
			    // Insert this data at the front of the output file:
			    unsigned configLen;
			    unsigned char* configData
			      = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
			    struct timeval timeNow;
			    gettimeofday(&timeNow, NULL);
			    sink->sendData(configData, configLen, timeNow);
			    delete[] configData;
		  	}
#endif			
			subsession->sink->startPlaying(*(subsession->readSource()),
												subsessionAfterPlaying,
													subsession);
			// Also set a handler to be called if a RTCP "BYE" arrives
			// for this subsession:
			if (subsession->rtcpInstance() != NULL) 
			{
				subsession->rtcpInstance()->setByeHandler(subsessionAfterPlaying, subsession);
			}

			bSuccess = true;
		}

		break;

	}

	if (iSetupCount == 0)
	{
		mediaInfoReady(); 
	}

	return bSuccess;
}
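Note that, unlike the openRTSP-derived examples, this client registers subsessionAfterPlaying() as the RTCP "BYE" handler as well, so a server-sent "BYE" is treated exactly like the subsession reaching end-of-stream; the other examples install a dedicated subsessionByeHandler() instead.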
Example #7
void createOutputFiles(char const* periodicFilenameSuffix) {
  char outFileName[1000];

  if (outputQuickTimeFile || outputAVIFile) {
    if (periodicFilenameSuffix[0] == '\0') {
      // Normally (unless the '-P <interval-in-seconds>' option was given) we output to 'stdout':
      sprintf(outFileName, "stdout");
    } else {
      // Otherwise output to a type-specific file name, containing "periodicFilenameSuffix":
      char const* prefix = fileNamePrefix[0] == '\0' ? "output" : fileNamePrefix;
      snprintf(outFileName, sizeof outFileName, "%s%s.%s", prefix, periodicFilenameSuffix,
	       outputAVIFile ? "avi" : generateMP4Format ? "mp4" : "mov");
    }

    if (outputQuickTimeFile) {
      qtOut = QuickTimeFileSink::createNew(*env, *session, outFileName,
					   fileSinkBufferSize,
					   movieWidth, movieHeight,
					   movieFPS,
					   packetLossCompensate,
					   syncStreams,
					   generateHintTracks,
					   generateMP4Format);
      if (qtOut == NULL) {
	*env << "Failed to create a \"QuickTimeFileSink\" for outputting to \""
	     << outFileName << "\": " << env->getResultMsg() << "\n";
	shutdown();
      } else {
	*env << "Outputting to the file: \"" << outFileName << "\"\n";
      }
      
      qtOut->startPlaying(sessionAfterPlaying, NULL);
    } else { // outputAVIFile
      aviOut = AVIFileSink::createNew(*env, *session, outFileName,
				      fileSinkBufferSize,
				      movieWidth, movieHeight,
				      movieFPS,
				      packetLossCompensate);
      if (aviOut == NULL) {
	*env << "Failed to create an \"AVIFileSink\" for outputting to \""
	     << outFileName << "\": " << env->getResultMsg() << "\n";
	shutdown();
      } else {
	*env << "Outputting to the file: \"" << outFileName << "\"\n";
      }
      
      aviOut->startPlaying(sessionAfterPlaying, NULL);
    }
  } else {
    // Create and start "FileSink"s for each subsession:
    madeProgress = False;
    MediaSubsessionIterator iter(*session);
    while ((subsession = iter.next()) != NULL) {
      if (subsession->readSource() == NULL) continue; // was not initiated
      
      // Create an output file for each desired stream:
      if (singleMedium == NULL || periodicFilenameSuffix[0] != '\0') {
	// Output file name is
	//     "<filename-prefix><medium_name>-<codec_name>-<counter><periodicFilenameSuffix>"
	static unsigned streamCounter = 0;
	snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d%s",
		 fileNamePrefix, subsession->mediumName(),
		 subsession->codecName(), ++streamCounter, periodicFilenameSuffix);
      } else {
	// When outputting a single medium only, we output to 'stdout'
	// (unless the '-P <interval-in-seconds>' option was given):
	sprintf(outFileName, "stdout");
      }
      FileSink* fileSink;
      if (strcmp(subsession->mediumName(), "audio") == 0 &&
	  (strcmp(subsession->codecName(), "AMR") == 0 ||
	   strcmp(subsession->codecName(), "AMR-WB") == 0)) {
	// For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
	fileSink = AMRAudioFileSink::createNew(*env, outFileName,
					       fileSinkBufferSize, oneFilePerFrame);
      } else if (strcmp(subsession->mediumName(), "video") == 0 &&
		 (strcmp(subsession->codecName(), "H264") == 0)) {
	// For H.264 video stream, we use a special sink that adds 'start codes',
	// and (at the start) the SPS and PPS NAL units:
	fileSink = H264VideoFileSink::createNew(*env, outFileName,
						subsession->fmtp_spropparametersets(),
						fileSinkBufferSize, oneFilePerFrame);
      } else if (strcmp(subsession->mediumName(), "video") == 0 &&
		 (strcmp(subsession->codecName(), "H265") == 0)) {
	// For H.265 video stream, we use a special sink that adds 'start codes',
	// and (at the start) the VPS, SPS, and PPS NAL units:
	fileSink = H265VideoFileSink::createNew(*env, outFileName,
						subsession->fmtp_spropvps(),
						subsession->fmtp_spropsps(),
						subsession->fmtp_sproppps(),
						fileSinkBufferSize, oneFilePerFrame);
      } else {
	// Normal case:
	fileSink = FileSink::createNew(*env, outFileName,
				       fileSinkBufferSize, oneFilePerFrame);
      }
      subsession->sink = fileSink;
      if (subsession->sink == NULL) {
	*env << "Failed to create FileSink for \"" << outFileName
	     << "\": " << env->getResultMsg() << "\n";
      } else {
	if (singleMedium == NULL) {
	  *env << "Created output file: \"" << outFileName << "\"\n";
	} else {
	  *env << "Outputting data from the \"" << subsession->mediumName()
	       << "/" << subsession->codecName()
	       << "\" subsession to \"" << outFileName << "\"\n";
	}
	
	if (strcmp(subsession->mediumName(), "video") == 0 &&
	    strcmp(subsession->codecName(), "MP4V-ES") == 0 &&
	    subsession->fmtp_config() != NULL) {
	  // For MPEG-4 video RTP streams, the 'config' information
	  // from the SDP description contains useful VOL etc. headers.
	  // Insert this data at the front of the output file:
	  unsigned configLen;
	  unsigned char* configData
	    = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
	  struct timeval timeNow;
	  gettimeofday(&timeNow, NULL);
	  fileSink->addData(configData, configLen, timeNow);
	  delete[] configData;
	}
	
	subsession->sink->startPlaying(*(subsession->readSource()),
				       subsessionAfterPlaying,
				       subsession);
	
	// Also set a handler to be called if a RTCP "BYE" arrives
	// for this subsession:
	if (subsession->rtcpInstance() != NULL) {
	  subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, subsession);
	}
	
	madeProgress = True;
      }
    }
    if (!madeProgress) shutdown();
  }
}
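The H264VideoFileSink and H265VideoFileSink constructors above take the "sprop-*" fmtp attributes: comma-separated base64 NAL units (SPS/PPS, plus VPS for H.265) that the sink writes at the head of the file. A sketch of that step using live555's parseSPropParameterSets(), with error handling elided and the output FILE* as a stand-in:

#include "liveMedia.hh"
#include <cstdio>

// Sketch: decode "sprop-parameter-sets" and emit each NAL unit with an
// Annex-B start code, producing a decodable file header.
void writeSpropHeader(FILE* out, char const* sPropStr) {
  static unsigned char const startCode[4] = {0x00, 0x00, 0x00, 0x01};
  unsigned numRecords;
  SPropRecord* records = parseSPropParameterSets(sPropStr, numRecords);
  for (unsigned i = 0; i < numRecords; ++i) {
    fwrite(startCode, 1, sizeof startCode, out);
    fwrite(records[i].sPropBytes, 1, records[i].sPropLength, out);
  }
  delete[] records; // also frees each record's bytes
}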
Example #8
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  progName = argv[0];

  gettimeofday(&startTime, NULL);

#ifdef USE_SIGNALS
  // Allow ourselves to be shut down gracefully by a SIGHUP or a SIGUSR1:
  signal(SIGHUP, signalHandlerShutdown);
  signal(SIGUSR1, signalHandlerShutdown);
#endif

  unsigned short desiredPortNum = 0;

  // unfortunately we can't use getopt() here, as Windoze doesn't have it
  while (argc > 2) {
    char* const opt = argv[1];
    if (opt[0] != '-') usage();
    switch (opt[1]) {

    case 'p': { // specify start port number
      int portArg;
      if (sscanf(argv[2], "%d", &portArg) != 1) {
	usage();
      }
      if (portArg <= 0 || portArg >= 65536 || portArg&1) {
	*env << "bad port number: " << portArg
		<< " (must be even, and in the range (0,65536))\n";
	usage();
      }
      desiredPortNum = (unsigned short)portArg;
      ++argv; --argc;
      break;
    }

    case 'r': { // do not receive data (instead, just 'play' the stream(s))
      createReceivers = False;
      break;
    }

    case 'q': { // output a QuickTime file (to stdout)
      outputQuickTimeFile = True;
      break;
    }

    case '4': { // output a 'mp4'-format file (to stdout)
      outputQuickTimeFile = True;
      generateMP4Format = True;
      break;
    }

    case 'i': { // output an AVI file (to stdout)
      outputAVIFile = True;
      break;
    }

    case 'I': { // specify input interface...
      NetAddressList addresses(argv[2]);
      if (addresses.numAddresses() == 0) {
	*env << "Failed to find network address for \"" << argv[2] << "\"";
	break;
      }
      ReceivingInterfaceAddr = *(unsigned*)(addresses.firstAddress()->data());
      ++argv; --argc;
      break;
    }

    case 'a': { // receive/record an audio stream only
      audioOnly = True;
      singleMedium = "audio";
      break;
    }

    case 'v': { // receive/record a video stream only
      videoOnly = True;
      singleMedium = "video";
      break;
    }

    case 'V': { // disable verbose output
      verbosityLevel = 0;
      break;
    }

    case 'd': { // specify duration, or how much to delay after end time
      float arg;
      if (sscanf(argv[2], "%g", &arg) != 1) {
	usage();
      }
      if (argv[2][0] == '-') { // not "arg<0", in case argv[2] was "-0"
	// a 'negative' argument was specified; use this for "durationSlop":
	duration = 0; // use whatever's in the SDP
	durationSlop = -arg;
      } else {
	duration = arg;
	durationSlop = 0;
      }
      ++argv; --argc;
      break;
    }

    case 'D': { // specify maximum number of seconds to wait for packets:
      if (sscanf(argv[2], "%u", &interPacketGapMaxTime) != 1) {
	usage();
      }
      ++argv; --argc;
      break;
    }

    case 'c': { // play continuously
      playContinuously = True;
      break;
    }

    case 'S': { // specify an offset to use with "SimpleRTPSource"s
      if (sscanf(argv[2], "%d", &simpleRTPoffsetArg) != 1) {
	usage();
      }
      if (simpleRTPoffsetArg < 0) {
	*env << "offset argument to \"-S\" must be >= 0\n";
	usage();
      }
      ++argv; --argc;
      break;
    }

    case 'O': { // Don't send an "OPTIONS" request before "DESCRIBE"
      sendOptionsRequest = False;
      break;
    }

    case 'o': { // Send only the "OPTIONS" request to the server
      sendOptionsRequestOnly = True;
      break;
    }

    case 'm': { // output multiple files - one for each frame
      oneFilePerFrame = True;
      break;
    }

    case 'n': { // notify the user when the first data packet arrives
      notifyOnPacketArrival = True;
      break;
    }

    case 't': {
      // stream RTP and RTCP over the TCP 'control' connection
      if (controlConnectionUsesTCP) {
	streamUsingTCP = True;
      } else {
	usage();
      }
      break;
    }

    case 'T': {
      // stream RTP and RTCP over a HTTP connection
      if (controlConnectionUsesTCP) {
	if (argc > 3 && argv[2][0] != '-') {
	  // The next argument is the HTTP server port number:
	  if (sscanf(argv[2], "%hu", &tunnelOverHTTPPortNum) == 1
	      && tunnelOverHTTPPortNum > 0) {
	    ++argv; --argc;
	    break;
	  }
	}
      }

      // If we get here, the option was specified incorrectly:
      usage();
      break;
    }

    case 'u': { // specify a username and password
      username = argv[2];
      password = argv[3];
      argv+=2; argc-=2;
      if (allowProxyServers && argc > 3 && argv[2][0] != '-') {
	// The next argument is the name of a proxy server:
	proxyServerName = argv[2];
	++argv; --argc;

	if (argc > 3 && argv[2][0] != '-') {
	  // The next argument is the proxy server port number:
	  if (sscanf(argv[2], "%hu", &proxyServerPortNum) != 1) {
	    usage();
	  }
	  ++argv; --argc;
	}
      }
      break;
    }

    case 'A': { // specify a desired audio RTP payload format
      unsigned formatArg;
      if (sscanf(argv[2], "%u", &formatArg) != 1
	  || formatArg >= 96) {
	usage();
      }
      desiredAudioRTPPayloadFormat = (unsigned char)formatArg;
      ++argv; --argc;
      break;
    }

    case 'M': { // specify a MIME subtype for a dynamic RTP payload type
      mimeSubtype = argv[2];
      if (desiredAudioRTPPayloadFormat == 0) desiredAudioRTPPayloadFormat = 96;
      ++argv; --argc;
      break;
    }

    case 'w': { // specify a width (pixels) for an output QuickTime or AVI movie
      if (sscanf(argv[2], "%hu", &movieWidth) != 1) {
	usage();
      }
      movieWidthOptionSet = True;
      ++argv; --argc;
      break;
    }

    case 'h': { // specify a height (pixels) for an output QuickTime or AVI movie
      if (sscanf(argv[2], "%hu", &movieHeight) != 1) {
	usage();
      }
      movieHeightOptionSet = True;
      ++argv; --argc;
      break;
    }

    case 'f': { // specify a frame rate (per second) for an output QT or AVI movie
      if (sscanf(argv[2], "%u", &movieFPS) != 1) {
	usage();
      }
      movieFPSOptionSet = True;
      ++argv; --argc;
      break;
    }

    case 'F': { // specify a prefix for the audio and video output files
      fileNamePrefix = argv[2];
      ++argv; --argc;
      break;
    }

    case 'b': { // specify the size of buffers for "FileSink"s
      if (sscanf(argv[2], "%u", &fileSinkBufferSize) != 1) {
	usage();
      }
      ++argv; --argc;
      break;
    }

    case 'B': { // specify the size of input socket buffers
      if (sscanf(argv[2], "%u", &socketInputBufferSize) != 1) {
	usage();
      }
      ++argv; --argc;
      break;
    }

    // Note: The following option is deprecated, and may someday be removed:
    case 'l': { // try to compensate for packet loss by repeating frames
      packetLossCompensate = True;
      break;
    }

    case 'y': { // synchronize audio and video streams
      syncStreams = True;
      break;
    }

    case 'H': { // generate hint tracks (as well as the regular data tracks)
      generateHintTracks = True;
      break;
    }

    case 'Q': { // output QOS measurements
      qosMeasurementIntervalMS = 1000; // default: 1 second

      if (argc > 3 && argv[2][0] != '-') {
	// The next argument is the measurement interval,
	// in multiples of 100 ms
	if (sscanf(argv[2], "%u", &qosMeasurementIntervalMS) != 1) {
	  usage();
	}
	qosMeasurementIntervalMS *= 100;
	++argv; --argc;
      }
      break;
    }

    case 's': { // specify initial seek time (trick play)
      double arg;
      if (sscanf(argv[2], "%lg", &arg) != 1 || arg < 0) {
	usage();
      }
      initialSeekTime = arg;
      ++argv; --argc;
      break;
    }

    case 'z': { // scale (trick play)
      float arg;
      if (sscanf(argv[2], "%g", &arg) != 1 || arg == 0.0f) {
	usage();
      }
      scale = arg;
      ++argv; --argc;
      break;
    }

    default: {
      usage();
      break;
    }
    }

    ++argv; --argc;
  }
  if (argc != 2) usage();
  if (outputQuickTimeFile && outputAVIFile) {
    *env << "The -i and -q (or -4) flags cannot both be used!\n";
    usage();
  }
  Boolean outputCompositeFile = outputQuickTimeFile || outputAVIFile;
  if (!createReceivers && outputCompositeFile) {
    *env << "The -r and -q (or -4 or -i) flags cannot both be used!\n";
    usage();
  }
  if (outputCompositeFile && !movieWidthOptionSet) {
    *env << "Warning: The -q, -4 or -i option was used, but not -w.  Assuming a video width of "
	 << movieWidth << " pixels\n";
  }
  if (outputCompositeFile && !movieHeightOptionSet) {
    *env << "Warning: The -q, -4 or -i option was used, but not -h.  Assuming a video height of "
	 << movieHeight << " pixels\n";
  }
  if (outputCompositeFile && !movieFPSOptionSet) {
    *env << "Warning: The -q, -4 or -i option was used, but not -f.  Assuming a video frame rate of "
	 << movieFPS << " frames-per-second\n";
  }
  if (audioOnly && videoOnly) {
    *env << "The -a and -v flags cannot both be used!\n";
    usage();
  }
  if (sendOptionsRequestOnly && !sendOptionsRequest) {
    *env << "The -o and -O flags cannot both be used!\n";
    usage();
  }
  if (tunnelOverHTTPPortNum > 0) {
    if (streamUsingTCP) {
      *env << "The -t and -T flags cannot both be used!\n";
      usage();
    } else {
      streamUsingTCP = True;
    }
  }
  if (!createReceivers && notifyOnPacketArrival) {
    *env << "Warning: Because we're not receiving stream data, the -n flag has no effect\n";
  }
  if (durationSlop < 0) {
    // This parameter wasn't set, so use a default value.
    // If we're measuring QOS stats, then don't add any slop, to avoid
    // having 'empty' measurement intervals at the end.
    durationSlop = qosMeasurementIntervalMS > 0 ? 0.0 : 5.0;
  }

  char* url = argv[1];

  // Create our client object:
  ourClient = createClient(*env, verbosityLevel, progName);
  if (ourClient == NULL) {
    *env << "Failed to create " << clientProtocolName
		<< " client: " << env->getResultMsg() << "\n";
    shutdown();
  }

  if (sendOptionsRequest) {
    // Begin by sending an "OPTIONS" command:
    char* optionsResponse
      = getOptionsResponse(ourClient, url, username, password);
    if (sendOptionsRequestOnly) {
      if (optionsResponse == NULL) {
	*env << clientProtocolName << " \"OPTIONS\" request failed: "
	     << env->getResultMsg() << "\n";
      } else {
	*env << clientProtocolName << " \"OPTIONS\" request returned: "
	     << optionsResponse << "\n";
      }
      shutdown();
    }
    delete[] optionsResponse;
  }

  // Open the URL, to get a SDP description:
  char* sdpDescription
    = getSDPDescriptionFromURL(ourClient, url, username, password,
			       proxyServerName, proxyServerPortNum,
			       desiredPortNum);
  if (sdpDescription == NULL) {
    *env << "Failed to get a SDP description from URL \"" << url
		<< "\": " << env->getResultMsg() << "\n";
    shutdown();
  }

  *env << "Opened URL \"" << url
	  << "\", returning a SDP description:\n" << sdpDescription << "\n";

  // Create a media session object from this SDP description:
  session = MediaSession::createNew(*env, sdpDescription);
  delete[] sdpDescription;
  if (session == NULL) {
    *env << "Failed to create a MediaSession object from the SDP description: " << env->getResultMsg() << "\n";
    shutdown();
  } else if (!session->hasSubsessions()) {
    *env << "This session has no media subsessions (i.e., \"m=\" lines)\n";
    shutdown();
  }

  // Then, setup the "RTPSource"s for the session:
  MediaSubsessionIterator iter(*session);
  MediaSubsession *subsession;
  Boolean madeProgress = False;
  char const* singleMediumToTest = singleMedium;
  while ((subsession = iter.next()) != NULL) {
    // If we've asked to receive only a single medium, then check this now:
    if (singleMediumToTest != NULL) {
      if (strcmp(subsession->mediumName(), singleMediumToTest) != 0) {
		  *env << "Ignoring \"" << subsession->mediumName()
			  << "/" << subsession->codecName()
			  << "\" subsession, because we've asked to receive a single " << singleMedium
			  << " session only\n";
	continue;
      } else {
	// Receive this subsession only
	singleMediumToTest = "xxxxx";
	    // this hack ensures that we get only 1 subsession of this type
      }
    }

    if (desiredPortNum != 0) {
      subsession->setClientPortNum(desiredPortNum);
      desiredPortNum += 2;
    }

    if (createReceivers) {
      if (!subsession->initiate(simpleRTPoffsetArg)) {
	*env << "Unable to create receiver for \"" << subsession->mediumName()
	     << "/" << subsession->codecName()
	     << "\" subsession: " << env->getResultMsg() << "\n";
      } else {
	*env << "Created receiver for \"" << subsession->mediumName()
	     << "/" << subsession->codecName()
	     << "\" subsession (client ports " << subsession->clientPortNum()
	     << "-" << subsession->clientPortNum()+1 << ")\n";
	madeProgress = True;
	
	if (subsession->rtpSource() != NULL) {
	  // Because we're saving the incoming data, rather than playing
	  // it in real time, allow an especially large time threshold
	  // (1 second) for reordering misordered incoming packets:
	  unsigned const thresh = 1000000; // 1 second
	  subsession->rtpSource()->setPacketReorderingThresholdTime(thresh);
	  
	  // Set the RTP source's OS socket buffer size as appropriate - either if we were explicitly asked (using -B),
	  // or if the desired FileSink buffer size happens to be larger than the current OS socket buffer size.
	  // (The latter case is a heuristic, on the assumption that if the user asked for a large FileSink buffer size,
	  // then the input data rate may be large enough to justify increasing the OS socket buffer size also.)
	  int socketNum = subsession->rtpSource()->RTPgs()->socketNum();
	  unsigned curBufferSize = getReceiveBufferSize(*env, socketNum);
	  if (socketInputBufferSize > 0 || fileSinkBufferSize > curBufferSize) {
	    unsigned newBufferSize = socketInputBufferSize > 0 ? socketInputBufferSize : fileSinkBufferSize;
	    newBufferSize = setReceiveBufferTo(*env, socketNum, newBufferSize);
	    if (socketInputBufferSize > 0) { // The user explicitly asked for the new socket buffer size; announce it:
	      *env << "Changed socket receive buffer size for the \""
		   << subsession->mediumName()
		   << "/" << subsession->codecName()
		   << "\" subsession from "
		   << curBufferSize << " to "
		   << newBufferSize << " bytes\n";
	    }
	  }
	}
      }
    } else {
      if (subsession->clientPortNum() == 0) {
	*env << "No client port was specified for the \""
	     << subsession->mediumName()
	     << "/" << subsession->codecName()
	     << "\" subsession.  (Try adding the \"-p <portNum>\" option.)\n";
      } else {
		madeProgress = True;
      }
    }
  }
  if (!madeProgress) shutdown();

  // Perform additional 'setup' on each subsession, before playing them:
  setupStreams();

  // Create output files:
  if (createReceivers) {
    if (outputQuickTimeFile) {
      // Create a "QuickTimeFileSink", to write to 'stdout':
      qtOut = QuickTimeFileSink::createNew(*env, *session, "stdout",
					   fileSinkBufferSize,
					   movieWidth, movieHeight,
					   movieFPS,
					   packetLossCompensate,
					   syncStreams,
					   generateHintTracks,
					   generateMP4Format);
      if (qtOut == NULL) {
	*env << "Failed to create QuickTime file sink for stdout: " << env->getResultMsg();
	shutdown();
      }

      qtOut->startPlaying(sessionAfterPlaying, NULL);
    } else if (outputAVIFile) {
      // Create an "AVIFileSink", to write to 'stdout':
      aviOut = AVIFileSink::createNew(*env, *session, "stdout",
				      fileSinkBufferSize,
				      movieWidth, movieHeight,
				      movieFPS,
				      packetLossCompensate);
      if (aviOut == NULL) {
	*env << "Failed to create AVI file sink for stdout: " << env->getResultMsg();
	shutdown();
      }

      aviOut->startPlaying(sessionAfterPlaying, NULL);
    } else {
      // Create and start "FileSink"s for each subsession:
      madeProgress = False;
      iter.reset();
      while ((subsession = iter.next()) != NULL) {
	if (subsession->readSource() == NULL) continue; // was not initiated

	// Create an output file for each desired stream:
	char outFileName[1000];
	if (singleMedium == NULL) {
	  // Output file name is
	  //     "<filename-prefix><medium_name>-<codec_name>-<counter>"
	  static unsigned streamCounter = 0;
	  snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d",
		   fileNamePrefix, subsession->mediumName(),
		   subsession->codecName(), ++streamCounter);
	} else {
	  sprintf(outFileName, "stdout");
	}
	FileSink* fileSink;
	if (strcmp(subsession->mediumName(), "audio") == 0 &&
	    (strcmp(subsession->codecName(), "AMR") == 0 ||
	     strcmp(subsession->codecName(), "AMR-WB") == 0)) {
	  // For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
	  fileSink = AMRAudioFileSink::createNew(*env, outFileName,
						 fileSinkBufferSize, oneFilePerFrame);
	} else if (strcmp(subsession->mediumName(), "video") == 0 &&
	    (strcmp(subsession->codecName(), "H264") == 0)) {
	  // For H.264 video streams, we use a special sink that inserts start codes:
	  fileSink = H264VideoFileSink::createNew(*env, outFileName,
						 fileSinkBufferSize, oneFilePerFrame);
	} else {
	  // Normal case:
	  fileSink = FileSink::createNew(*env, outFileName,
					 fileSinkBufferSize, oneFilePerFrame);
	}
	subsession->sink = fileSink;
	if (subsession->sink == NULL) {
	  *env << "Failed to create FileSink for \"" << outFileName
		  << "\": " << env->getResultMsg() << "\n";
	} else {
	  if (singleMedium == NULL) {
	    *env << "Created output file: \"" << outFileName << "\"\n";
	  } else {
	    *env << "Outputting data from the \"" << subsession->mediumName()
			<< "/" << subsession->codecName()
			<< "\" subsession to 'stdout'\n";
	  }

	  if (strcmp(subsession->mediumName(), "video") == 0 &&
	      strcmp(subsession->codecName(), "MP4V-ES") == 0 &&
	      subsession->fmtp_config() != NULL) {
	    // For MPEG-4 video RTP streams, the 'config' information
	    // from the SDP description contains useful VOL etc. headers.
	    // Insert this data at the front of the output file:
	    unsigned configLen;
	    unsigned char* configData
	      = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
	    struct timeval timeNow;
	    gettimeofday(&timeNow, NULL);
	    fileSink->addData(configData, configLen, timeNow);
	    delete[] configData;
	  }

	  subsession->sink->startPlaying(*(subsession->readSource()),
					 subsessionAfterPlaying,
					 subsession);

	  // Also set a handler to be called if a RTCP "BYE" arrives
	  // for this subsession:
	  if (subsession->rtcpInstance() != NULL) {
	    subsession->rtcpInstance()->setByeHandler(subsessionByeHandler,
						      subsession);
	  }

	  madeProgress = True;
	}
      }
      if (!madeProgress) shutdown();
    }
  }

  // Finally, start playing each subsession, to start the data flow:

  startPlayingStreams();

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
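The socket-buffer tuning inside the receiver-creation loop is worth isolating: grow the OS receive buffer when the user asked explicitly (-B), or when the requested FileSink buffer size suggests a bitrate the default buffer can't absorb. A hypothetical wrapper around the same live555 calls:

#include "liveMedia.hh"
#include "GroupsockHelper.hh"

// Sketch of the heuristic from main(): enlarge the RTP socket's OS
// receive buffer if explicitly requested, or if the FileSink buffer
// size implies a higher data rate than the current buffer can absorb.
void tuneReceiveBuffer(UsageEnvironment& env, MediaSubsession* subsession,
                       unsigned socketInputBufferSize,
                       unsigned fileSinkBufferSize) {
  int socketNum = subsession->rtpSource()->RTPgs()->socketNum();
  unsigned curSize = getReceiveBufferSize(env, socketNum);
  if (socketInputBufferSize > 0 || fileSinkBufferSize > curSize) {
    unsigned newSize = socketInputBufferSize > 0 ? socketInputBufferSize
                                                 : fileSinkBufferSize;
    setReceiveBufferTo(env, socketNum, newSize);
  }
}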
Example #9
int CLvRtspClient::InitAudio( MediaSubsession *pss )
{_STT();

	if ( !m_pRtspClient )
	{	setLastError( -200, oexT( "Invalid rtsp client object" ) );
		return 0;
	} // end if

	if ( !pss )
	{	setLastError( -201, oexT( "Invalid audio subsession object" ) );
		return 0;
	} // end if

	// Create receiver for stream
	if ( !pss->initiate() )
	{	setLastError( -203, sqbind::oex2std( oexMks( oexT( "initiate() audio stream failed : " ), oexT( " : " ), m_pEnv->getResultMsg() ) ) );
		return 0;
	} // end if

	if ( !pss->rtpSource() )
	{	setLastError( -204, sqbind::oex2std( oexMks( oexT( "RTP source is null : " ), oexT( " : " ), m_pEnv->getResultMsg() ) ) );
		return 0;
	} // end if

	if ( oex::CStr8( "MP4A-LATM" ) == pss->codecName() )
	{
		((MPEG4LATMAudioRTPSource*)pss->rtpSource())->omitLATMDataLengthField();

		const char *pCfg = pss->fmtp_config();
		if ( pCfg )
		{	unsigned elen = 0;
			unsigned char *pExtra = parseStreamMuxConfigStr( pCfg, elen );
			if ( pExtra && elen )
				m_extraAudio.AppendBuffer( (const char*)pExtra, elen );

		} // end if

	} // end if

	else if ( oex::CStr8( "MPEG4-GENERIC" ) == pss->codecName() )
	{
		const char *pCfg = pss->fmtp_config();
		if ( pCfg )
		{	unsigned elen = 0;
			unsigned char *pExtra = parseGeneralConfigStr( pCfg, elen );
			if ( pExtra && elen )
				m_extraAudio.AppendBuffer( (const char*)pExtra, elen );

		} // end if

	} // end if

	// Read extradata
    const char *props = pss->fmtp_spropparametersets();
    if ( props )
    {	oex::TList< oex::CStr8 > lst = oex::CParser::Explode( props, oexT( "," ) );
        for ( oex::TList< oex::CStr8 >::iterator it; lst.Next( it ); )
        {	m_extraAudio.AppendBuffer( "\x00\x00\x01", 3 );
            m_extraAudio.Mem().appendString( oex::CBase64::Decode( *it ) );
        } // end for
    } // end if

	// Set minimum rx buffer size
	if ( 2000000 > m_nRxBufferSize )
		m_nRxBufferSize = 2000000;

	// Set rx buffer size
	int sn = pss->rtpSource()->RTPgs()->socketNum();
	increaseReceiveBufferTo( *m_pEnv, sn, m_nRxBufferSize );

	pss->rtpSource()->setPacketReorderingThresholdTime( 2000000 );

	if ( pss->codecName() )
		m_sAudioCodec = oexMbToStrPtr( pss->codecName() );

	if ( !m_pRtspClient->setupMediaSubsession( *pss, False, False ) )
	{	setLastError( -205, sqbind::oex2std( oexMks( oexT( "setupMediaSubsession() failed : " ), oexT( " : " ), m_pEnv->getResultMsg() ) ) );
		return 0;
	} // end if

	// Save away important audio parameters
	m_nAudioNumChannels = pss->numChannels();
	m_nAudioRate = pss->rtpTimestampFrequency();
	m_nAudioBps = 0;

	m_pAs = new CAudioSink( *m_pEnv );
	if ( !m_pAs )
	{	setLastError( -206, sqbind::oex2std( oexMks( oexT( "CAudioSink::createNew() failed : " ), oexT( " : " ), m_pEnv->getResultMsg() ) ) );
		return 0;
	} // end if

	m_pAs->setDataEvent( &m_evtData );
	m_pAsPss = pss;

	return 1;
}
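Both AAC-family audio paths above extract decoder "extradata" (AudioSpecificConfig) from the SDP, but through different parsers: MP4A-LATM wraps it in a StreamMuxConfig, while MPEG4-GENERIC carries it as a plain hex string. A hypothetical helper that makes the dispatch explicit (both parse functions are real live555 APIs; the caller owns the returned buffer):

#include "liveMedia.hh"
#include <cstring>

unsigned char* extractAudioSpecificConfig(MediaSubsession& ss, unsigned& len) {
  len = 0;
  if (ss.fmtp_config() == NULL) return NULL;
  if (strcmp(ss.codecName(), "MP4A-LATM") == 0) {
    // "config" carries a StreamMuxConfig; unwrap it to AudioSpecificConfig:
    return parseStreamMuxConfigStr(ss.fmtp_config(), len);
  }
  if (strcmp(ss.codecName(), "MPEG4-GENERIC") == 0) {
    // "config" is already a hex AudioSpecificConfig string:
    return parseGeneralConfigStr(ss.fmtp_config(), len);
  }
  return NULL;
}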
Example #10
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
				  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
  // We've just received a frame of data.  (Optionally) print out information about it:
  if (numTruncatedBytes > 0)
  {
    printf("============== warning, live555 truncated %d bytes =================\n", numTruncatedBytes);
  }
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
  sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
  envir() << ".\tPresentation time: " << (unsigned)presentationTime.tv_sec << "." << uSecsStr;
  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
  }
  envir() << "\n";
#endif

  // The resolution has changed; stop passing frames up
	if (_drop)
	{
		printf("############################### drop frame\n");
		return;
	}


  if(_cb)
  {
    RtspFrameInfo	info;
	info.videoFPS = fSubsession.videoFPS();
	info.videoWidth = fSubsession.videoWidth();
	info.videoHeight = fSubsession.videoHeight();
	info.frequency = fSubsession.rtpTimestampFrequency();
	info.channels = fSubsession.numChannels();
	info.profile_level_id = fSubsession.fmtp_profile_level_id();
	
	strncpy((char*)&(info.mediaName), fSubsession.mediumName(), sizeof(info.mediaName));
	strncpy((char*)&(info.codecName), fSubsession.codecName(), sizeof(info.codecName));
	info.timestamp	= presentationTime;
  	if(fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) 
	{
		info.syncUseRTCP	= false;
	}
	else
	{
		info.syncUseRTCP	= true;
	}
 
	if(strcmp(fSubsession.mediumName(), "audio") == 0)
	{
		if (strcmp(fSubsession.codecName(), "MPEG4-GENERIC") == 0)
		{
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
		else if (strcmp(fSubsession.codecName(), "L16") == 0)
		{
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
	}
	else if(strcmp(fSubsession.mediumName(), "video") == 0)
	{
		if(strcmp(fSubsession.codecName(), "H264") == 0)
		{
			unsigned char start_code[4] = {0x00, 0x00, 0x00, 0x01};

			if(!_sentHeader)
			{
				_sentHeader	= true;
	
				unsigned numSpropRecords;
				if(fSubsession.fmtp_spropparametersets() && 0 < strlen(fSubsession.fmtp_spropparametersets()))
				{
					SPropRecord* sPropRecords = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), numSpropRecords);
					printf("====================== proparamset: [%d]%s =================\n", numSpropRecords, fSubsession.fmtp_spropparametersets());
					if(numSpropRecords > 0)
					{
						int 	headerLen		= 0;
						int 	validRecordNum	= 0;
						for(unsigned int i = 0; i < numSpropRecords; i++)
						{
							printf("spropparameter first byte = %x\n", sPropRecords[i].sPropBytes[0]);
							if(((sPropRecords[i].sPropBytes[0] & 0x1f) == 7) || ((sPropRecords[i].sPropBytes[0] & 0x1f) == 8))
							{
								headerLen += sPropRecords[i].sPropLength;
								validRecordNum	+= 1;
							}
						}

						headerLen		+= sizeof(start_code) * validRecordNum;
						char*	headerData	= new char[headerLen];
						int		offset		= 0;
						for(unsigned int i = 0; i < numSpropRecords; i++)
						{
							if(((sPropRecords[i].sPropBytes[0] & 0x1f) == 7) || ((sPropRecords[i].sPropBytes[0] & 0x1f) == 8))
							{
								memcpy(headerData + offset, start_code, 4);
								offset				+= 4;
								memcpy(headerData + offset, sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength);
								offset				+= sPropRecords[i].sPropLength;
							}
						}

						uint16_t w = 0;
						uint16_t h = 0;
						if (H264Parse::GetResolution((uint8_t*)headerData, headerLen, &w, &h))
						{
							_w = w;
							_h = h;
						}

						info.isHeader	= 1;
						_cb(_channel, headerLen, headerData, info);

						delete [] headerData;
					}
				}
			}
			else
			{
				if ((fReceiveBuffer[0] & 0x1f) == 7)
				{
					uint16_t w = 0;
					uint16_t h = 0;
					if (H264Parse::GetResolution((uint8_t*)fReceiveBuffer, frameSize, &w, &h))
					{
						if (_w == 0 || _h == 0)
						{
							_w = w;
							_h = h;
						}
						else if ((_w != w) || (_h != h))
						{
							printf("=====33333333========= %dx%d,   %dx%d\n", _w, _h, w, h);
							_drop = true;
						}
					}
				}
			}

			if (!_drop)
			{
				info.isHeader	= 0;

				char* newData	= new char[sizeof(start_code) + frameSize];
				memcpy(newData, start_code, sizeof(start_code));
				memcpy(newData + sizeof(start_code), (char*)fReceiveBuffer, frameSize);
 				_cb(_channel, frameSize + sizeof(start_code), newData, info);
			
				delete [] newData;
			}
		}
		else if(strcmp(fSubsession.codecName(), "MP4V-ES") == 0)
		{
#ifdef SEND_CONFIG_HEADER
			unsigned configLen;
			unsigned char* configData = parseGeneralConfigStr(fSubsession.fmtp_config(), configLen);
				
			info.isHeader	= 1;
			_cb(_channel, configLen, (char*)configData, info);
#endif
			
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
		else
		{
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
	}
  }
  
  // Then continue, to request the next frame of data:
  continuePlaying();
}
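The H.264 branch above keys off the NAL unit type: the low five bits of the first byte of each NAL unit (7 = SPS, 8 = PPS, 5 = IDR slice). A small helper makes those magic masks explicit:

#include <cstdint>

enum { NAL_SLICE_IDR = 5, NAL_SPS = 7, NAL_PPS = 8 };

// Extract the nal_unit_type field from the first byte of a NAL unit.
inline unsigned nalUnitType(const uint8_t* nal) { return nal[0] & 0x1f; }

// e.g. re-detect the resolution only when an SPS arrives:
//   if (nalUnitType(fReceiveBuffer) == NAL_SPS) { ... }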