RTSPStream IpcamRuntime::addRTSPStream(IVideoStream *video, IAudioStream *audio)
{
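    // Publish the given video/audio pair as a new RTSP session; the stream
    // path is simply the current stream count ("0", "1", ...).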
    char const* description = "RTSP/RTP stream from live-streamer";
    ServerMediaSession *sms;
    std::string stream_path = std::to_string(_stream_list.size());
    sms = ServerMediaSession::createNew(_rtsp_server->envir(),
                                        stream_path.c_str(),
                                        stream_path.c_str(),
                                        description);
    if (sms) {
        if (video) {
            LiveVideoServerMediaSubsession* vsmss;
            vsmss = LiveVideoServerMediaSubsession
                ::createNew(_rtsp_server->envir(), *video);
            sms->addSubsession(vsmss);
        }
        if (audio) {
            LiveAudioServerMediaSubsession* asmss;
            asmss = LiveAudioServerMediaSubsession
                ::createNew(_rtsp_server->envir(), *audio);
            sms->addSubsession(asmss);
        }

        _rtsp_server->addServerMediaSession(sms);

        _stream_list.push_back(sms);
    }

    return sms;
}
int BasicRTSPOnlyServer::init_server() {

    if (env != NULL || rtspServer != NULL || mod == NULL || avType < 0 || avType >= NUM_RTSP_FORMATS){
        exit(1);
    }

    // Reclaim client sessions that show no liveness for this many seconds:
    unsigned reclamationTestSeconds = 60;

    TaskScheduler* scheduler = BasicTaskScheduler::createNew();
    env = BasicUsageEnvironment::createNew(*scheduler);

    UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
    // To implement client access control to the RTSP server, do the following:
    authDB = new UserAuthenticationDatabase;
    authDB->addUserRecord("i2cat", "ultragrid"); // replace these with real strings
    // Repeat the above with each <username>, <password> that you wish to allow
    // access to the server.
#endif

    if (fPort == 0){
        fPort = 8554;
    }

    rtspServer = RTSPServer::createNew(*env, fPort, authDB, reclamationTestSeconds);
    if (rtspServer == NULL) {
        *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
        exit(1);
    }
    ServerMediaSession* sms;
    sms = ServerMediaSession::createNew(*env, "ultragrid",
        "UltraGrid RTSP server enabling standard transport",
        "UltraGrid RTSP server");

    if (avType == av) {
        sms->addSubsession(BasicRTSPOnlySubsession
            ::createNew(*env, True, mod, audio, audio_codec, audio_sample_rate, audio_channels, audio_bps, rtp_port, rtp_port_audio));
        sms->addSubsession(BasicRTSPOnlySubsession
            ::createNew(*env, True, mod, video, audio_codec, audio_sample_rate, audio_channels, audio_bps, rtp_port, rtp_port_audio));
    } else if (avType == audio) {
        sms->addSubsession(BasicRTSPOnlySubsession
            ::createNew(*env, True, mod, audio, audio_codec, audio_sample_rate, audio_channels, audio_bps, rtp_port, rtp_port_audio));
    } else if (avType == video) {
        sms->addSubsession(BasicRTSPOnlySubsession
            ::createNew(*env, True, mod, video, audio_codec, audio_sample_rate, audio_channels, audio_bps, rtp_port, rtp_port_audio));
    } else {
        *env << "\n[RTSP Server] Error when trying to play stream type: \"" << avType << "\"\n";
        exit(1);
    }

    rtspServer->addServerMediaSession(sms);

    char* url = rtspServer->rtspURL(sms);
    *env << "\n[RTSP Server] Play this stream using the URL \"" << url << "\"\n";
    delete[] url;

    return 0;
}
static ServerMediaSession* createNewSMS(UsageEnvironment& env, LiveRtspServer& rRtspServer, const Channel& channel, 
                                        IRateAdaptationFactory* pFactory, IRateController* pGlobalRateControl)
{
  VLOG(2) << "createNewSMS: " << channel.ChannelName;
  ServerMediaSession* sms = ServerMediaSession::createNew(env, channel.ChannelName.c_str(), channel.ChannelName.c_str(), "Session streamed by \"MSS\"", False/*SSM*/);
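  // Note: the final False argument means this is a regular unicast session,
  // not a source-specific multicast (SSM) session.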

  // at least a video or audio descriptor must be set
  assert(channel.VideoDescriptor || channel.AudioDescriptor);

  // FIXME: hard-coded in application and lib for now!
  uint32_t uiVideoId = 0;
  uint32_t uiAudioId = 0;

  // create the live media RTSP subsessions
  if (channel.VideoDescriptor)
  {
    // if there is a video channel, the audio source id increases!
    uiAudioId = 1;
    LiveMediaSubsession* pLiveMediaSubsession = LiveMediaSubsessionFactory::createVideoSubsession(env, rRtspServer, channel.ChannelName, channel.ChannelId, uiVideoId, *(channel.VideoDescriptor), 
                                                                                                  pFactory, pGlobalRateControl);
    if (pLiveMediaSubsession == NULL)
    {
      LOG(WARNING) << "TODO: Invalid video subsession";
      Medium::close(sms);
      return NULL;
    }
    else
    {
      VLOG(2) << "Added " << channel.ChannelName << " to video ServerMediaSession";
      sms->addSubsession(pLiveMediaSubsession);
      pLiveMediaSubsession->setClientJoinHandler(boost::bind(&LiveRtspServer::onClientJoin, boost::ref(rRtspServer), _1, _2, _3, _4));
      pLiveMediaSubsession->setClientUpdateHandler(boost::bind(&LiveRtspServer::onClientUpdate, boost::ref(rRtspServer), _1, _2, _3, _4));
      pLiveMediaSubsession->setClientLeaveHandler(boost::bind(&LiveRtspServer::onClientLeave, boost::ref(rRtspServer), _1, _2, _3));
    }
  }

  // create the live media RTSP subsessions
  if (channel.AudioDescriptor)
  {
    LiveMediaSubsession* pLiveMediaSubsession = LiveMediaSubsessionFactory::createAudioSubsession(env, rRtspServer, channel.ChannelName, channel.ChannelId, uiAudioId, *(channel.AudioDescriptor), 
                                                                                                  pFactory, pGlobalRateControl);
    if (pLiveMediaSubsession == NULL)
    {
      LOG(WARNING) << "TODO: Invalid audio subsession";
      Medium::close(sms);
      return NULL;
    }
    else
    {
      VLOG(2) << "Added " << channel.ChannelName << " audio to ServerMediaSession";
      sms->addSubsession(pLiveMediaSubsession);
      pLiveMediaSubsession->setClientJoinHandler(boost::bind(&LiveRtspServer::onClientJoin, boost::ref(rRtspServer), _1, _2, _3, _4));
      pLiveMediaSubsession->setClientUpdateHandler(boost::bind(&LiveRtspServer::onClientUpdate, boost::ref(rRtspServer), _1, _2, _3, _4));
      pLiveMediaSubsession->setClientLeaveHandler(boost::bind(&LiveRtspServer::onClientLeave, boost::ref(rRtspServer), _1, _2, _3));
    }
  }

  return sms;
}
void RTSPManager::createRTSPServer(unsigned int id, unsigned int port, volatile char* watcher)
{
	std::unique_lock<std::mutex> lock(_lock);
	TaskScheduler* taskScheduler = BasicTaskScheduler::createNew();
	BasicUsageEnvironment* usageEnvironment = BasicUsageEnvironment::createNew(*taskScheduler);
	RTSPServer* rtspServer = RTSPServer::createNew(*usageEnvironment, port, NULL);
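	// Passing NULL for the authentication database leaves the server open
	// (no RTSP digest authentication).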

	if(rtspServer == NULL)
	{
		logger::log(usageEnvironment->getResultMsg() , logger::logType::FAILURE);
		*watcher = -1;
		this->_done = true;
		this->_condition.notify_all();
		return;
	}

	H264LiveServerMediaSession* liveSubSession = H264LiveServerMediaSession::createNew(*usageEnvironment, true, id);
	std::string streamName = "camera_" + std::to_string(id);
	ServerMediaSession* sms = ServerMediaSession::createNew(*usageEnvironment, streamName.c_str(), streamName.c_str(), "Live H264 Stream");
	sms->addSubsession(liveSubSession);
	rtspServer->addServerMediaSession(sms);
	char* url = rtspServer->rtspURL(sms);
	logger::log(INFO_RTSP_URL(url), logger::logType::PRIORITY);
	delete[] url;

	this->_done = true;
	this->_condition.notify_all();
	lock.unlock();
	taskScheduler->doEventLoop(watcher);
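	// (doEventLoop() above runs until *watcher is set non-zero, e.g. by
	// another thread; only then does this function return.)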

	return;
}
Example #5
int main (int argc, char **argv)
{
	// env
	TaskScheduler *scheduler = BasicTaskScheduler::createNew();
	_env = BasicUsageEnvironment::createNew(*scheduler);

	// test
	//test(*_env);

	// rtsp server
	RTSPServer *rtspServer = RTSPServer::createNew(*_env, 9554);
	if (!rtspServer) {
		fprintf(stderr, "ERR: create RTSPServer err\n");
		::exit(-1);
	}

	// add live stream
	do {
		WebcamFrameSource *webcam_source = 0;

		ServerMediaSession *sms = ServerMediaSession::createNew(*_env, "webcam", 0, "Session from /dev/video1"); 
		sms->addSubsession(WebcamOndemandMediaSubsession::createNew(*_env, webcam_source));
		rtspServer->addServerMediaSession(sms);

		char *url = rtspServer->rtspURL(sms);
		*_env << "using url \"" << url << "\"\n";
		delete [] url;
	} while (0);

	// run loop
	_env->taskScheduler().doEventLoop();

	return 1;
}
Example #6
// -----------------------------------------
//    add an RTSP session
// -----------------------------------------
void addSession(RTSPServer* rtspServer, const char* sessionName, ServerMediaSubsession *subSession, ServerMediaSubsession *audio_subSession)
{
	UsageEnvironment& env(rtspServer->envir());
	ServerMediaSession* sms = ServerMediaSession::createNew(env, sessionName);
	sms->addSubsession(subSession);
	
	if (audio_subSession)
		sms->addSubsession(audio_subSession);

	rtspServer->addServerMediaSession(sms);

	char* url = rtspServer->rtspURL(sms);

	fprintf(stderr, "lay this stream using the URL: \"%s\"\n", url );

	delete[] url;			
}
int _tmain(int argc, TCHAR* argv[])
{
	try{
	   const char* str = "1234";
	   fprintf(stderr, "%s", str);
		OutPacketBuffer::maxSize = 200000;
		TaskScheduler* scheduler = BasicTaskScheduler::createNew();
		env = BasicUsageEnvironment::createNew(*scheduler);
		*env<<"mack is a good boy";
		string s = "0123456789";
		s = s.substr(8);
		::CoInitialize(NULL);
		CDshowCapInfoMgr* mgr = new CDshowCapInfoMgr();
		mgr->enumAllCapInfo();
		mgr->printCapDetail();
		//CDShowCapInfo* pInfo = mgr->getVideoInfo(0);	
		//CDeviceCapture* device = new CDeviceCapture(DEVICE_CAP_VIDEO_TYPE,pInfo->getFriendlyName(),pInfo->getMediaOption(0));
		CDShowCapInfo* pInfo = mgr->getAudioInfo(0);	
		CDeviceCapture* device = new CDeviceCapture(DEVICE_CAP_AUDIO_TYPE,pInfo->getFriendlyName(),pInfo->getMediaOption(8));

		device->startCap();
		
		
		UserAuthenticationDatabase* authDB = NULL;
		RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
		if (rtspServer == NULL) {
			*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
			exit(1);
		}

		char const* descriptionString
			= "Session streamed by \"testOnDemandRTSPServer\"";

		{
			device->startCap();
			char const* streamName = "h264ESVideoTest";
			char const* inputFileName = "test.264";
			ServerMediaSession* sms
				= ServerMediaSession::createNew(*env, streamName, streamName,
				descriptionString);
			sms->addSubsession(H264DshowCapMediaServerSubsession
				::createNew(*env, inputFileName,device));
			rtspServer->addServerMediaSession(sms);

			announceStream(rtspServer, sms, streamName, inputFileName);
		}
		env->taskScheduler().doEventLoop(); // does not return
		while (true)
		{
			Sleep(1000);
		}
	}
	catch (std::exception& e)
	{
		printf("%s", e.what());
	}
	return 0;
}
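Several of these examples call an announceStream() helper that is not shown. In the live555 "testOnDemandRTSPServer" demo it is essentially the following (reproduced here as a sketch):

static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
                           char const* streamName, char const* inputFileName) {
  char* url = rtspServer->rtspURL(sms);
  UsageEnvironment& env = rtspServer->envir();
  env << "\n\"" << streamName << "\" stream, from the file \""
      << inputFileName << "\"\n";
  env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url; // rtspURL() allocates the string; the caller must delete[] it
}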
int main (int argc, char **argv)
{
	// env
	TaskScheduler *scheduler = BasicTaskScheduler::createNew();
	_env = BasicUsageEnvironment::createNew(*scheduler);

	// rtsp server
	RTSPServer *rtspServer = RTSPServer::createNew(*_env, SINK_PORT);
	if (!rtspServer) {
		fprintf(stderr, "ERR: create RTSPServer err\n");
		exit(-1);
	}

	// add live stream
	do {
        // low resolution
		ServerMediaSession *sms = ServerMediaSession::createNew(*_env,
                "live", 0, "Session from /dev/video0"); 
		sms->addSubsession(WebcamOndemandMediaSubsession::createNew(*_env,
                    640, 360, PIX_FMT_YUV420P, FRAME_PER_SEC));
		rtspServer->addServerMediaSession(sms);

		char *url = rtspServer->rtspURL(sms);
		*_env << "using url \"" << url << "\"\n";
		delete [] url;

        // high resolution
		sms = ServerMediaSession::createNew(*_env,
                "live-high", 0, "Session from /dev/video0 with high resolution"); 
		sms->addSubsession(WebcamOndemandMediaSubsession::createNew(*_env,
                    1280, 720, PIX_FMT_YUV420P, FRAME_PER_SEC));
		rtspServer->addServerMediaSession(sms);

		url = rtspServer->rtspURL(sms);
		*_env << "using url \"" << url << "\"\n";
		delete [] url;
	} while (0);

	// run loop
	_env->taskScheduler().doEventLoop();

	return 1;
}
// -----------------------------------------
//    add an RTSP session
// -----------------------------------------
void addSession(RTSPServer* rtspServer, const char* sessionName, ServerMediaSubsession *subSession)
{
	UsageEnvironment& env(rtspServer->envir());
	ServerMediaSession* sms = ServerMediaSession::createNew(env, sessionName);
	sms->addSubsession(subSession);
	rtspServer->addServerMediaSession(sms);

	char* url = rtspServer->rtspURL(sms);
	env << "Play this stream using the URL \"" << url << "\"\n";
	delete[] url;			
}
Example #10
// -----------------------------------------
//    add an RTSP session
// -----------------------------------------
void addSession(RTSPServer* rtspServer, const std::string & sessionName, ServerMediaSubsession *subSession)
{
	UsageEnvironment& env(rtspServer->envir());
	ServerMediaSession* sms = ServerMediaSession::createNew(env, sessionName.c_str());
	if (sms != NULL)
	{
		sms->addSubsession(subSession);
		rtspServer->addServerMediaSession(sms);

		char* url = rtspServer->rtspURL(sms);
		if (url != NULL)
		{
			LOG(NOTICE) << "Play this stream using the URL \"" << url << "\"";
			delete[] url;			
		}
	}
}
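For context, a minimal complete server built around this addSession() helper might look like the sketch below. The port number, stream name, and input file are placeholder choices; ADTSAudioFileServerMediaSubsession is a stock live555 subsession used here only as an example, and the LOG macro used inside addSession() is assumed to be available.

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

int main() {
	TaskScheduler* scheduler = BasicTaskScheduler::createNew();
	UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
	RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, NULL);
	if (rtspServer == NULL) {
		*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
		return 1;
	}
	// Reuse the helper above to register a single on-demand AAC session.
	addSession(rtspServer, "aacAudio",
	           ADTSAudioFileServerMediaSubsession::createNew(*env, "test.aac", False));
	env->taskScheduler().doEventLoop(); // does not return
	return 0;
}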
int main(int argc, char** argv) {  
    // Begin by setting up our usage environment:  
    TaskScheduler* scheduler = BasicTaskScheduler::createNew();  
    UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);  
  
    UserAuthenticationDatabase* authDB = NULL;  
#ifdef ACCESS_CONTROL  
    // To implement client access control to the RTSP server, do the following:  
    authDB = new UserAuthenticationDatabase;  
    authDB->addUserRecord("username1", "password1"); // replace these with real strings  
    // Repeat the above with each <username>, <password> that you wish to allow  
    // access to the server.  
#endif  
  
    // Create the RTSP server:  
    RTSPServer* rtspServer = RTSPServer::createNew(*env, 554, authDB);  
    if (rtspServer == NULL) {  
        *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";  
        exit(1);  
    }  
  
    // Add live stream  
  
    WW_H264VideoSource * videoSource = 0;  
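    // The source pointer is left null here; presumably the custom subsession
    // creates the actual H.264 source on demand.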
  
    ServerMediaSession * sms = ServerMediaSession::createNew(*env, "live", 0, "ww live test");  
    sms->addSubsession(WW_H264VideoServerMediaSubsession::createNew(*env, videoSource));  
    rtspServer->addServerMediaSession(sms);  
  
    char * url = rtspServer->rtspURL(sms);  
    *env << "using url \"" << url << "\"\n";  
    delete[] url;  
  
    // Run loop  
    env->taskScheduler().doEventLoop();  
  
    rtspServer->removeServerMediaSession(sms);  
  
    Medium::close(rtspServer);  
  
    env->reclaim();  
  
    delete scheduler;  
  
    return 1;  
}  
ServerMediaSession *HimppMedia::addSMS(HimppVideoEncoder& encoder)
{
    ServerMediaSession *sms;
    VENC_CHN chnid = encoder.channelId();
    char const* description = "RTSP/RTP stream from live-streamer";
    std::string streamPath = std::to_string(chnid);
    sms = ServerMediaSession::createNew(*_env,
                                        streamPath.c_str(),
                                        streamPath.c_str(),
                                        description);
    if (sms) {
        HimppServerMediaSubsession* h264smss;
        h264smss = HimppServerMediaSubsession::createNew(*_env, encoder);
        sms->addSubsession(h264smss);
    }

    return sms;
}
static ServerMediaSession* createNewSMS(UsageEnvironment& env,
                                        char const* fileName, FILE* /*fid*/) {
    // Use the file name extension to determine the type of "ServerMediaSession":
    char const* extension = strrchr(fileName, '.');
    if (extension == NULL) return NULL;

    ServerMediaSession* sms = NULL;
    Boolean const reuseSource = False;
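    // NEW_SMS() is a macro defined outside this excerpt. In live555's
    // DynamicRTSPServer.cpp it expands to roughly the following (sketch):
    //   #define NEW_SMS(description) do {
    //     char const* descStr = description ", streamed by the LIVE555 Media Server";
    //     sms = ServerMediaSession::createNew(env, fileName, fileName, descStr);
    //   } while(0)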
    if (strcmp(extension, ".aac") == 0) {
        // Assumed to be an AAC Audio (ADTS format) file:
        NEW_SMS("AAC Audio");
        sms->addSubsession(ADTSAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
    } else if (strcmp(extension, ".amr") == 0) {
        // Assumed to be an AMR Audio file:
        NEW_SMS("AMR Audio");
        sms->addSubsession(AMRAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
    } else if (strcmp(extension, ".m4e") == 0) {
        // Assumed to be a MPEG-4 Video Elementary Stream file:
        NEW_SMS("MPEG-4 Video");
        sms->addSubsession(MPEG4VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
    } else if (strcmp(extension, ".mp3") == 0) {
        // Assumed to be a MPEG-1 or 2 Audio file:
        NEW_SMS("MPEG-1 or 2 Audio");
        // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following:
//#define STREAM_USING_ADUS 1
        // To also reorder ADUs before streaming, uncomment the following:
//#define INTERLEAVE_ADUS 1
        // (For more information about ADUs and interleaving,
        //  see <http://www.live555.com/rtp-mp3/>)
        Boolean useADUs = False;
        Interleaving* interleaving = NULL;
#ifdef STREAM_USING_ADUS
        useADUs = True;
#ifdef INTERLEAVE_ADUS
        unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own...
        unsigned const interleaveCycleSize
            = (sizeof interleaveCycle)/(sizeof (unsigned char));
        interleaving = new Interleaving(interleaveCycleSize, interleaveCycle);
#endif
#endif
        sms->addSubsession(MP3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, useADUs, interleaving));
    } else if (strcmp(extension, ".mpg") == 0) {
        // Assumed to be a MPEG-1 or 2 Program Stream (audio+video) file:
        NEW_SMS("MPEG-1 or 2 Program Stream");
        MPEG1or2FileServerDemux* demux
            = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource);
        sms->addSubsession(demux->newVideoServerMediaSubsession());
        sms->addSubsession(demux->newAudioServerMediaSubsession());
    } else if (strcmp(extension, ".ts") == 0) {
        // Assumed to be a MPEG Transport Stream file:
        // Use an index file name that's the same as the TS file name, except with ".tsx":
        unsigned indexFileNameLen = strlen(fileName) + 2; // allow for trailing "x\0"
        char* indexFileName = new char[indexFileNameLen];
        sprintf(indexFileName, "%sx", fileName);
        NEW_SMS("MPEG Transport Stream");
        sms->addSubsession(MPEG2TransportFileServerMediaSubsession::createNew(env, fileName, indexFileName, reuseSource));
        delete[] indexFileName;
    } else if (strcmp(extension, ".wav") == 0) {
        // Assumed to be a WAV Audio file:
        NEW_SMS("WAV Audio Stream");
        // To convert 16-bit PCM data to 8-bit u-law, prior to streaming,
        // change the following to True:
        Boolean convertToULaw = False;
        sms->addSubsession(WAVAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, convertToULaw));
    } else if (strcmp(extension, ".dv") == 0) {
        // Assumed to be a DV Video file
        // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000).
        OutPacketBuffer::maxSize = 300000;

        NEW_SMS("DV Video");
        sms->addSubsession(DVVideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
    }

    return sms;
}
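A sketch of how this extension-based factory is typically driven: the caller first verifies that the file exists, then delegates to createNewSMS(). The function name and error handling here are illustrative, not from the original.

static ServerMediaSession* lookupSMS(UsageEnvironment& env, char const* fileName) {
    FILE* fid = fopen(fileName, "rb");
    if (fid == NULL) return NULL;              // no such file => no session
    ServerMediaSession* sms = createNewSMS(env, fileName, fid);
    fclose(fid);
    return sms;
}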
JNIEXPORT void JNICALL Java_com_parizene_streamer_Streamer_loop(JNIEnv *env,
		jobject obj, jstring addr) {
	// Begin by setting up our usage environment:
	TaskScheduler* scheduler = BasicTaskScheduler::createNew();
	uEnv = BasicUsageEnvironment::createNew(*scheduler);

	// Create 'groupsocks' for RTP and RTCP:
	struct in_addr destinationAddress;
	const char *_addr = env->GetStringUTFChars(addr, NULL);
	destinationAddress.s_addr = our_inet_addr(_addr); /*chooseRandomIPv4SSMAddress(*uEnv);*/
	env->ReleaseStringUTFChars(addr, _addr);
	// Note: This is a multicast address.  If you wish instead to stream
	// using unicast, then you should use the "testOnDemandRTSPServer"
	// test program - not this test program - as a model.

	const unsigned short rtpPortNum = 18888;
	const unsigned short rtcpPortNum = rtpPortNum + 1;
	const unsigned char ttl = 255;

	const Port rtpPort(rtpPortNum);
	const Port rtcpPort(rtcpPortNum);

	Groupsock rtpGroupsock(*uEnv, destinationAddress, rtpPort, ttl);
	Groupsock rtcpGroupsock(*uEnv, destinationAddress, rtcpPort, ttl);

	// Create a 'H264 Video RTP' sink from the RTP 'groupsock':
	OutPacketBuffer::maxSize = 100000;
	videoSink = H264VideoRTPSink::createNew(*uEnv, &rtpGroupsock, 96);

	// Create (and start) a 'RTCP instance' for this RTP sink:
	const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
	const unsigned maxCNAMElen = 100;
	unsigned char CNAME[maxCNAMElen + 1];
	gethostname((char*) CNAME, maxCNAMElen);
	CNAME[maxCNAMElen] = '\0'; // just in case
	RTCPInstance* rtcp = RTCPInstance::createNew(*uEnv, &rtcpGroupsock,
			estimatedSessionBandwidth, CNAME, videoSink,
			NULL /* we're a server */, True /* we're a SSM source */);
	// Note: This starts RTCP running automatically

	RTSPServer* rtspServer = RTSPServer::createNew(*uEnv, 8554);
	if (rtspServer == NULL) {
		LOGE("Failed to create RTSP server: %s", uEnv->getResultMsg());
		exit(1);
	}
	ServerMediaSession* sms = ServerMediaSession::createNew(*uEnv, "streamer",
			inputFilename, "Session streamed by \"testH264VideoStreamer\"",
			True /*SSM*/);
	sms->addSubsession(
			PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
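	// A PassiveServerMediaSubsession does not create its own source; it
	// describes the multicast RTP/RTCP streams already being sent above.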
	rtspServer->addServerMediaSession(sms);

	char* url = rtspServer->rtspURL(sms);
	LOGI("Play this stream using the URL \"%s\"", url);
	delete[] url;

	// Start the streaming:
	LOGI("Beginning streaming...\n");
	play();

	uEnv->taskScheduler().doEventLoop(); // does not return
}
Example #15
int main(int argc, char** argv) {
  init_signals();
  setpriority(PRIO_PROCESS, 0, 0);
  int IsSilence = 0;
  int svcEnable = 0;
  int cnt=0;
  int activePortCnt=0;
  if( GetSampleRate() == 16000 )
  {
	audioOutputBitrate = 128000;
	audioSamplingFrequency = 16000;
  }else{
	audioOutputBitrate = 64000;
	audioSamplingFrequency = 8000;
  }
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
  int msg_type, video_type;
  APPROInput* MjpegInputDevice = NULL;
  APPROInput* H264InputDevice = NULL;
  APPROInput* Mpeg4InputDevice = NULL;
  static pid_t child[8] = {
	-1, -1, -1, -1, -1, -1, -1, -1
  };

  StreamingMode streamingMode = STREAMING_UNICAST;
  netAddressBits multicastAddress = 0;//our_inet_addr("224.1.4.6");
  portNumBits videoRTPPortNum = 0;
  portNumBits audioRTPPortNum = 0;

  IsSilence = 0;
  svcEnable = 0;
  audioType = AUDIO_G711;
  streamingMode = STREAMING_UNICAST;

  for( cnt = 1; cnt < argc ;cnt++ )
  {
	if( strcmp( argv[cnt],"-m" )== 0  )
	{
		streamingMode = STREAMING_MULTICAST_SSM;
	}

	if( strcmp( argv[cnt],"-s" )== 0  )
	{
		IsSilence = 1;
	}

	if( strcmp( argv[cnt],"-a" )== 0  )
	{
		audioType = AUDIO_AAC;
	}

	if( strcmp( argv[cnt],"-v" )== 0  )
	{
		svcEnable = 1;
	}
  }

#if 0
  printf("###########IsSilence = %d ################\n",IsSilence);
  printf("###########streamingMode = %d ################\n",streamingMode);
  printf("###########audioType = %d ################\n",audioType);
  printf("###########svcEnable = %d ################\n",svcEnable);
#endif

  child[0] = fork();

  if( child[0] != 0 )
  {
	child[1] = fork();
  }

  if( child[0] != 0 && child[1] != 0 )
  {
	child[2] = fork();
  }

  if( child[0] != 0 && child[1] != 0 && child[2] != 0 )
  {
	child[3] = fork();
  }

  if(svcEnable) {
	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0)
	  {
		child[4] = fork();
	  }

	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0 && child[4] != 0)
	  {
		child[5] = fork();
	  }

	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0 && child[4] != 0 && child[5] != 0)
	  {
		child[6] = fork();
	  }

	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0 && child[4] != 0 && child[5] != 0 && child[6] != 0)
	  {
		child[7] = fork();
	  }
  }

  if( child[0] == 0 )
  {
	/* child process */
	msg_type = LIVE_MSG_TYPE4;
	video_type = VIDEO_TYPE_H264_CIF;
	rtspServerPortNum = 8556;
	H264VideoBitrate = 12000000;
	videoRTPPortNum = 6012;
	audioRTPPortNum = 6014;
  }
  if( child[1] == 0 )
  {
	/* child process */
	msg_type = LIVE_MSG_TYPE3;
	video_type = VIDEO_TYPE_MJPEG;
	rtspServerPortNum = 8555;
	MjpegVideoBitrate = 12000000;
	videoRTPPortNum = 6008;
	audioRTPPortNum = 6010;
  }
  if( child[2] == 0 )
  {
	/* child process */
	msg_type = LIVE_MSG_TYPE;
	video_type = VIDEO_TYPE_MPEG4;
	rtspServerPortNum = 8553;
	Mpeg4VideoBitrate = 12000000;
	videoRTPPortNum = 6000;
	audioRTPPortNum = 6002;
  }
  if( child[3] == 0 )
  {
	/* child process */
	msg_type = LIVE_MSG_TYPE2;
	video_type = VIDEO_TYPE_MPEG4_CIF;
	rtspServerPortNum = 8554;
	Mpeg4VideoBitrate = 12000000;
	videoRTPPortNum = 6004;
	audioRTPPortNum = 6006;
  }

  if(svcEnable) {
	  if( child[4] == 0 )
	  {
		/* child process */
		msg_type = LIVE_MSG_TYPE5;
		video_type = VIDEO_TYPE_H264_SVC_30FPS;
		rtspServerPortNum = 8601;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6016;
		audioRTPPortNum = 6018;
	  }
	  if( child[5] == 0 )
	  {
		/* child process */
		msg_type = LIVE_MSG_TYPE6;
		video_type = VIDEO_TYPE_H264_SVC_15FPS;
		rtspServerPortNum = 8602;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6020;
		audioRTPPortNum = 6022;
	  }
	  if( child[6] == 0 )
	  {
		/* child process */
		msg_type = LIVE_MSG_TYPE7;
		video_type = VIDEO_TYPE_H264_SVC_7FPS;
		rtspServerPortNum = 8603;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6024;
		audioRTPPortNum = 6026;
	  }
	  if( child[7] == 0 )
	  {
		/* child process */
		msg_type = LIVE_MSG_TYPE8;
		video_type = VIDEO_TYPE_H264_SVC_3FPS;
		rtspServerPortNum = 8604;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6028;
		audioRTPPortNum = 6030;
	  }
	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0 && child[4] != 0 && child[5] != 0 && child[6] != 0 && child[7] != 0)
	  {
		/* parent, success */
		msg_type = LIVE_MSG_TYPE9;
		video_type = VIDEO_TYPE_H264;
		rtspServerPortNum = 8557;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6032;
		audioRTPPortNum = 6034;
	  }
 }
 else {
  	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0)
	  {
		/* parent, success */
		msg_type = LIVE_MSG_TYPE5;
		video_type = VIDEO_TYPE_H264;
		rtspServerPortNum = 8557;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6032;
		audioRTPPortNum = 6034;
	  }
 }

  videoType = video_type;

  // Objects used for multicast streaming:
  static Groupsock* rtpGroupsockAudio = NULL;
  static Groupsock* rtcpGroupsockAudio = NULL;
  static Groupsock* rtpGroupsockVideo = NULL;
  static Groupsock* rtcpGroupsockVideo = NULL;
  static FramedSource* sourceAudio = NULL;
  static RTPSink* sinkAudio = NULL;
  static RTCPInstance* rtcpAudio = NULL;
  static FramedSource* sourceVideo = NULL;
  static RTPSink* sinkVideo = NULL;
  static RTCPInstance* rtcpVideo = NULL;

  share_memory_init(msg_type);

  //init_signals();

  *env << "Initializing...\n";


  // Initialize the WIS input device:
  if( video_type == VIDEO_TYPE_MJPEG)
  {
	  MjpegInputDevice = APPROInput::createNew(*env, VIDEO_TYPE_MJPEG);
	  if (MjpegInputDevice == NULL) {
	    err(*env) << "Failed to create MJPEG input device\n";
	    exit(1);
	  }
  }

  if( video_type == VIDEO_TYPE_H264 || video_type == VIDEO_TYPE_H264_CIF || video_type == VIDEO_TYPE_H264_SVC_30FPS ||
		video_type == VIDEO_TYPE_H264_SVC_15FPS || video_type == VIDEO_TYPE_H264_SVC_7FPS || video_type == VIDEO_TYPE_H264_SVC_3FPS)
  {
	  H264InputDevice = APPROInput::createNew(*env, video_type);
	  if (H264InputDevice == NULL) {
	    err(*env) << "Failed to create MJPEG input device\n";
	    exit(1);
	  }
  }

  if( video_type == VIDEO_TYPE_MPEG4 || video_type == VIDEO_TYPE_MPEG4_CIF )
  {
	  Mpeg4InputDevice = APPROInput::createNew(*env, video_type);
	  if (Mpeg4InputDevice == NULL) {
		err(*env) << "Failed to create MPEG4 input device\n";
		exit(1);
	  }
  }

  // Create the RTSP server:
  RTSPServer* rtspServer = NULL;
  // Normal case: Streaming from a built-in RTSP server:
  rtspServer = RTSPServer::createNew(*env, rtspServerPortNum, NULL);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }

  *env << "...done initializing\n";

  if( streamingMode == STREAMING_UNICAST )
  {
	  if( video_type == VIDEO_TYPE_MJPEG)
	  {
	    ServerMediaSession* sms
	      = ServerMediaSession::createNew(*env, MjpegStreamName, MjpegStreamName, streamDescription,streamingMode == STREAMING_MULTICAST_SSM);
	    sms->addSubsession(WISJPEGVideoServerMediaSubsession
				 ::createNew(sms->envir(), *MjpegInputDevice, MjpegVideoBitrate));
	    if( IsSilence == 0)
	    {
			sms->addSubsession(WISPCMAudioServerMediaSubsession::createNew(sms->envir(), *MjpegInputDevice));
	    }

	    rtspServer->addServerMediaSession(sms);

	    char *url = rtspServer->rtspURL(sms);
	    *env << "Play this stream using the URL:\n\t" << url << "\n";
	    delete[] url;
	  }

	  if( video_type == VIDEO_TYPE_H264 || video_type == VIDEO_TYPE_H264_CIF || video_type == VIDEO_TYPE_H264_SVC_30FPS ||
			video_type == VIDEO_TYPE_H264_SVC_15FPS || video_type == VIDEO_TYPE_H264_SVC_7FPS || video_type ==VIDEO_TYPE_H264_SVC_3FPS)
	  {
	    ServerMediaSession* sms
	      = ServerMediaSession::createNew(*env, H264StreamName, H264StreamName, streamDescription, streamingMode == STREAMING_MULTICAST_SSM);
	    sms->addSubsession(WISH264VideoServerMediaSubsession
				 ::createNew(sms->envir(), *H264InputDevice, H264VideoBitrate));
	    if( IsSilence == 0)
	    {
	    	sms->addSubsession(WISPCMAudioServerMediaSubsession::createNew(sms->envir(), *H264InputDevice));

	    }
	    rtspServer->addServerMediaSession(sms);

	    char *url = rtspServer->rtspURL(sms);
	    *env << "Play this stream using the URL:\n\t" << url << "\n";
	    delete[] url;
	  }

	    // Create a record describing the media to be streamed:
	  if( video_type == VIDEO_TYPE_MPEG4 || video_type == VIDEO_TYPE_MPEG4_CIF )
	  {
	    ServerMediaSession* sms
	      = ServerMediaSession::createNew(*env, Mpeg4StreamName, Mpeg4StreamName, streamDescription,streamingMode == STREAMING_MULTICAST_SSM);
	    sms->addSubsession(WISMPEG4VideoServerMediaSubsession
				 ::createNew(sms->envir(), *Mpeg4InputDevice, Mpeg4VideoBitrate));
	    if( IsSilence == 0)
	    {
	    	sms->addSubsession(WISPCMAudioServerMediaSubsession::createNew(sms->envir(), *Mpeg4InputDevice));
	    }

	    rtspServer->addServerMediaSession(sms);


	    char *url = rtspServer->rtspURL(sms);
	    *env << "Play this stream using the URL:\n\t" << url << "\n";
	    delete[] url;
	  }
  }else{


	if (streamingMode == STREAMING_MULTICAST_SSM)
	{
		if (multicastAddress == 0)
			multicastAddress = chooseRandomIPv4SSMAddress(*env);
	} else if (multicastAddress != 0) {
		streamingMode = STREAMING_MULTICAST_ASM;
	}

	struct in_addr dest; dest.s_addr = multicastAddress;
	const unsigned char ttl = 255;

	// For RTCP:
	const unsigned maxCNAMElen = 100;
	unsigned char CNAME[maxCNAMElen + 1];
	gethostname((char *) CNAME, maxCNAMElen);
	CNAME[maxCNAMElen] = '\0';      // just in case

	ServerMediaSession* sms=NULL;

	if( video_type == VIDEO_TYPE_MJPEG)
	{
		sms = ServerMediaSession::createNew(*env, MjpegStreamName, MjpegStreamName, streamDescription,streamingMode == STREAMING_MULTICAST_SSM);

		sourceAudio = MjpegInputDevice->audioSource();
		sourceVideo = WISJPEGStreamSource::createNew(MjpegInputDevice->videoSource());
		// Create 'groupsocks' for RTP and RTCP:
	    const Port rtpPortVideo(videoRTPPortNum);
	    const Port rtcpPortVideo(videoRTPPortNum+1);
	    rtpGroupsockVideo = new Groupsock(*env, dest, rtpPortVideo, ttl);
	    rtcpGroupsockVideo = new Groupsock(*env, dest, rtcpPortVideo, ttl);
	    if (streamingMode == STREAMING_MULTICAST_SSM) {
	      rtpGroupsockVideo->multicastSendOnly();
	      rtcpGroupsockVideo->multicastSendOnly();
	    }
		setVideoRTPSinkBufferSize();
		sinkVideo = JPEGVideoRTPSink::createNew(*env, rtpGroupsockVideo);

	}

	if( video_type == VIDEO_TYPE_H264 || video_type == VIDEO_TYPE_H264_CIF ||
		video_type == VIDEO_TYPE_H264_SVC_30FPS || video_type == VIDEO_TYPE_H264_SVC_15FPS ||
			video_type == VIDEO_TYPE_H264_SVC_7FPS || video_type == VIDEO_TYPE_H264_SVC_3FPS)
	{
 		sms = ServerMediaSession::createNew(*env, H264StreamName, H264StreamName, streamDescription,streamingMode == STREAMING_MULTICAST_SSM);

		sourceAudio = H264InputDevice->audioSource();
		sourceVideo = H264VideoStreamFramer::createNew(*env, H264InputDevice->videoSource());

		// Create 'groupsocks' for RTP and RTCP:
	    const Port rtpPortVideo(videoRTPPortNum);
	    const Port rtcpPortVideo(videoRTPPortNum+1);
	    rtpGroupsockVideo = new Groupsock(*env, dest, rtpPortVideo, ttl);
	    rtcpGroupsockVideo = new Groupsock(*env, dest, rtcpPortVideo, ttl);
	    if (streamingMode == STREAMING_MULTICAST_SSM) {
	      rtpGroupsockVideo->multicastSendOnly();
	      rtcpGroupsockVideo->multicastSendOnly();
	    }
		setVideoRTPSinkBufferSize();
		{
			char BuffStr[200];
			extern int GetSprop(void *pBuff, char vType);
			GetSprop(BuffStr,video_type);
			sinkVideo = H264VideoRTPSink::createNew(*env, rtpGroupsockVideo,96, 0x64001F,BuffStr);
		}

	}

	// Create a record describing the media to be streamed:
	if( video_type == VIDEO_TYPE_MPEG4 || video_type == VIDEO_TYPE_MPEG4_CIF )
	{
		sms = ServerMediaSession::createNew(*env, Mpeg4StreamName, Mpeg4StreamName, streamDescription,streamingMode == STREAMING_MULTICAST_SSM);

		sourceAudio = Mpeg4InputDevice->audioSource();
		sourceVideo = MPEG4VideoStreamDiscreteFramer::createNew(*env, Mpeg4InputDevice->videoSource());

		// Create 'groupsocks' for RTP and RTCP:
	    const Port rtpPortVideo(videoRTPPortNum);
	    const Port rtcpPortVideo(videoRTPPortNum+1);
	    rtpGroupsockVideo = new Groupsock(*env, dest, rtpPortVideo, ttl);
	    rtcpGroupsockVideo = new Groupsock(*env, dest, rtcpPortVideo, ttl);
	    if (streamingMode == STREAMING_MULTICAST_SSM) {
	      rtpGroupsockVideo->multicastSendOnly();
	      rtcpGroupsockVideo->multicastSendOnly();
	    }
		setVideoRTPSinkBufferSize();
		sinkVideo = MPEG4ESVideoRTPSink::createNew(*env, rtpGroupsockVideo,97);

	}
	/* VIDEO Channel initial */
	if(1)
	{
		// Create (and start) a 'RTCP instance' for this RTP sink:
		unsigned totalSessionBandwidthVideo = (Mpeg4VideoBitrate+500)/1000; // in kbps; for RTCP b/w share
		rtcpVideo = RTCPInstance::createNew(*env, rtcpGroupsockVideo,
					totalSessionBandwidthVideo, CNAME,
					sinkVideo, NULL /* we're a server */ ,
					streamingMode == STREAMING_MULTICAST_SSM);
	    // Note: This starts RTCP running automatically
		sms->addSubsession(PassiveServerMediaSubsession::createNew(*sinkVideo, rtcpVideo));

		// Start streaming:
		sinkVideo->startPlaying(*sourceVideo, NULL, NULL);
	}
	/* AUDIO Channel initial */
	if( IsSilence == 0)
	{
		// there's a separate RTP stream for audio
		// Create 'groupsocks' for RTP and RTCP:
		const Port rtpPortAudio(audioRTPPortNum);
		const Port rtcpPortAudio(audioRTPPortNum+1);

		rtpGroupsockAudio = new Groupsock(*env, dest, rtpPortAudio, ttl);
		rtcpGroupsockAudio = new Groupsock(*env, dest, rtcpPortAudio, ttl);

		if (streamingMode == STREAMING_MULTICAST_SSM)
		{
			rtpGroupsockAudio->multicastSendOnly();
			rtcpGroupsockAudio->multicastSendOnly();
		}
		if( audioSamplingFrequency == 16000 )
		{

			if( audioType == AUDIO_G711)
			{
				sinkAudio = SimpleRTPSink::createNew(*env, rtpGroupsockAudio, 96, audioSamplingFrequency, "audio", "PCMU", 1);
			}
			else
			{
				char const* encoderConfigStr = "1408";// (2<<3)|(8>>1) = 0x14 ; ((8<<7)&0xFF)|(1<<3)=0x08 ;
				sinkAudio = MPEG4GenericRTPSink::createNew(*env, rtpGroupsockAudio,
						       96,
						       audioSamplingFrequency,
						       "audio", "AAC-hbr",
						       encoderConfigStr, audioNumChannels);
			}
		}
		else{
			if(audioType == AUDIO_G711)
			{
				sinkAudio = SimpleRTPSink::createNew(*env, rtpGroupsockAudio, 0, audioSamplingFrequency, "audio", "PCMU", 1);
			}
			else{
				char const* encoderConfigStr =  "1588";// (2<<3)|(11>>1) = 0x15 ; ((11<<7)&0xFF)|(1<<3)=0x88 ;
				sinkAudio = MPEG4GenericRTPSink::createNew(*env, rtpGroupsockAudio,
						       96,
						       audioSamplingFrequency,
						       "audio", "AAC-hbr",
						       encoderConfigStr, audioNumChannels);

			}
		}

		// Create (and start) a 'RTCP instance' for this RTP sink:
		unsigned totalSessionBandwidthAudio = (audioOutputBitrate+500)/1000; // in kbps; for RTCP b/w share
		rtcpAudio = RTCPInstance::createNew(*env, rtcpGroupsockAudio,
					  totalSessionBandwidthAudio, CNAME,
					  sinkAudio, NULL /* we're a server */,
					  streamingMode == STREAMING_MULTICAST_SSM);
		// Note: This starts RTCP running automatically
		sms->addSubsession(PassiveServerMediaSubsession::createNew(*sinkAudio, rtcpAudio));

		// Start streaming:
		sinkAudio->startPlaying(*sourceAudio, NULL, NULL);
    }

	rtspServer->addServerMediaSession(sms);
	{
		struct in_addr dest; dest.s_addr = multicastAddress;
		char *url = rtspServer->rtspURL(sms);
		//char *url2 = inet_ntoa(dest);
		*env << "Mulicast Play this stream using the URL:\n\t" << url << "\n";
		//*env << "2 Mulicast addr:\n\t" << url2 << "\n";
		delete[] url;
	}
  }


  // Begin the LIVE555 event loop:
  env->taskScheduler().doEventLoop(&watchVariable); // does not return


  if( streamingMode!= STREAMING_UNICAST )
  {
	Medium::close(rtcpAudio);
	Medium::close(sinkAudio);
	Medium::close(sourceAudio);
	delete rtpGroupsockAudio;
	delete rtcpGroupsockAudio;

	Medium::close(rtcpVideo);
	Medium::close(sinkVideo);
	Medium::close(sourceVideo);
	delete rtpGroupsockVideo;
	delete rtcpGroupsockVideo;

  }

  Medium::close(rtspServer); // will also reclaim "sms" and its "ServerMediaSubsession"s
  if( MjpegInputDevice != NULL )
  {
	Medium::close(MjpegInputDevice);
  }

  if( H264InputDevice != NULL )
  {
	Medium::close(H264InputDevice);
  }

  if( Mpeg4InputDevice != NULL )
  {
	Medium::close(Mpeg4InputDevice);
  }

  env->reclaim();

  delete scheduler;

  ApproInterfaceExit();

  return 0; // only to prevent compiler warning

}
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
  // To implement client access control to the RTSP server, do the following:
  authDB = new UserAuthenticationDatabase;
  authDB->addUserRecord("username1", "password1"); // replace these with real strings
  // Repeat the above with each <username>, <password> that you wish to allow
  // access to the server.
#endif

  // Create the RTSP server:
  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }

  char const* descriptionString
    = "Session streamed by \"testOnDemandRTSPServer\"";

  // Set up each of the possible streams that can be served by the
  // RTSP server.  Each such stream is implemented using a
  // "ServerMediaSession" object, plus one or more
  // "ServerMediaSubsession" objects for each audio/video substream.

  // A MPEG-4 video elementary stream:
  {
    char const* streamName = "mpeg4ESVideoTest";
    char const* inputFileName = "test.m4e";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(MPEG4VideoFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A H.264 video elementary stream:
  {
    char const* streamName = "h264ESVideoTest";
    char const* inputFileName = "test.264";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(H264VideoFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A H.265 video elementary stream:
  {
    char const* streamName = "h265ESVideoTest";
    char const* inputFileName = "test.265";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(H265VideoFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-1 or 2 audio+video program stream:
  {
    char const* streamName = "mpeg1or2AudioVideoTest";
    char const* inputFileName = "test.mpg";
    // NOTE: This *must* be a Program Stream; not an Elementary Stream
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly));
    sms->addSubsession(demux->newAudioServerMediaSubsession());
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-1 or 2 video elementary stream:
  {
    char const* streamName = "mpeg1or2ESVideoTest";
    char const* inputFileName = "testv.mpg";
    // NOTE: This *must* be a Video Elementary Stream; not a Program Stream
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(MPEG1or2VideoFileServerMediaSubsession
	       ::createNew(*env, inputFileName, reuseFirstSource, iFramesOnly));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MP3 audio stream (actually, any MPEG-1 or 2 audio file will work):
  // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following:
//#define STREAM_USING_ADUS 1
  // To also reorder ADUs before streaming, uncomment the following:
//#define INTERLEAVE_ADUS 1
  // (For more information about ADUs and interleaving,
  //  see <http://www.live555.com/rtp-mp3/>)
  {
    char const* streamName = "mp3AudioTest";
    char const* inputFileName = "test.mp3";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    Boolean useADUs = False;
    Interleaving* interleaving = NULL;
#ifdef STREAM_USING_ADUS
    useADUs = True;
#ifdef INTERLEAVE_ADUS
    unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own...
    unsigned const interleaveCycleSize
      = (sizeof interleaveCycle)/(sizeof (unsigned char));
    interleaving = new Interleaving(interleaveCycleSize, interleaveCycle);
#endif
#endif
    sms->addSubsession(MP3AudioFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource,
				   useADUs, interleaving));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A WAV audio stream:
  {
    char const* streamName = "wavAudioTest";
    char const* inputFileName = "test.wav";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    // To convert 16-bit PCM data to 8-bit u-law, prior to streaming,
    // change the following to True:
    Boolean convertToULaw = False;
    sms->addSubsession(WAVAudioFileServerMediaSubsession
	       ::createNew(*env, inputFileName, reuseFirstSource, convertToULaw));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An AMR audio stream:
  {
    char const* streamName = "amrAudioTest";
    char const* inputFileName = "test.amr";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(AMRAudioFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A 'VOB' file (e.g., from an unencrypted DVD):
  {
    char const* streamName = "vobTest";
    char const* inputFileName = "test.vob";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    // Note: VOB files are MPEG-2 Program Stream files, but using AC-3 audio
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly));
    sms->addSubsession(demux->newAC3AudioServerMediaSubsession());
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-2 Transport Stream:
  {
    char const* streamName = "mpeg2TransportStreamTest";
    char const* inputFileName = "test.ts";
    char const* indexFileName = "test.tsx";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(MPEG2TransportFileServerMediaSubsession
		       ::createNew(*env, inputFileName, indexFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An AAC audio stream (ADTS-format file):
  {
    char const* streamName = "aacAudioTest";
    char const* inputFileName = "test.aac";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(ADTSAudioFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A DV video stream:
  {
    // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000).
    OutPacketBuffer::maxSize = 2000000;

    char const* streamName = "dvVideoTest";
    char const* inputFileName = "test.dv";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(DVVideoFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A AC3 video elementary stream:
  {
    char const* streamName = "ac3AudioTest";
    char const* inputFileName = "test.ac3";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);

    sms->addSubsession(AC3AudioFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));

    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A Matroska ('.mkv') file, with video+audio+subtitle streams:
  {
    char const* streamName = "matroskaFileTest";
    char const* inputFileName = "test.mkv";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);

    newDemuxWatchVariable = 0;
    MatroskaFileServerDemux::createNew(*env, inputFileName, onMatroskaDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = matroskaDemux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A WebM ('.webm') file, with video(VP8)+audio(Vorbis) streams:
  // (Note: ".webm' files are special types of Matroska files, so we use the same code as the Matroska ('.mkv') file code above.)
  {
    char const* streamName = "webmFileTest";
    char const* inputFileName = "test.webm";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);

    newDemuxWatchVariable = 0;
    MatroskaFileServerDemux::createNew(*env, inputFileName, onMatroskaDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = matroskaDemux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An Ogg ('.ogg') file, with video and/or audio streams:
  {
    char const* streamName = "oggFileTest";
    char const* inputFileName = "test.ogg";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);

    newDemuxWatchVariable = 0;
    OggFileServerDemux::createNew(*env, inputFileName, onOggDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = oggDemux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An Opus ('.opus') audio file:
  // (Note: ".opus' files are special types of Ogg files, so we use the same code as the Ogg ('.ogg') file code above.)
  {
    char const* streamName = "opusFileTest";
    char const* inputFileName = "test.opus";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);

    newDemuxWatchVariable = 0;
    OggFileServerDemux::createNew(*env, inputFileName, onOggDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = oggDemux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-2 Transport Stream, coming from a live UDP (raw-UDP or RTP/UDP) source:
  {
    char const* streamName = "mpeg2TransportStreamFromUDPSourceTest";
    char const* inputAddressStr = "239.255.42.42";
        // This causes the server to take its input from the stream sent by the "testMPEG2TransportStreamer" demo application.
        // (Note: If the input UDP source is unicast rather than multicast, then change this to NULL.)
    portNumBits const inputPortNum = 1234;
        // This causes the server to take its input from the stream sent by the "testMPEG2TransportStreamer" demo application.
    Boolean const inputStreamIsRawUDP = False;
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(MPEG2TransportUDPServerMediaSubsession
		       ::createNew(*env, inputAddressStr, inputPortNum, inputStreamIsRawUDP));
    rtspServer->addServerMediaSession(sms);

    char* url = rtspServer->rtspURL(sms);
    *env << "\n\"" << streamName << "\" stream, from a UDP Transport Stream input source \n\t(";
    if (inputAddressStr != NULL) {
      *env << "IP multicast address " << inputAddressStr << ",";
    } else {
      *env << "unicast;";
    }
    *env << " port " << inputPortNum << ")\n";
    *env << "Play this stream using the URL \"" << url << "\"\n";
    delete[] url;
  }

  // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
  // Try first with the default HTTP port (80), and then with the alternative HTTP
  // port numbers (8000 and 8080).

  if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
    *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
  } else {
    *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
  }

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
  // To implement client access control to the RTSP server, do the following:
  authDB = new UserAuthenticationDatabase;
  authDB->addUserRecord("username1", "password1"); // replace these with real strings
  // Repeat the above with each <username>, <password> that you wish to allow
  // access to the server.
#endif

  // Create the RTSP server:
  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }

  char const* descriptionString
    = "Session streamed by \"testOnDemandRTSPServer\"";

  // Set up each of the possible streams that can be served by the
  // RTSP server.  Each such stream is implemented using a
  // "ServerMediaSession" object, plus one or more
  // "ServerMediaSubsession" objects for each audio/video substream.

  // A MPEG-4 video elementary stream:
  {
    char const* streamName = "mpeg4ESVideoTest";
    char const* inputFileName = "test.m4e";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(MPEG4VideoFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }
  // A H264 video elementary stream
  {
 /*   char const* streamName = "h264VideoTest";
    char const* inputFileName = "test.h264";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
                                      descriptionString);
    sms->addSubsession(H264VideoFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);
    announceStream(rtspServer, sms, streamName, inputFileName);
  */
  }
  // A MPEG-1 or 2 audio+video program stream:
  {
    char const* streamName = "mpeg1or2AudioVideoTest";
    char const* inputFileName = "test.mpg";
    // NOTE: This *must* be a Program Stream; not an Elementary Stream
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly));
    sms->addSubsession(demux->newAudioServerMediaSubsession());
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-1 or 2 video elementary stream:
  {
    char const* streamName = "mpeg1or2ESVideoTest";
    char const* inputFileName = "testv.mpg";
    // NOTE: This *must* be a Video Elementary Stream; not a Program Stream
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(MPEG1or2VideoFileServerMediaSubsession
	       ::createNew(*env, inputFileName, reuseFirstSource, iFramesOnly));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MP3 audio stream (actually, any MPEG-1 or 2 audio file will work):
  // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following:
//#define STREAM_USING_ADUS 1
  // To also reorder ADUs before streaming, uncomment the following:
//#define INTERLEAVE_ADUS 1
  // (For more information about ADUs and interleaving,
  //  see <http://www.live555.com/rtp-mp3/>)
  {
    char const* streamName = "mp3AudioTest";
    char const* inputFileName = "test.mp3";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    Boolean useADUs = False;
    Interleaving* interleaving = NULL;
#ifdef STREAM_USING_ADUS
    useADUs = True;
#ifdef INTERLEAVE_ADUS
    unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own...
    unsigned const interleaveCycleSize
      = (sizeof interleaveCycle)/(sizeof (unsigned char));
    interleaving = new Interleaving(interleaveCycleSize, interleaveCycle);
#endif
#endif
    sms->addSubsession(MP3AudioFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource,
				   useADUs, interleaving));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A WAV audio stream:
  {
    char const* streamName = "wavAudioTest";
    char const* inputFileName = "test.wav";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    // To convert 16-bit PCM data to 8-bit u-law, prior to streaming,
    // change the following to True:
    Boolean convertToULaw = False;
    sms->addSubsession(WAVAudioFileServerMediaSubsession
	       ::createNew(*env, inputFileName, reuseFirstSource, convertToULaw));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An AMR audio stream:
  {
    char const* streamName = "amrAudioTest";
    char const* inputFileName = "test.amr";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(AMRAudioFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A 'VOB' file (e.g., from an unencrypted DVD):
  {
    char const* streamName = "vobTest";
    char const* inputFileName = "test.vob";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    // Note: VOB files are MPEG-2 Program Stream files, but using AC-3 audio
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly));
    sms->addSubsession(demux->newAC3AudioServerMediaSubsession());
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-2 Transport Stream:
  {
    char const* streamName = "mpeg2TransportStreamTest";
    char const* inputFileName = "test.ts";
    char const* indexFileName = "test.tsx";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(MPEG2TransportFileServerMediaSubsession
		       ::createNew(*env, inputFileName, indexFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An AAC audio stream (ADTS-format file):
  {
    char const* streamName = "aacAudioTest";
    char const* inputFileName = "test.aac";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(ADTSAudioFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A DV video stream:
  {
    // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000).
    OutPacketBuffer::maxSize = 300000;

    char const* streamName = "dvVideoTest";
    char const* inputFileName = "test.dv";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName,
				      descriptionString);
    sms->addSubsession(DVVideoFileServerMediaSubsession
		       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
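Each stream block above ends with a call to announceStream(), which is not shown in this excerpt. A minimal sketch, assuming the helper from live555's testOnDemandRTSPServer.cpp that this example is based on:

static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
                           char const* streamName, char const* inputFileName) {
  // Build and print the "rtsp://" URL that clients use to request this stream:
  char* url = rtspServer->rtspURL(sms);
  UsageEnvironment& env = rtspServer->envir();
  env << "\n\"" << streamName << "\" stream, from the file \""
      << inputFileName << "\"\n";
  env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url; // rtspURL() heap-allocates the string; the caller must delete[] it
}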
Example #18
int main(int argc, char ** argv)
{
    // Initialise settings from configuration files.
    dvswitch_read_config(handle_config);

    // Parse arguments.

    int opt;
    // Note: 'h', 'p', and 'H' have no short-option form in "c:v"; they are
    // presumably delivered via entries in the long-option table 'options'.
    while ((opt = getopt_long(argc, argv, "c:v", options, NULL)) != -1)
    {
	switch (opt)
	{
	case 'c':
	    fw_port_name = optarg;
	    break;
	case 'h':
	    listen_host = optarg;
	    break;
	case 'p':
	    listen_port = optarg;
	    break;
	case 'v':
	    verbose = true;
	    break;
	case 'H': // --help
	    usage(argv[0]);
	    return 0;
	default:
	    usage(argv[0]);
	    return 2;
	}
    }

    if (optind != argc)
	fw_port_name = argv[optind++];

    if (optind != argc)
    {
	fprintf(stderr, "%s: excess argument \"%s\"\n",
		argv[0], argv[optind]);
	usage(argv[0]);
	return 2;
    }

    // Catch SIGINT.
    struct sigaction sigint_action;
    sigint_action.sa_handler = handle_sigint;
    sigemptyset(&sigint_action.sa_mask);
    sigint_action.sa_flags = SA_RESTART;
    if (sigaction(SIGINT, &sigint_action, NULL))
    {
	perror("ERROR: sigaction");
	return 1;
    }

    // Set up liveMedia framework
    BasicTaskScheduler * sched = BasicTaskScheduler::createNew();
    BasicUsageEnvironment * env = BasicUsageEnvironment::createNew(*sched);
    RTSPServer * server = RTSPServer::createNew(*env, 8554, NULL);
    if (server == NULL)
    {
	*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
	return 1;
    }
    OutPacketBuffer::maxSize = DIF_MAX_FRAME_SIZE;

    // Set up session
    std::string stream_name("firewire");
    stream_name.append(fw_port_name);
    std::string stream_desc("DV stream from Firewire port ");
    stream_desc.append(fw_port_name);
    ServerMediaSession * sms =
	ServerMediaSession::createNew(*env, stream_name.c_str(),
				      stream_desc.c_str(), stream_desc.c_str());
    sms->addSubsession(new firewire_subsession(*env, fw_port_name));
    server->addServerMediaSession(sms);

    // Loop until SIGINT received
    if (verbose)
	printf("INFO: Serving at rtsp://*:8554/%s\n", stream_name.c_str());
    sched->doEventLoop(&received_sigint);

    env->reclaim();

    return 0;
}
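The event loop above runs until received_sigint becomes non-zero. Neither the flag nor handle_sigint() is shown in this excerpt; a minimal sketch of what they presumably look like (the bodies are an assumption):

static char received_sigint = 0; // doEventLoop()'s watch variable: non-zero stops the loop

static void handle_sigint(int)
{
    // Async-signal-safe: only set the flag; the event loop polls it.
    received_sigint = 1;
}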
Example #19
int main(int argc, char const** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Parse command-line options:
  // (Unfortunately we can't use getopt() here; Windoze doesn't have it)
  programName = argv[0];
  while (argc > 2) {
    char const* const opt = argv[1];
    if (opt[0] != '-') break;
    switch (opt[1]) {

    case 'i': { // transmit video I-frames only
      iFramesOnly = True;
      break;
    }

    case 'a': { // transmit audio, but not video
      mediaToStream &=~ VOB_VIDEO;
      break;
    }

    case 'v': { // transmit video, but not audio
      mediaToStream &=~ VOB_AUDIO;
      break;
    }

    case 'p': { // specify port number for built-in RTSP server
      int portArg;
      if (sscanf(argv[2], "%d", &portArg) != 1) {
        usage();
      }
      if (portArg <= 0 || portArg >= 65536) {
        *env << "bad port number: " << portArg
	     << " (must be in the range (0,65536))\n";
        usage();
      }
      rtspServerPortNum = (unsigned short)portArg;
      ++argv; --argc;
      break;
    }

    default: {
      usage();
      break;
    }
    }

    ++argv; --argc;
  }
  if (argc < 2) usage();
  if (mediaToStream == 0) {
    *env << "The -a and -v flags cannot both be used!\n";
    usage();
  }
  if (iFramesOnly && (mediaToStream&VOB_VIDEO) == 0) {
    *env << "Warning: Because we're not streaming video, the -i flag has no effect.\n";
  }

  inputFileNames = &argv[1];
  curInputFileName = inputFileNames;

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);

  const unsigned short rtpPortNumAudio = 4444;
  const unsigned short rtcpPortNumAudio = rtpPortNumAudio+1;
  const unsigned short rtpPortNumVideo = 8888;
  const unsigned short rtcpPortNumVideo = rtpPortNumVideo+1;
  const unsigned char ttl = 255;

  const Port rtpPortAudio(rtpPortNumAudio);
  const Port rtcpPortAudio(rtcpPortNumAudio);
  const Port rtpPortVideo(rtpPortNumVideo);
  const Port rtcpPortVideo(rtcpPortNumVideo);

  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case

  if (mediaToStream&VOB_AUDIO) {
    rtpGroupsockAudio
      = new Groupsock(*env, destinationAddress, rtpPortAudio, ttl);
    rtpGroupsockAudio->multicastSendOnly(); // because we're a SSM source

    // Create an 'AC3 Audio RTP' sink from the RTP 'groupsock':
    audioSink
      = AC3AudioRTPSink::createNew(*env, rtpGroupsockAudio, 96, 0);
    // set the RTP timestamp frequency 'for real' later

    // Create (and start) a 'RTCP instance' for this RTP sink:
    rtcpGroupsockAudio
      = new Groupsock(*env, destinationAddress, rtcpPortAudio, ttl);
    rtcpGroupsockAudio->multicastSendOnly(); // because we're a SSM source
    const unsigned estimatedSessionBandwidthAudio
      = 160; // in kbps; for RTCP b/w share
    audioRTCP = RTCPInstance::createNew(*env, rtcpGroupsockAudio,
					estimatedSessionBandwidthAudio, CNAME,
					audioSink, NULL /* we're a server */,
					True /* we're a SSM source */);
    // Note: This starts RTCP running automatically
  }

  if (mediaToStream&VOB_VIDEO) {
    rtpGroupsockVideo
      = new Groupsock(*env, destinationAddress, rtpPortVideo, ttl);
    rtpGroupsockVideo->multicastSendOnly(); // because we're a SSM source

    // Create a 'MPEG Video RTP' sink from the RTP 'groupsock':
    videoSink = MPEG1or2VideoRTPSink::createNew(*env, rtpGroupsockVideo);

    // Create (and start) a 'RTCP instance' for this RTP sink:
    rtcpGroupsockVideo
      = new Groupsock(*env, destinationAddress, rtcpPortVideo, ttl);
    rtcpGroupsockVideo->multicastSendOnly(); // because we're a SSM source
    const unsigned estimatedSessionBandwidthVideo
      = 4500; // in kbps; for RTCP b/w share
    videoRTCP = RTCPInstance::createNew(*env, rtcpGroupsockVideo,
					estimatedSessionBandwidthVideo, CNAME,
					videoSink, NULL /* we're a server */,
					True /* we're a SSM source */);
    // Note: This starts RTCP running automatically
  }

  if (rtspServer == NULL) {
    rtspServer = RTSPServer::createNew(*env, rtspServerPortNum);
    if (rtspServer == NULL) {
      *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
      *env << "To change the RTSP server's port number, use the \"-p <port number>\" option.\n";
      exit(1);
    }
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, "vobStream", *curInputFileName,
	     "Session streamed by \"vobStreamer\"", True /*SSM*/);
    if (audioSink != NULL) sms->addSubsession(PassiveServerMediaSubsession::createNew(*audioSink, audioRTCP));
    if (videoSink != NULL) sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, videoRTCP));
    rtspServer->addServerMediaSession(sms);

    *env << "Created RTSP server.\n";

    // Display our "rtsp://" URL, for clients to connect to:
    char* url = rtspServer->rtspURL(sms);
    *env << "Access this stream using the URL:\n\t" << url << "\n";
    delete[] url;
  }

  // Finally, start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
Example #20
int main( int argc, char **argv )
{
	//int ret = 0;
	PTZControlInit();
	demo_setting * ext_gSettings = NULL;
	
	// Allocate the "global" settings
	ext_gSettings = (demo_setting*)malloc( sizeof( demo_setting ) );
	if ( NULL == ext_gSettings ) {
		printf( "main::out of memory!\n" );
		return -1;
	}
	
	sig_init();
    atexit(appExit);
	//init the setting struct
	Settings_Initialize( ext_gSettings );

	read_Parse(ext_gSettings);
	//printf("video type = %d \n", ext_gSettings->video_types);
	//...do your job

	//close the led
	setled_off();
	//init dma memory
	akuio_pmem_init();
	encode_init();
	printf("encode_init ok\n");
	//open camera
	camera_open(ext_gSettings->width, ext_gSettings->height);
	printf("camera_open ok\n");

	//encode_open
	T_ENC_INPUT encInput;
	encInput.width = ext_gSettings->width;			// actual encoded image width; must be divisible by 4
	encInput.height = ext_gSettings->height;		// actual encoded image height; must be divisible by 2
	encInput.kbpsmode = ext_gSettings->kbpsmode;
	encInput.qpHdr = ext_gSettings->qpHdr;			// initial QP value
	encInput.iqpHdr = ext_gSettings->iqpHdr;		// initial QP value
	encInput.bitPerSecond = ext_gSettings->bitPerSecond;	// target bitrate, in bits per second
	encInput.minQp = ext_gSettings->minQp;
	encInput.maxQp = ext_gSettings->maxQp;
	encInput.framePerSecond = ext_gSettings->framePerSecond;
	encInput.video_tytes = ext_gSettings->video_types;
	encode_open(&encInput);
	printf("encode_open ok\n");

	//set mux
	mux_input.rec_path = ext_gSettings->rec_path;
	mux_input.m_MediaRecType = MEDIALIB_REC_AVI_NORMAL;

	if (ext_gSettings->bhasAudio)
	{
		bHasAudio = 1;
		//mux_input.m_bCaptureAudio = 1;
	}
	else
	{
		bHasAudio = 0;
		//mux_input.m_bCaptureAudio = 0;
	}
	mux_input.m_bCaptureAudio = 1;
	//mux video
	if(parse.format2 == 0)
	{
		mux_input.m_eVideoType = MEDIALIB_VIDEO_H264;
	}
	else if(parse.format2 == 1)
	{
		mux_input.m_eVideoType = MEDIALIB_VIDEO_MJPEG;
	}
	mux_input.m_nWidth = parse.width2;
	mux_input.m_nHeight = parse.height2;
	
	//mux audio
	mux_input.m_eAudioType = MEDIALIB_AUDIO_AAC;
	mux_input.m_nSampleRate = 8000;
	//mux_input.abitsrate = ext_gSettings->abitsrate;

	printf("mux_open ok\n");

	//if (ext_gSettings->bhasAudio)
	{
		T_AUDIO_INPUT audioInput;
		audioInput.enc_type = (AUDIO_ENCODE_TYPE_CC)ext_gSettings->audioType;
		audioInput.nBitsRate = ext_gSettings->abitsrate;
		audioInput.nBitsPerSample = 16;
		audioInput.nChannels = 1;
		audioInput.nSampleRate = ext_gSettings->aSamplerate;
		audio_open(&audioInput);
		printf("audio_open ok\n");
		audio_start();
	}

	//start ftp server
	//startFTPSrv();

	Init_photograph();
	//PTZControlInit();
	//start video process
	video_process_start();
	InitMotionDetect();
	DemuxForLiveSetCallBack();
	TaskScheduler* scheduler = BasicTaskScheduler::createNew();
	env = BasicUsageEnvironment::createNew(*scheduler);
	UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
	// To implement client access control to the RTSP server, do the following:
	authDB = new UserAuthenticationDatabase;
	authDB->addUserRecord("username1", "password1"); // replace these with real strings
	// Repeat the above with each <username>, <password> that you wish to allow
	// access to the server.
#endif
       
	// Create the RTSP server:
	RTSPServer* rtspServer = AKRTSPServer::createNew(*env, RTSPPORT, authDB);
	if (rtspServer == NULL) 
	{
		*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
		appExit();
		exit(1);
	}

	char const* descriptionString = "Session streamed by \"testOnDemandRTSPServer\"";

	// Set up each of the possible streams that can be served by the
	// RTSP server.  Each such stream is implemented using a
	// "ServerMediaSession" object, plus one or more
	// "ServerMediaSubsession" objects for each audio/video substream.

	int vsIndex = 0;
	VIDEO_MODE vm[2] = {VIDEO_MODE_VGA,VIDEO_MODE_VGA};
	const char* streamName1 = "vs1";
	const char* streamName2 = "vs2";
	((AKRTSPServer*)rtspServer)->SetStreamName(streamName1, streamName2);	
	
	if(ext_gSettings->video_types == 1)
	{
		if(ext_gSettings->width == 640)
		{
			vm[0] = VIDEO_MODE_VGA;
		}
		else if(ext_gSettings->width == 320)
		{
			vm[0] = VIDEO_MODE_QVGA;
		}
		else if(ext_gSettings->width == 720)
		{
			vm[0] = VIDEO_MODE_D1;
		}
		
		AKIPCMJPEGFramedSource* ipcMJPEGSourcecam = NULL;
		ServerMediaSession* smsMJPEGcam = ServerMediaSession::createNew(*env, streamName1, 0, descriptionString);
		AKIPCMJPEGOnDemandMediaSubsession* subsMJPEGcam = AKIPCMJPEGOnDemandMediaSubsession::createNew(*env,ipcMJPEGSourcecam, ext_gSettings->width, ext_gSettings->height, vsIndex);
		smsMJPEGcam->addSubsession(subsMJPEGcam); 
		subsMJPEGcam->getframefunc = video_process_get_buf;
		subsMJPEGcam->setledstart = setled_view_start;
		subsMJPEGcam->setledexit = setled_view_stop;
		
		if(bHasAudio)
			smsMJPEGcam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env,True,getAACBuf, vsIndex));

		rtspServer->addServerMediaSession(smsMJPEGcam);
		char* url1 = rtspServer->rtspURL(smsMJPEGcam);
		*env << "using url \"" << url1 <<"\"\n";
		delete[] url1;
	}
	else if(ext_gSettings->video_types == 0)
	{
		if(ext_gSettings->width == 1280)
		{
			vm[0] = VIDEO_MODE_720P;
		}
		else if(ext_gSettings->width == 640)
		{
			vm[0] = VIDEO_MODE_VGA;
		}
		else if(ext_gSettings->width == 320)
		{
			vm[0] = VIDEO_MODE_QVGA;
		}
		else if(ext_gSettings->width == 720)
		{
			vm[0] = VIDEO_MODE_D1;
		}
		
		AKIPCH264FramedSource* ipcSourcecam = NULL;
		ServerMediaSession* smscam = ServerMediaSession::createNew(*env, streamName1, 0, descriptionString);
		AKIPCH264OnDemandMediaSubsession* subscam = AKIPCH264OnDemandMediaSubsession::createNew(*env,ipcSourcecam, 0, vsIndex);
		smscam->addSubsession(subscam);
		if(bHasAudio)
			smscam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env,True,getAACBuf, vsIndex));
	
		subscam->getframefunc = video_process_get_buf;
		subscam->setledstart = setled_view_start;
		subscam->setledexit = setled_view_stop;

		rtspServer->addServerMediaSession(smscam);
		char* url1 = rtspServer->rtspURL(smscam);
		*env << "using url \"" << url1 <<"\"\n";
		delete[] url1;
	}

	vsIndex = 1;
	
	if(parse.format2 == 0)//264
	{
		if(parse.width2 == 1280)
		{
			vm[1] = VIDEO_MODE_720P;
		}
		else if(parse.width2 == 640)
		{
			vm[1] = VIDEO_MODE_VGA;
		}
		else if(parse.width2 == 320)
		{
			vm[1] = VIDEO_MODE_QVGA;
		}
		else if(parse.width2 == 720)
		{
			vm[1] = VIDEO_MODE_D1;
		}
		
		AKIPCH264FramedSource* ipcSourcecam = NULL;
		ServerMediaSession* smscam = ServerMediaSession::createNew(*env, streamName2, 0, descriptionString);
		AKIPCH264OnDemandMediaSubsession* subscam = AKIPCH264OnDemandMediaSubsession::createNew(*env,ipcSourcecam, 0, vsIndex);
		smscam->addSubsession(subscam);
		if(bHasAudio)
			smscam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env,True,getAACBuf, vsIndex));
	
		subscam->getframefunc = video_process_get_buf;
		subscam->setledstart = setled_view_start;
		subscam->setledexit = setled_view_stop;

		rtspServer->addServerMediaSession(smscam);
		char* url2 = rtspServer->rtspURL(smscam);
		*env << "using url \"" << url2 <<"\"\n";
		delete[] url2;
	}
	else if(parse.format2 == 1)//mjpeg
	{
		if(parse.width2 == 640)
		{
			vm[1] = VIDEO_MODE_VGA;
		}
		else if(parse.width2 == 320)
		{
			vm[1] = VIDEO_MODE_QVGA;
		}
		else if(parse.width2 == 720)
		{
			vm[1] = VIDEO_MODE_D1;
		}
		
		AKIPCMJPEGFramedSource* ipcMJPEGSourcecam = NULL;
		ServerMediaSession* smsMJPEGcam = ServerMediaSession::createNew(*env, streamName2, 0, descriptionString);
		AKIPCMJPEGOnDemandMediaSubsession* subsMJPEGcam = AKIPCMJPEGOnDemandMediaSubsession::createNew(*env,ipcMJPEGSourcecam, parse.width2, parse.height2, vsIndex);
		smsMJPEGcam->addSubsession(subsMJPEGcam); 
		subsMJPEGcam->getframefunc = video_process_get_buf;
		subsMJPEGcam->setledstart = setled_view_start;
		subsMJPEGcam->setledexit = setled_view_stop;
		
		if(bHasAudio)
			smsMJPEGcam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env,True,getAACBuf, vsIndex));

		rtspServer->addServerMediaSession(smsMJPEGcam);
		char* url2 = rtspServer->rtspURL(smsMJPEGcam);
		*env << "using url \"" << url2 <<"\"\n";
		delete[] url2;
	}
#if 0
	if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) 
	{
		*env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
	}
	else 
	{
		*env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
	}
#endif

	//printf("streamName:%s,Port:%d\n", streamName1, RTSPPORT);
	
	
	NetCtlSrvPar ncsp;
	memset(&ncsp, 0, sizeof(ncsp));
	getDeviceID(ncsp.strDeviceID);
	printf("device id:**%s**\n", ncsp.strDeviceID);
	strcpy(ncsp.strStreamName1, streamName1);
	strcpy(ncsp.strStreamName2, streamName2);
	ncsp.vm1 = vm[0];
	ncsp.vm2 = vm[1];
	ncsp.nRtspPort = RTSPPORT;
	ncsp.nMainFps = parse.fps1;
	ncsp.nSubFps = parse.fps2;
	//start net command server
	startNetCtlServer(&ncsp);

    printf("[##]start record...\n");
    auto_record_file();
    printf("[##]auto_record_file() called..\n");

	// finally, start the RTSP event loop
	env->taskScheduler().doEventLoop(); // does not return

	return 0;
}
Example #21
void play() {
  // Open the file as a 'WAV' file:
  WAVAudioFileSource* pcmSource
    = WAVAudioFileSource::createNew(*env, inputFileName);
  if (pcmSource == NULL) {
    *env << "Unable to open file \"" << inputFileName
	 << "\" as a WAV audio file source: "
	 << env->getResultMsg() << "\n";
    exit(1);
  }

  // Get attributes of the audio source:
  unsigned char const bitsPerSample = pcmSource->bitsPerSample();
  if (bitsPerSample != 8 && bitsPerSample != 16) {
    *env << "The input file contains " << bitsPerSample
	 << " bit-per-sample audio, which we don't handle\n";
    exit(1);
  }
  sessionState.source = pcmSource;
  unsigned const samplingFrequency = pcmSource->samplingFrequency();
  unsigned char const numChannels = pcmSource->numChannels();
  unsigned bitsPerSecond
    = samplingFrequency*bitsPerSample*numChannels;
  *env << "Audio source parameters:\n\t" << samplingFrequency << " Hz, ";
  *env << bitsPerSample << " bits-per-sample, ";
  *env << numChannels << " channels => ";
  *env << bitsPerSecond << " bits-per-second\n";

  // Add in any filter necessary to transform the data prior to streaming.
  // (This is where any audio compression would get added.)
  char const* mimeType;
  unsigned char payloadFormatCode;
  if (bitsPerSample == 16) {
#ifdef CONVERT_TO_ULAW
    // Add a filter that converts from raw 16-bit PCM audio (in little-endian order)
    // to 8-bit u-law audio:
    sessionState.source
      = uLawFromPCMAudioSource::createNew(*env, pcmSource, 1/*little-endian*/);
    if (sessionState.source == NULL) {
      *env << "Unable to create a u-law filter from the PCM audio source: "
	   << env->getResultMsg() << "\n";
      exit(1);
    }
    bitsPerSecond /= 2;
    mimeType = "PCMU";
    if (samplingFrequency == 8000 && numChannels == 1) {
      payloadFormatCode = 0; // a static RTP payload type
    } else {
      payloadFormatCode = 96; // a dynamic RTP payload type
    }
    *env << "Converting to 8-bit u-law audio for streaming => "
	 << bitsPerSecond << " bits-per-second\n";
#else
    // The 16-bit samples in WAV files are in little-endian order.
    // Add a filter that converts them to network (i.e., big-endian) order:
    sessionState.source = EndianSwap16::createNew(*env, pcmSource);
    if (sessionState.source == NULL) {
      *env << "Unable to create a little->bit-endian order filter from the PCM audio source: "
	   << env->getResultMsg() << "\n";
      exit(1);
    }
    mimeType = "L16";
    if (samplingFrequency == 44100 && numChannels == 2) {
      payloadFormatCode = 10; // a static RTP payload type
    } else if (samplingFrequency == 44100 && numChannels == 1) {
      payloadFormatCode = 11; // a static RTP payload type
    } else {
      payloadFormatCode = 96; // a dynamic RTP payload type
    }
    *env << "Converting to network byte order for streaming\n";
#endif
  } else { // bitsPerSample == 8
    // Don't do any transformation; send the 8-bit PCM data 'as is':
    mimeType = "L8";
    payloadFormatCode = 96; // a dynamic RTP payload type
  }

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 2222;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  sessionState.rtpGroupsock
    = new Groupsock(*env, destinationAddress, rtpPort, ttl);
  sessionState.rtpGroupsock->multicastSendOnly(); // we're a SSM source
  sessionState.rtcpGroupsock
    = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
  sessionState.rtcpGroupsock->multicastSendOnly(); // we're a SSM source

  // Create an appropriate audio RTP sink (using "SimpleRTPSink")
  // from the RTP 'groupsock':
  sessionState.sink
    = SimpleRTPSink::createNew(*env, sessionState.rtpGroupsock,
			       payloadFormatCode, samplingFrequency,
			       "audio", mimeType, numChannels);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = bitsPerSecond/1000;
      // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  sessionState.rtcpInstance
    = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock,
			      estimatedSessionBandwidth, CNAME,
			      sessionState.sink, NULL /* we're a server */,
			      True /* we're a SSM source*/);
  // Note: This starts RTCP running automatically

  // Create and start a RTSP server to serve this stream:
  sessionState.rtspServer = RTSPServer::createNew(*env, 8554);
  if (sessionState.rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
	   "Session streamed by \"testWAVAudiotreamer\"", True/*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance));
  sessionState.rtspServer->addServerMediaSession(sms);

  char* url = sessionState.rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Finally, start the streaming:
  *env << "Beginning streaming...\n";
  sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
}
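startPlaying() above registers afterPlaying as its completion callback, which is not shown in this excerpt. A minimal sketch, assuming the usual live555 test-program pattern of closing the source and looping the input file:

static void afterPlaying(void* /*clientData*/) {
  *env << "...done reading from file\n";

  // Stop playing and close the media source:
  sessionState.sink->stopPlaying();
  Medium::close(sessionState.source);
  // (This also closes the input file that the source was reading from.)

  // Loop: reopen the file and start streaming again:
  play();
}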
int myRTSPServer(){
    
    Boolean bFlag;
    // Begin by setting up our usage environment:
    TaskScheduler* scheduler = BasicTaskScheduler::createNew();
    env = BasicUsageEnvironment::createNew(*scheduler);
    
    UserAuthenticationDatabase* authDB = NULL;
    
    // Create the RTSP server:
    RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
    if (rtspServer == NULL) {
        *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
        exit(1);
    }
    
    char const* descriptionString
    = "Session streamed by \"testOnDemandRTSPServer\"";
    
    // Set up each of the possible streams that can be served by the
    // RTSP server.  Each such stream is implemented using a
    // "ServerMediaSession" object, plus one or more
    // "ServerMediaSubsession" objects for each audio/video substream.
    
    // A H.264 video elementary stream:
    {
        char const* streamName = "BackChannelTest";
        char const* inputFileName = "slamtv10.264";
        char const* audioFileName = "slamtv10.aac";
        char const* outputFileName = "receive.pcm";
        reuseFirstSource = True;
        
        // check whether the test files exist
        {
            FILE *fp = NULL;
            fp = fopen(inputFileName, "r");
            if (fp == NULL) printf("File %s does not exist\n", inputFileName); else fclose(fp);
            fp = fopen(audioFileName, "r");
            if (fp == NULL) printf("File %s does not exist\n", audioFileName); else fclose(fp);
        }
        
        // Stream 1: H.264 video
        ServerMediaSession* sms
        = ServerMediaSession::createNew(*env, streamName, streamName,
                                        descriptionString);
        H264VideoFileServerMediaSubsession *sub =H264VideoFileServerMediaSubsession
        ::createNew(*env, inputFileName, reuseFirstSource);
        
        bFlag = sms->addSubsession(sub);
        if(bFlag==False) printf("addSubsession for %s error\n", inputFileName);
        
        
        // Stream 2: AAC audio stream (ADTS-format file):
        ADTSAudioFileServerMediaSubsession *sub2 =ADTSAudioFileServerMediaSubsession
        ::createNew(*env, audioFileName, reuseFirstSource);
        
        bFlag = sms->addSubsession(sub2);
        if(bFlag==False) printf("addSubsession for %s error\n", audioFileName);
        
        
        // Stream 3: backchannel AAC audio
        // TODO: modify this to support a backchannel:
        
        // implement a new class named ADTSBackChannelAudioFileServerMediaSubsession that
        // uses an RTPSource to receive data and an ADTSAudioFileSink to save the data to a file
        
        //ADTSBackChannelAudioFileServerMediaSubsession *sub3 =ADTSBackChannelAudioFileServerMediaSubsession
        WaveBackChannelAudioFileServerMediaSubsession* sub3 = WaveBackChannelAudioFileServerMediaSubsession
        ::createNew(*env, outputFileName, reuseFirstSource);
        
        sub3->setSubsessionAsBackChannel();
        bFlag = sms->addSubsession(sub3);
        if(bFlag==False) printf("addSubsession for %s error\n", outputFileName);
        
        rtspServer->addServerMediaSession(sms);
        
        // 20140703 albert.liao modified start
        // We should notify OnDemandServerMediaSubsession or ServerMediaSubsession
        // that we have already created a backchannel subsession, so that
        // ServerMediaSubsession can:
        // 1. create an SDP description with a backchannel
        // 2. create an RTPSource to read data from the RTP client
        // 3. create a FileSink to save the received data to a file
        // 20140703 albert.liao modified end
        
        announceStream(rtspServer, sms, streamName, inputFileName);
    }
    
    // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
    // Try first with the default HTTP port (80), and then with the alternative HTTP
    // port numbers (8000 and 8080).
    
    if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
        *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
    } else {
        *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
    }
    
    env->taskScheduler().doEventLoop(); // does not return
    
    return 0; // only to prevent compiler warning
}
Example #23
static ServerMediaSession* createNewSMS(UsageEnvironment& env,
                                        char const* fileName, FILE* /*fid*/) {
  // Use the file name extension to determine the type of "ServerMediaSession":
  char const* extension = strrchr(fileName, '.');
  if (extension == NULL) return NULL;

  ServerMediaSession* sms = NULL;
  Boolean const reuseSource = False;
  if (strcmp(extension, ".aac") == 0) {
    // Assumed to be an AAC Audio (ADTS format) file:
    NEW_SMS("AAC Audio");
    sms->addSubsession(ADTSAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".amr") == 0) {
    // Assumed to be an AMR Audio file:
    NEW_SMS("AMR Audio");
    sms->addSubsession(AMRAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".ac3") == 0) {
    // Assumed to be an AC-3 Audio file:
    NEW_SMS("AC-3 Audio");
    sms->addSubsession(AC3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".m4e") == 0) {
    // Assumed to be a MPEG-4 Video Elementary Stream file:
    NEW_SMS("MPEG-4 Video");
    sms->addSubsession(MPEG4VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".264") == 0) {
    // Assumed to be a H.264 Video Elementary Stream file:
    NEW_SMS("H.264 Video");
    OutPacketBuffer::maxSize = 100000; // allow for some possibly large H.264 frames
    sms->addSubsession(H264VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".265") == 0) {
    // Assumed to be a H.265 Video Elementary Stream file:
    NEW_SMS("H.265 Video");
    OutPacketBuffer::maxSize = 100000; // allow for some possibly large H.265 frames
    sms->addSubsession(H265VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".mp3") == 0) {
    // Assumed to be a MPEG-1 or 2 Audio file:
    NEW_SMS("MPEG-1 or 2 Audio");
    // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following:
//#define STREAM_USING_ADUS 1
    // To also reorder ADUs before streaming, uncomment the following:
//#define INTERLEAVE_ADUS 1
    // (For more information about ADUs and interleaving,
    //  see <http://www.live555.com/rtp-mp3/>)
    Boolean useADUs = False;
    Interleaving* interleaving = NULL;
#ifdef STREAM_USING_ADUS
    useADUs = True;
#ifdef INTERLEAVE_ADUS
    unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own...
    unsigned const interleaveCycleSize
      = (sizeof interleaveCycle)/(sizeof (unsigned char));
    interleaving = new Interleaving(interleaveCycleSize, interleaveCycle);
#endif
#endif
    sms->addSubsession(MP3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, useADUs, interleaving));
  } else if (strcmp(extension, ".mpg") == 0) {
    // Assumed to be a MPEG-1 or 2 Program Stream (audio+video) file:
    NEW_SMS("MPEG-1 or 2 Program Stream");
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession());
    sms->addSubsession(demux->newAudioServerMediaSubsession());
  } else if (strcmp(extension, ".vob") == 0) {
    // Assumed to be a VOB (MPEG-2 Program Stream, with AC-3 audio) file:
    NEW_SMS("VOB (MPEG-2 video with AC-3 audio)");
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession());
    sms->addSubsession(demux->newAC3AudioServerMediaSubsession());
  } else if (strcmp(extension, ".ts") == 0) {
    // Assumed to be a MPEG Transport Stream file:
    // Use an index file name that's the same as the TS file name, except with ".tsx":
    unsigned indexFileNameLen = strlen(fileName) + 2; // allow for trailing "x\0"
    char* indexFileName = new char[indexFileNameLen];
    sprintf(indexFileName, "%sx", fileName);
    NEW_SMS("MPEG Transport Stream");
    sms->addSubsession(MPEG2TransportFileServerMediaSubsession::createNew(env, fileName, indexFileName, reuseSource));
    delete[] indexFileName;
  } else if (strcmp(extension, ".wav") == 0) {
    // Assumed to be a WAV Audio file:
    NEW_SMS("WAV Audio Stream");
    // To convert 16-bit PCM data to 8-bit u-law, prior to streaming,
    // change the following to True:
    Boolean convertToULaw = False;
    sms->addSubsession(WAVAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, convertToULaw));
  } else if (strcmp(extension, ".dv") == 0) {
    // Assumed to be a DV Video file
    // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000).
    OutPacketBuffer::maxSize = 300000;

    NEW_SMS("DV Video");
    sms->addSubsession(DVVideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".mkv") == 0 || strcmp(extension, ".webm") == 0) {
    // Assumed to be a Matroska file (note that WebM ('.webm') files are also Matroska files)
    OutPacketBuffer::maxSize = 100000; // allow for some possibly large VP8 or VP9 frames
    NEW_SMS("Matroska video+audio+(optional)subtitles");

    // Create a Matroska file server demultiplexor for the specified file.
    // (We enter the event loop to wait for this to complete.)
    MatroskaDemuxCreationState creationState;
    creationState.watchVariable = 0;
    MatroskaFileServerDemux::createNew(env, fileName, onMatroskaDemuxCreation, &creationState);
    env.taskScheduler().doEventLoop(&creationState.watchVariable);

    ServerMediaSubsession* smss;
    while ((smss = creationState.demux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
    }
  } else if (strcmp(extension, ".ogg") == 0 || strcmp(extension, ".ogv") == 0 || strcmp(extension, ".opus") == 0) {
    // Assumed to be an Ogg file
    NEW_SMS("Ogg video and/or audio");

    // Create a Ogg file server demultiplexor for the specified file.
    // (We enter the event loop to wait for this to complete.)
    OggDemuxCreationState creationState;
    creationState.watchVariable = 0;
    OggFileServerDemux::createNew(env, fileName, onOggDemuxCreation, &creationState);
    env.taskScheduler().doEventLoop(&creationState.watchVariable);

    ServerMediaSubsession* smss;
    while ((smss = creationState.demux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
    }
  }

  return sms;
}
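createNewSMS() above relies on a NEW_SMS macro that is not shown in this excerpt. A minimal sketch, assuming the definition from live555's mediaServer/DynamicRTSPServer.cpp, where this function originates (the exact description suffix is an assumption):

// Assumed definition of NEW_SMS: builds a description string and creates a
// session named after the file, using the 'env', 'fileName' and 'sms'
// already in scope in createNewSMS().
#define NEW_SMS(description) do {\
char const* descStr = description\
    ", streamed by the LIVE555 Media Server";\
sms = ServerMediaSession::createNew(env, fileName, fileName, descStr);\
} while(0)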
static void RtspServerStart(RTSP_PARAM_INFO *cRtspParamInfo)
{
	enum {ENUM_UNICAST = 0, ENUM_MULTICAST};
	scheduler = BasicTaskScheduler::createNew();
	env = BasicUsageEnvironment::createNew(*scheduler);
	Boolean reuseFirstSource = true;
	OutPacketBuffer::maxSize = 500000;
	const unsigned short statusPort = 10086;
	Port RTSPStatusPort(statusPort);
	Port RTSPServerPort(cRtspParamInfo->iRTSPServerPort);
	RTPSink *videoSinkMajor = NULL;
	RTPSink *videoSinkMinor = NULL;
	char streamName[RTSP_STRING_LEN] = {0};
	char secondStreamName[RTSP_STRING_LEN] = {0};
	strncpy(streamName, cRtspParamInfo->rgStreamName[RTSP_STREAM_MAJOR], RTSP_STRING_LEN - 1);
	strncpy(secondStreamName, cRtspParamInfo->rgStreamName[RTSP_STREAM_MINOR], RTSP_STRING_LEN - 1);
	// Create 'groupsocks' for RTP and RTCP:
	struct in_addr destinationAddressMajor;
	struct in_addr destinationAddressMinor;
	UserAuthenticationDatabase *authDB = NULL;
//#ifdef ACCESS_CONTROL
	if(cRtspParamInfo->iAuthenticateEnable){
		authDB = new UserAuthenticationDatabase;
//		authDB->addUserRecord(cRtspParamInfo->rgUserName, cRtspParamInfo->rgPassword);
		authDB->addUserRecord("admin", "admin");
		fprintf(stdout, "%s %d Authentication Enable!\n", __FILE__, __LINE__);
	}
//#endif
	destinationAddressMajor.s_addr = chooseRandomIPv4SSMAddress(*env);
	destinationAddressMinor.s_addr = chooseRandomIPv4SSMAddress(*env);
	// Note: This is a multicast address.  If you wish instead to stream
	// using unicast, then you should use the "testOnDemandRTSPServer"
	// test program - not this test program - as a model.

	const unsigned short rtpPortNumMajor = 18888;
	const unsigned short rtcpPortNumMajor = rtpPortNumMajor + 1;
	const unsigned short rtpPortNumMinor = rtcpPortNumMajor + 1;
	const unsigned short rtcpPortNumMinor = rtpPortNumMinor + 1;
	const unsigned char ttl = 255;

	const Port rtpPortMajor(rtpPortNumMajor);
	const Port rtcpPortMajor(rtcpPortNumMajor);
	const Port rtpPortMinor(rtpPortNumMinor);
	const Port rtcpPortMinor(rtcpPortNumMinor);

	Groupsock rtpGroupsockMajor(*env, destinationAddressMajor, rtpPortMajor, ttl);
	rtpGroupsockMajor.multicastSendOnly(); // we're a SSM source
	Groupsock rtcpGroupsockMajor(*env, destinationAddressMajor, rtcpPortMajor, ttl);
	rtcpGroupsockMajor.multicastSendOnly(); // we're a SSM source
	Groupsock rtpGroupsockMinor(*env, destinationAddressMinor, rtpPortMinor, ttl);
	rtpGroupsockMinor.multicastSendOnly();
	Groupsock rtcpGroupsockMinor(*env, destinationAddressMinor, rtcpPortMinor, ttl);
	rtcpGroupsockMinor.multicastSendOnly();

	// Make sure the RTP sinks' buffers are large enough for H.264 frames
	// (note: this overrides the value of 500000 set above):
	OutPacketBuffer::maxSize = 1000000;

	// Create (and start) a 'RTCP instance' for this RTP sink:
	const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
	const unsigned maxCNAMElen = 100;
	unsigned char CNAME[maxCNAMElen+1];
	gethostname((char*)CNAME, maxCNAMElen);
	CNAME[maxCNAMElen] = '\0'; // just in case

	RTSPServer* rtspServer = RTSPServer::createNew(*env, RTSPServerPort, authDB);
	if (rtspServer == NULL) {
		*env << "Failed to create RTSP server: " 
			 << env->getResultMsg() << "\n";
		exit(1);
	}

	//first stream
	if(cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MAJOR] == ENUM_UNICAST){
		ServerMediaSession* smsMajor
			= ServerMediaSession::createNew(*env, streamName, 
											streamName, "Session streamed by \"testH264VideoStreamer\"");
		smsMajor->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, 
																			  firstInputFileName, reuseFirstSource));
		rtspServer->addServerMediaSession(smsMajor);
	}else if(cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MAJOR] == ENUM_MULTICAST){
		videoSinkMajor = H264VideoRTPSink::createNew(*env, &rtpGroupsockMajor, 96);
		RTCPInstance* rtcpMajor = RTCPInstance::createNew(*env, &rtcpGroupsockMajor,
														  estimatedSessionBandwidth, CNAME,
														  videoSinkMajor, NULL /* we're a server */,
														  True /* we're a SSM source */);
		ServerMediaSession* smsMajor = ServerMediaSession::createNew(*env, streamName, firstInputFileName,
																	 "Session streamed by \"swH264VideoStreamer\"",
																	 True /*SSM*/);
		smsMajor->addSubsession(PassiveServerMediaSubsession::createNew(*videoSinkMajor, rtcpMajor));
		rtspServer->addServerMediaSession(smsMajor);
		play(videoSinkMajor, firstInputFileName);
	}

	if((cRtspParamInfo->iRTSPStreamNum > 1) && 
	   (cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MINOR] == ENUM_UNICAST)){
		ServerMediaSession* smsMinor
			= ServerMediaSession::createNew(*env, secondStreamName, 
											secondStreamName,
											"Session streamed by \"testH264VideoStreamer\"");
		smsMinor->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, 
																			  secondInputFileName, reuseFirstSource));
		rtspServer->addServerMediaSession(smsMinor);
	}else if((cRtspParamInfo->iRTSPStreamNum > 1) && 
			 (cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MINOR] == ENUM_MULTICAST)){
		videoSinkMinor = H264VideoRTPSink::createNew(*env, &rtpGroupsockMinor, 96);
		RTCPInstance* rtcpMinor = RTCPInstance::createNew(*env, &rtcpGroupsockMinor,
														  estimatedSessionBandwidth, CNAME,
														  videoSinkMinor, NULL /* we're a server */,
														  True /* we're a SSM source */);
		ServerMediaSession *smsMinor = ServerMediaSession::createNew(*env, secondStreamName, secondInputFileName,
																	 "Session streamed by \"swH264VideoStreamer\"",
																	 True /*SSM*/);
		smsMinor->addSubsession(PassiveServerMediaSubsession::createNew(*videoSinkMinor, rtcpMinor));
		rtspServer->addServerMediaSession(smsMinor);
		play(videoSinkMinor, secondInputFileName);
	}

	rtspServer->setUpConnectionStatus(RTSPStatusPort);
	rtspServer->setStreamName(0, streamName, strlen(streamName));		//0 for major
	rtspServer->setStreamName(1, secondStreamName, strlen(secondStreamName));
	rtspServer->setAutoControlBitrate(cRtspParamInfo->iAutoControlBitrateEnable);

	if(rtspServer->setUpTunnelingOverHTTP(80) ||
	   rtspServer->setUpTunnelingOverHTTP(8000) ||
	   rtspServer->setUpTunnelingOverHTTP(8080)) {
		*env << "\n(We use port " << rtspServer->httpServerPortNum() 
			 << " for optional RTSP-over-HTTP tunneling.)\n";
	} else {
		*env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
	}

	env->taskScheduler().doEventLoop(); // does not return
	
}
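The multicast branches above call a two-argument play(videoSink, fileName) that is not shown in this excerpt. A minimal sketch, assuming it mirrors testH264VideoStreamer's play() but is parameterised by sink and file name:

static void play(RTPSink* sink, char const* fileName)
{
	// Open the input file as a 'byte-stream file source':
	ByteStreamFileSource* fileSource
		= ByteStreamFileSource::createNew(*env, fileName);
	if (fileSource == NULL) {
		*env << "Unable to open file \"" << fileName
			 << "\" as a byte-stream file source\n";
		exit(1);
	}

	// Parse the raw byte stream into H.264 NAL units before handing it to the sink:
	H264VideoStreamFramer* videoSource
		= H264VideoStreamFramer::createNew(*env, fileSource);

	// Start streaming (no completion callback in this sketch):
	sink->startPlaying(*videoSource, NULL, NULL);
}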
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 18888;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
  rtpGroupsock.multicastSendOnly(); // we're a SSM source
  Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
  rtcpGroupsock.multicastSendOnly(); // we're a SSM source

  // Create a 'H264 Video RTP' sink from the RTP 'groupsock':
  OutPacketBuffer::maxSize = 100000;
  videoSink = H264VideoRTPSink::createNew(*env, &rtpGroupsock, 96);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 5000; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  RTCPInstance* rtcp
  = RTCPInstance::createNew(*env, &rtcpGroupsock,
			    estimatedSessionBandwidth, CNAME,
			    videoSink, NULL /* we're a server */,
			    True /* we're a SSM source */);
  // Note: This starts RTCP running automatically

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
		   "Session streamed by \"testH264VideoStreamer\"",
					   True /*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
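main() above delegates to a global play(); a minimal sketch, assuming the helper from live555's testH264VideoStreamer.cpp (inputFileName, videoSource and videoSink are the globals this example already uses; afterPlaying, not shown, conventionally closes the source and calls play() again to loop):

void play() {
  // Open the input file as a 'byte-stream file source':
  ByteStreamFileSource* fileSource
    = ByteStreamFileSource::createNew(*env, inputFileName);
  if (fileSource == NULL) {
    *env << "Unable to open file \"" << inputFileName
         << "\" as a byte-stream file source\n";
    exit(1);
  }

  // Create a framer that parses the byte stream into H.264 NAL units:
  videoSource = H264VideoStreamFramer::createNew(*env, fileSource);

  // Finally, start playing:
  *env << "Beginning to read from file...\n";
  videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
}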
boost::system::error_code RtspService::init(uint16_t uiRtspPort)
{
  VLOG(2) << "Initialising RTSP service";
  // init live555 environment
  // Setup the liveMedia environment
  m_pScheduler = LiveSourceTaskScheduler::createNew(m_channelManager);
  // live media env
  m_pEnv = BasicUsageEnvironment::createNew(*m_pScheduler);
  VLOG(2) << "Creating RTSP server";
  m_pRtspServer = LiveRtspServer::createNew(*m_pEnv, uiRtspPort, 0, m_pFactory, m_pGlobalRateControl);
  if (m_pRtspServer == NULL)
  {
    *m_pEnv << "Failed to create RTSP server: " << m_pEnv->getResultMsg() << "\n";
    LOG(WARNING) << "Failed to create RTSP server";
    // TODO: add custom error codes!!!
    return boost::system::error_code(boost::system::errc::bad_file_descriptor, boost::system::get_generic_category());
  }
#if 0
  // disable TCP streaming for testing
  m_pRtspServer->disableStreamingRTPOverTCP();
#endif
  // set notification for PLAY requests
  m_pRtspServer->setOnClientSessionPlayCallback(boost::bind(&RtspService::onRtspClientSessionPlay, this, _1));

  // taken from testOnDemandRTSPServer
#ifdef TEST_STREAMS
  char const* descriptionString
    = "Session streamed by \"testOnDemandRTSPServer\"";

  // An AAC audio stream (ADTS-format file):
  {
    char const* streamName = "aacAudioTest";
    char const* inputFileName = "test.aac";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*m_pEnv, streamName, streamName,
      descriptionString);
    sms->addSubsession(ADTSAudioFileServerMediaSubsession
      ::createNew(*m_pEnv, inputFileName, false));
    m_pRtspServer->addServerMediaSession(sms);

    //announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An AMR audio stream:
  {
    char const* streamName = "amrAudioTest";
    char const* inputFileName = "test.amr";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*m_pEnv, streamName, streamName,
      descriptionString);
    sms->addSubsession(AMRAudioFileServerMediaSubsession
      ::createNew(*m_pEnv, inputFileName, false));
    m_pRtspServer->addServerMediaSession(sms);

    // announceStream(m_pRtspServer, sms, streamName, inputFileName);
  }
#endif

  // Add task that checks if there's new data in the queue
  checkSessionsTask(this);
  checkChannelsTask(this);

  return boost::system::error_code();
}