Example #1
bool ElcRTSPServer::setSource(const string& url, const string& iname)
{
	LOG_INFO("ElcRTSPServer::setSource -url="<<url<<" -name="<<iname);
	mUrl = url;
	mName = iname;
	Boolean reuseFirstSource = true;
	if(!rtspServer)
	{
		LOG_ERROR("ElcRTSPServer::setSource failed! Rtspserver does not created propertly");
		return false;
	}
	mediaSession = ServerMediaSession::createNew(*mEnv, mName.c_str(), mName.c_str(), "ELC");
	if(!mediaSession)
		return false;

	// if multicast streaming
	if(Config::isMulticast)
	{
		struct in_addr destinationAddress;
		if(Config::streamingDestinationIp == "default" || Config::streamingDestinationIp =="")
			destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*mEnv);
		else
			destinationAddress.s_addr = our_inet_addr(Config::streamingDestinationIp.c_str());

		Port rtpPort(Config::rtpPortNum);
		Port rtcpPort(Config::rtcpPortNum);

		rtpGroupsock = new Groupsock(*mEnv, destinationAddress, rtpPort, 255/*Config::ttl*/);
		rtpGroupsock->multicastSendOnly(); // we're a SSM source
		rtcpGroupsock = new Groupsock(*mEnv, destinationAddress, rtcpPort, 255/*Config::ttl*/);
		rtcpGroupsock->multicastSendOnly(); // we're a SSM source

		videoSink = MPEG4ESVideoRTPSink::createNew(*mEnv, rtpGroupsock, 96);

		const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
		const unsigned maxCNAMElen = 100;
		unsigned char CNAME[maxCNAMElen+1];
		gethostname((char*)CNAME, maxCNAMElen);
		CNAME[maxCNAMElen] = '\0'; // just in case

		rtcp = RTCPInstance::createNew(*mEnv, rtcpGroupsock,
						estimatedSessionBandwidth, CNAME,
						videoSink, NULL /* we're a server*/ ,
						True /* we're a SSM source */);

		mediaSession->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
		rtspServer->addServerMediaSession(mediaSession);

		LOG_INFO("ElcRTSPServer::setSource OK");
	}
	else // unicast streaming
	{
		mediaSubSession = Mpeg4LiveServerMediaSubSession::createNew(*mEnv, reuseFirstSource, mUrl);
		mediaSession->addSubsession(mediaSubSession);
		rtspServer->addServerMediaSession(mediaSession);
		LOG_INFO("ElcRTSPServer::setSource OK");
	}

	return true;
}
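
A hypothetical call site for setSource(), just to show the expected inputs (the construction and configuration of ElcRTSPServer are project-specific and not shown in this example; names and values below are illustrative only):

// Hypothetical usage -- not part of the original project.
ElcRTSPServer server;
if (!server.setSource("rtsp://192.168.0.10/stream", "cam1"))
{
	// setSource() already logged the reason; handle the failure here.
}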
Example #2
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Define our destination (multicast) IP address:
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
    // Note: This is a multicast address.  If you wish instead to stream
    // using unicast, then you should use the "testOnDemandRTSPServer"
    // test program - not this test program - as a model.

  // Create our RTSP server.  (Receivers will need to use RTSP to access the stream.)
  rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  sms = ServerMediaSession::createNew(*env, "testStream", inputFileName,
				      "Session streamed by \"testMKVStreamer\"",
				      True /*SSM*/);

  // Arrange to create a "MatroskaFile" object for the specified file.
  // (Note that this object is not created immediately, but instead via a callback.)
  MatroskaFile::createNew(*env, inputFileName, onMatroskaFileCreation, NULL, "jpn");

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
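
The onMatroskaFileCreation() callback that completes this setup is not shown. A minimal sketch of its shape, assuming the globals (env, rtspServer, sms, matroskaFile) used above; the real "testMKVStreamer" demo also demultiplexes the file's tracks into subsessions at this point:

void onMatroskaFileCreation(MatroskaFile* newFile, void* /*clientData*/) {
  matroskaFile = newFile; // assumed global holding the now-parsed file

  // ... (create a demux and add one subsession per preferred track here) ...

  // Announce the completed session:
  rtspServer->addServerMediaSession(sms);
  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;
}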
Example #3
void CCreateRTPServer::createGroupsock()
{
    m_prtcpGroupsockAudio = NULL;
    m_prtpGroupsockAudio = NULL;
    m_prtcpGroupsockVideo = NULL;
    m_prtpGroupsockVideo = NULL;

    struct in_addr destAddress;
    //destAddress.s_addr = our_inet_addr(m_destAddressStr.c_str());
    if(!m_rtpDestAddressStr.empty())
        destAddress.s_addr = our_inet_addr(m_rtpDestAddressStr.c_str());
    else
        destAddress.s_addr = chooseRandomIPv4SSMAddress(*env);

    if(m_pAudioSourceQueue != NULL)
    {
        const unsigned short rtpPortNumAudio = m_rtpPortNum;
        const unsigned short rtcpPortNumAudio = rtpPortNumAudio + 1;
        const Port rtpPortAudio(rtpPortNumAudio);
        const Port rtcpPortAudio(rtcpPortNumAudio);
        m_prtpGroupsockAudio = new Groupsock(*env, destAddress, rtpPortAudio, m_ttl);
        m_prtcpGroupsockAudio = new Groupsock(*env, destAddress, rtcpPortAudio, m_ttl);
        if(m_prtpGroupsockAudio && m_prtcpGroupsockAudio)
        {
            m_rtpPortNum += 2;
        }
    }

    if(m_pVideoSourceQueue != NULL)
    {
        const unsigned short rtpPortNumVideo = m_rtpPortNum;
        const unsigned short rtcpPortNumVideo = rtpPortNumVideo + 1;
        const Port rtpPortVideo(rtpPortNumVideo);
        const Port rtcpPortVideo(rtcpPortNumVideo);
        m_prtpGroupsockVideo = new Groupsock(*env, destAddress, rtpPortVideo, m_ttl);
        m_prtcpGroupsockVideo = new Groupsock(*env, destAddress, rtcpPortVideo, m_ttl);
    }
}
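
Note that, unlike the other examples, createGroupsock() never calls multicastSendOnly() on the sockets it creates. A sketch of how such pre-built groupsocks are typically consumed, following the pattern of Examples #4 and #5 (the sink type, bandwidth, and CNAME handling below are assumptions, not part of CCreateRTPServer):

// Hypothetical continuation: build an RTP sink and an RTCP instance on the video pair.
RTPSink* videoSink = H264VideoRTPSink::createNew(*env, m_prtpGroupsockVideo, 96);

const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
const unsigned maxCNAMElen = 100;
unsigned char CNAME[maxCNAMElen + 1];
gethostname((char*)CNAME, maxCNAMElen);
CNAME[maxCNAMElen] = '\0'; // just in case

RTCPInstance* rtcp = RTCPInstance::createNew(*env, m_prtcpGroupsockVideo,
                                             estimatedSessionBandwidth, CNAME,
                                             videoSink, NULL /* we're a server */);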
Example #4
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 18888;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
  rtpGroupsock.multicastSendOnly(); // we're a SSM source
  Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
  rtcpGroupsock.multicastSendOnly(); // we're a SSM source

  // Create a 'H264 Video RTP' sink from the RTP 'groupsock':
  OutPacketBuffer::maxSize = 100000;
  videoSink = H264VideoRTPSink::createNew(*env, &rtpGroupsock, 96);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 5000; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  RTCPInstance* rtcp
  = RTCPInstance::createNew(*env, &rtcpGroupsock,
			    estimatedSessionBandwidth, CNAME,
			    videoSink, NULL /* we're a server */,
			    True /* we're a SSM source */);
  // Note: This starts RTCP running automatically

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
		   "Session streamed by \"testH264VideoStreamer\"",
					   True /*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
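
The play() that this example calls is not shown. In the live555 "testH264VideoStreamer" demo it reads the input file as a byte stream, frames it into H.264 NAL units, and starts the sink; roughly:

void play() {
  // Open the input file as a 'byte-stream file source':
  ByteStreamFileSource* fileSource
    = ByteStreamFileSource::createNew(*env, inputFileName);
  if (fileSource == NULL) {
    *env << "Unable to open file \"" << inputFileName
         << "\" as a byte-stream file source\n";
    exit(1);
  }

  // Create a framer for the Video Elementary Stream:
  videoSource = H264VideoStreamFramer::createNew(*env, fileSource);

  // Finally, start playing (afterPlaying() typically re-opens the file to loop):
  videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
}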
Example #5
int main(int argc, char const** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Parse command-line options:
  // (Unfortunately we can't use getopt() here; Windoze doesn't have it)
  programName = argv[0];
  while (argc > 2) {
    char const* const opt = argv[1];
    if (opt[0] != '-') break;
    switch (opt[1]) {

    case 'i': { // transmit video I-frames only
      iFramesOnly = True;
      break;
    }

    case 'a': { // transmit audio, but not video
      mediaToStream &=~ VOB_VIDEO;
      break;
    }

    case 'v': { // transmit video, but not audio
      mediaToStream &=~ VOB_AUDIO;
      break;
    }

    case 'p': { // specify port number for built-in RTSP server
      int portArg;
      if (sscanf(argv[2], "%d", &portArg) != 1) {
        usage();
      }
      if (portArg <= 0 || portArg >= 65536) {
        *env << "bad port number: " << portArg
	     << " (must be in the range (0,65536))\n";
        usage();
      }
      rtspServerPortNum = (unsigned short)portArg;
      ++argv; --argc;
      break;
    }

    default: {
      usage();
      break;
    }
    }

    ++argv; --argc;
  }
  if (argc < 2) usage();
  if (mediaToStream == 0) {
    *env << "The -a and -v flags cannot both be used!\n";
    usage();
  }
  if (iFramesOnly && (mediaToStream&VOB_VIDEO) == 0) {
    *env << "Warning: Because we're not streaming video, the -i flag has no effect.\n";
  }

  inputFileNames = &argv[1];
  curInputFileName = inputFileNames;

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);

  const unsigned short rtpPortNumAudio = 4444;
  const unsigned short rtcpPortNumAudio = rtpPortNumAudio+1;
  const unsigned short rtpPortNumVideo = 8888;
  const unsigned short rtcpPortNumVideo = rtpPortNumVideo+1;
  const unsigned char ttl = 255;

  const Port rtpPortAudio(rtpPortNumAudio);
  const Port rtcpPortAudio(rtcpPortNumAudio);
  const Port rtpPortVideo(rtpPortNumVideo);
  const Port rtcpPortVideo(rtcpPortNumVideo);

  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case

  if (mediaToStream&VOB_AUDIO) {
    rtpGroupsockAudio
      = new Groupsock(*env, destinationAddress, rtpPortAudio, ttl);
    rtpGroupsockAudio->multicastSendOnly(); // because we're a SSM source

    // Create an 'AC3 Audio RTP' sink from the RTP 'groupsock':
    audioSink
      = AC3AudioRTPSink::createNew(*env, rtpGroupsockAudio, 96, 0);
    // set the RTP timestamp frequency 'for real' later

    // Create (and start) a 'RTCP instance' for this RTP sink:
    rtcpGroupsockAudio
      = new Groupsock(*env, destinationAddress, rtcpPortAudio, ttl);
    rtcpGroupsockAudio->multicastSendOnly(); // because we're a SSM source
    const unsigned estimatedSessionBandwidthAudio
      = 160; // in kbps; for RTCP b/w share
    audioRTCP = RTCPInstance::createNew(*env, rtcpGroupsockAudio,
					estimatedSessionBandwidthAudio, CNAME,
					audioSink, NULL /* we're a server */,
					True /* we're a SSM source */);
    // Note: This starts RTCP running automatically
  }

  if (mediaToStream&VOB_VIDEO) {
    rtpGroupsockVideo
      = new Groupsock(*env, destinationAddress, rtpPortVideo, ttl);
    rtpGroupsockVideo->multicastSendOnly(); // because we're a SSM source

    // Create a 'MPEG Video RTP' sink from the RTP 'groupsock':
    videoSink = MPEG1or2VideoRTPSink::createNew(*env, rtpGroupsockVideo);

    // Create (and start) a 'RTCP instance' for this RTP sink:
    rtcpGroupsockVideo
      = new Groupsock(*env, destinationAddress, rtcpPortVideo, ttl);
    rtcpGroupsockVideo->multicastSendOnly(); // because we're a SSM source
    const unsigned estimatedSessionBandwidthVideo
      = 4500; // in kbps; for RTCP b/w share
    videoRTCP = RTCPInstance::createNew(*env, rtcpGroupsockVideo,
					estimatedSessionBandwidthVideo, CNAME,
					videoSink, NULL /* we're a server */,
					True /* we're a SSM source */);
    // Note: This starts RTCP running automatically
  }

  if (rtspServer == NULL) {
    rtspServer = RTSPServer::createNew(*env, rtspServerPortNum);
    if (rtspServer == NULL) {
      *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
      *env << "To change the RTSP server's port number, use the \"-p <port number>\" option.\n";
      exit(1);
    }
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, "vobStream", *curInputFileName,
	     "Session streamed by \"vobStreamer\"", True /*SSM*/);
    if (audioSink != NULL) sms->addSubsession(PassiveServerMediaSubsession::createNew(*audioSink, audioRTCP));
    if (videoSink != NULL) sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, videoRTCP));
    rtspServer->addServerMediaSession(sms);

    *env << "Created RTSP server.\n";

    // Display our "rtsp://" URL, for clients to connect to:
    char* url = rtspServer->rtspURL(sms);
    *env << "Access this stream using the URL:\n\t" << url << "\n";
    delete[] url;
  }

  // Finally, start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
Example #6
// -----------------------------------------
//    entry point
// -----------------------------------------
int main(int argc, char** argv) 
{
	// default parameters
	const char *dev_name = "/dev/video0";	
	int format = V4L2_PIX_FMT_H264;
	int width = 640;
	int height = 480;
	int queueSize = 10;
	int fps = 25;
	unsigned short rtpPortNum = 20000;
	unsigned short rtcpPortNum = rtpPortNum+1;
	unsigned char ttl = 5;
	struct in_addr destinationAddress;
	unsigned short rtspPort = 8554;
	unsigned short rtspOverHTTPPort = 0;
	bool multicast = false;
	int verbose = 0;
	std::string outputFile;
	bool useMmap = false;

	// decode parameters
	int c = 0;     
	while ((c = getopt (argc, argv, "hW:H:Q:P:F:v::O:T:mM")) != -1)
	{
		switch (c)
		{
			case 'O':	outputFile = optarg; break;
			case 'v':	verbose = 1; if (optarg && *optarg=='v') verbose++;  break;
			case 'm':	multicast = true; break;
			case 'W':	width = atoi(optarg); break;
			case 'H':	height = atoi(optarg); break;
			case 'Q':	queueSize = atoi(optarg); break;
			case 'P':	rtspPort = atoi(optarg); break;
			case 'T':	rtspOverHTTPPort = atoi(optarg); break;
			case 'F':	fps = atoi(optarg); break;
			case 'M':	useMmap = true; break;
			case 'h':
			{
				std::cout << argv[0] << " [-v[v]][-m] [-P RTSP port][-P RTSP/HTTP port][-Q queueSize] [-M] [-W width] [-H height] [-F fps] [-O file] [device]" << std::endl;
				std::cout << "\t -v       : verbose " << std::endl;
				std::cout << "\t -v v     : very verbose " << std::endl;
				std::cout << "\t -Q length: Number of frame queue  (default "<< queueSize << ")" << std::endl;
				std::cout << "\t -O file  : Dump capture to a file" << std::endl;
				std::cout << "\t RTSP options :" << std::endl;
				std::cout << "\t -m       : Enable multicast output" << std::endl;
				std::cout << "\t -P port  : RTSP port (default "<< rtspPort << ")" << std::endl;
				std::cout << "\t -H port  : RTSP over HTTP port (default "<< rtspOverHTTPPort << ")" << std::endl;
				std::cout << "\t V4L2 options :" << std::endl;
				std::cout << "\t -M       : V4L2 capture using memory mapped buffers (default use read interface)" << std::endl;
				std::cout << "\t -F fps   : V4L2 capture framerate (default "<< fps << ")" << std::endl;
				std::cout << "\t -W width : V4L2 capture width (default "<< width << ")" << std::endl;
				std::cout << "\t -H height: V4L2 capture height (default "<< height << ")" << std::endl;
				std::cout << "\t device   : V4L2 capture device (default "<< dev_name << ")" << std::endl;
				exit(0);
			}
		}
	}
	if (optind<argc)
	{
		dev_name = argv[optind];
	}
     
	// create live555 environment
	TaskScheduler* scheduler = BasicTaskScheduler::createNew();
	UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);	
	
	// create RTSP server
	RTSPServer* rtspServer = RTSPServer::createNew(*env, rtspPort);
	if (rtspServer == NULL) 
	{
		*env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
	}
	else
	{
		// set http tunneling
		if (rtspOverHTTPPort)
		{
			rtspServer->setUpTunnelingOverHTTP(rtspOverHTTPPort);
		}
		
		// Init capture
		*env << "Create V4L2 Source..." << dev_name << "\n";
		V4L2DeviceParameters param(dev_name,format,width,height,fps,verbose);
		V4L2Device* videoCapture = NULL;
		if (useMmap)
		{
			videoCapture = V4L2MMAPDeviceSource::createNew(param);
		}
		else
		{
			videoCapture = V4L2READDeviceSource::createNew(param);
		}
		V4L2DeviceSource* videoES =  V4L2DeviceSource::createNew(*env, param, videoCapture, outputFile, queueSize, verbose);
		if (videoES == NULL) 
		{
			*env << "Unable to create source for device " << dev_name << "\n";
		}
		else
		{
			destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);	
			OutPacketBuffer::maxSize = videoCapture->getBufferSize();
			StreamReplicator* replicator = StreamReplicator::createNew(*env, videoES, false);

			// Create Server Multicast Session
			if (multicast)
			{
				addSession(rtspServer, "multicast", MulticastServerMediaSubsession::createNew(*env,destinationAddress, Port(rtpPortNum), Port(rtcpPortNum), ttl, 96, replicator,format));
			}
			
			// Create Server Unicast Session
			addSession(rtspServer, "unicast", UnicastServerMediaSubsession::createNew(*env,replicator,format));

			// main loop
			signal(SIGINT,sighandler);
			env->taskScheduler().doEventLoop(&quit); 
			*env << "Exiting..\n";			
		}
		
		Medium::close(videoES);
		delete videoCapture;
		Medium::close(rtspServer);
	}
	
	env->reclaim();
	delete scheduler;	
	
	return 0;
}
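
Examples #6 and #7 both rely on an addSession() helper that is not shown. A plausible sketch (an assumption, modeled on the calls made above): wrap the subsession in a ServerMediaSession, register it, and print the resulting URL.

// Assumed helper -- not part of the original listing.
void addSession(RTSPServer* rtspServer, const std::string& sessionName, ServerMediaSubsession* subSession)
{
	UsageEnvironment& env(rtspServer->envir());
	ServerMediaSession* sms = ServerMediaSession::createNew(env, sessionName.c_str());
	sms->addSubsession(subSession);
	rtspServer->addServerMediaSession(sms);

	char* url = rtspServer->rtspURL(sms);
	env << "Play this stream using the URL \"" << url << "\"\n";
	delete[] url;
}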
Example #7
File: main.cpp  Project: Xianleewu/camrtsp
// -----------------------------------------
//    entry point
// -----------------------------------------
int main(int argc, char** argv) 
{
	// default parameters
	const char *dev_name = "/dev/video0";	
	int format = V4L2_PIX_FMT_H264;
	int width = 640;
	int height = 480;
	int queueSize = 10;
	int fps = 25;
	unsigned short rtspPort = 8554;
	unsigned short rtspOverHTTPPort = 0;
	bool multicast = false;
	int verbose = 0;
	std::string outputFile;
	bool useMmap = true;
	std::string url = "unicast";
	std::string murl = "multicast";
	bool useThread = true;
	std::string maddr;
	bool repeatConfig = true;
	int timeout = 65;

	// decode parameters
	int c = 0;     
	while ((c = getopt (argc, argv, "v::Q:O:" "I:P:T:m:u:M:ct:" "rsfF:W:H:" "h")) != -1)
	{
		switch (c)
		{
			case 'v':	verbose = 1; if (optarg && *optarg=='v') verbose++;  break;
			case 'Q':	queueSize = atoi(optarg); break;
			case 'O':	outputFile = optarg; break;
			// RTSP/RTP
			case 'I':       ReceivingInterfaceAddr = inet_addr(optarg); break;
			case 'P':	rtspPort = atoi(optarg); break;
			case 'T':	rtspOverHTTPPort = atoi(optarg); break;
			case 'u':	url = optarg; break;
			case 'm':	multicast = true; murl = optarg; break;
			case 'M':	multicast = true; maddr = optarg; break;
			case 'c':	repeatConfig = false; break;
			case 't':	timeout = atoi(optarg); break;
			// V4L2
			case 'r':	useMmap =  false; break;
			case 's':	useThread =  false; break;
			case 'f':	format = 0; break;
			case 'F':	fps = atoi(optarg); break;
			case 'W':	width = atoi(optarg); break;
			case 'H':	height = atoi(optarg); break;

			case 'h':
			default:
			{
				std::cout << argv[0] << " [-v[v]] [-Q queueSize] [-O file]"                                        << std::endl;
				std::cout << "\t          [-I interface] [-P RTSP port] [-T RTSP/HTTP port] [-m multicast url] [-u unicast url] [-M multicast addr] [-c] [-t timeout]" << std::endl;
				std::cout << "\t          [-r] [-s] [-W width] [-H height] [-F fps] [device] [device]"           << std::endl;
				std::cout << "\t -v       : verbose"                                                               << std::endl;
				std::cout << "\t -vv      : very verbose"                                                          << std::endl;
				std::cout << "\t -Q length: Number of frame queue  (default "<< queueSize << ")"                   << std::endl;
				std::cout << "\t -O output: Copy captured frame to a file or a V4L2 device"                        << std::endl;
				std::cout << "\t RTSP options :"                                                                   << std::endl;
				std::cout << "\t -I addr  : RTSP interface (default autodetect)"                                   << std::endl;
				std::cout << "\t -P port  : RTSP port (default "<< rtspPort << ")"                                 << std::endl;
				std::cout << "\t -T port  : RTSP over HTTP port (default "<< rtspOverHTTPPort << ")"               << std::endl;
				std::cout << "\t -u url   : unicast url (default " << url << ")"                                   << std::endl;
				std::cout << "\t -m url   : multicast url (default " << murl << ")"                                << std::endl;
				std::cout << "\t -M addr  : multicast group:port (default is random_address:20000)"                << std::endl;
				std::cout << "\t -c       : don't repeat config (default repeat config before IDR frame)"          << std::endl;
				std::cout << "\t -t secs  : RTCP expiration timeout (default " << timeout << ")"                   << std::endl;
				std::cout << "\t V4L2 options :"                                                                   << std::endl;
				std::cout << "\t -r       : V4L2 capture using read interface (default use memory mapped buffers)" << std::endl;
				std::cout << "\t -s       : V4L2 capture using live555 mainloop (default use a reader thread)"     << std::endl;
				std::cout << "\t -f       : V4L2 capture using current format (-W,-H,-F are ignore)"               << std::endl;
				std::cout << "\t -W width : V4L2 capture width (default "<< width << ")"                           << std::endl;
				std::cout << "\t -H height: V4L2 capture height (default "<< height << ")"                         << std::endl;
				std::cout << "\t -F fps   : V4L2 capture framerate (default "<< fps << ")"                         << std::endl;
				std::cout << "\t device   : V4L2 capture device (default "<< dev_name << ")"                       << std::endl;
				exit(0);
			}
		}
	}
	std::list<std::string> devList;
	while (optind<argc)
	{
		devList.push_back(argv[optind]);
		optind++;
	}
	if (devList.empty())
	{
		devList.push_back(dev_name);
	}

	// init logger
	initLogger(verbose);
     
	// create live555 environment
	TaskScheduler* scheduler = BasicTaskScheduler::createNew();
	UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);	

	// split multicast info
	std::istringstream is(maddr);
	std::string ip;
	getline(is, ip, ':');						
	struct in_addr destinationAddress;
	destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
	if (!ip.empty())
	{
		destinationAddress.s_addr = inet_addr(ip.c_str());
	}						
	
	std::string port;
	getline(is, port, ':');						
	unsigned short rtpPortNum = 20000;
	if (!port.empty())
	{
		rtpPortNum = atoi(port.c_str());
	}	
	unsigned short rtcpPortNum = rtpPortNum+1;
	unsigned char ttl = 5;
	
	// create RTSP server
	RTSPServer* rtspServer = createRTSPServer(*env, rtspPort, rtspOverHTTPPort, timeout);
	if (rtspServer == NULL) 
	{
		LOG(ERROR) << "Failed to create RTSP server: " << env->getResultMsg();
	}
	else
	{			
		int nbSource = 0;
		std::list<std::string>::iterator devIt;
		for ( devIt=devList.begin() ; devIt!=devList.end() ; ++devIt)
		{
			std::string deviceName(*devIt);
			
			// Init capture
			LOG(NOTICE) << "Create V4L2 Source..." << deviceName;
			V4L2DeviceParameters param(deviceName.c_str(),format,width,height,fps, verbose);
			V4l2Capture* videoCapture = V4l2DeviceFactory::CreateVideoCapure(param, useMmap);
			if (videoCapture)
			{
				nbSource++;
				format = videoCapture->getFormat();				
				int outfd = -1;
				
				V4l2Output* out = NULL;
				if (!outputFile.empty())
				{
					V4L2DeviceParameters outparam(outputFile.c_str(), videoCapture->getFormat(), videoCapture->getWidth(), videoCapture->getHeight(), 0,verbose);
					out = V4l2DeviceFactory::CreateVideoOutput(outparam, useMmap); // assign the outer 'out' so it is deleted below
					if (out != NULL)
					{
						outfd = out->getFd();
					}
				}
				
				LOG(NOTICE) << "Start V4L2 Capture..." << deviceName;
				if (!videoCapture->captureStart())
				{
					LOG(NOTICE) << "Cannot start V4L2 Capture for:" << deviceName;
				}
				V4L2DeviceSource* videoES = NULL;
				if (format == V4L2_PIX_FMT_H264)
				{
					videoES = H264_V4L2DeviceSource::createNew(*env, param, videoCapture, outfd, queueSize, useThread, repeatConfig);
				}
				else
				{
					videoES = V4L2DeviceSource::createNew(*env, param, videoCapture, outfd, queueSize, useThread);
				}
				if (videoES == NULL) 
				{
					LOG(FATAL) << "Unable to create source for device " << deviceName;
					delete videoCapture;
				}
				else
				{	
					// extend buffer size if needed
					if (videoCapture->getBufferSize() > OutPacketBuffer::maxSize)
					{
						OutPacketBuffer::maxSize = videoCapture->getBufferSize();
					}
					
					StreamReplicator* replicator = StreamReplicator::createNew(*env, videoES, false);
					
					std::string baseUrl;
					if (devList.size() > 1)
					{
						baseUrl = basename(deviceName.c_str());
						baseUrl.append("/");
					}
					
					// Create Multicast Session
					if (multicast)						
					{		
						LOG(NOTICE) << "RTP  address " << inet_ntoa(destinationAddress) << ":" << rtpPortNum;
						LOG(NOTICE) << "RTCP address " << inet_ntoa(destinationAddress) << ":" << rtcpPortNum;
						addSession(rtspServer, baseUrl+murl, MulticastServerMediaSubsession::createNew(*env,destinationAddress, Port(rtpPortNum), Port(rtcpPortNum), ttl, replicator,format));					
						
						// increment ports for next sessions
						rtpPortNum+=2;
						rtcpPortNum+=2;
						
					}
					// Create Unicast Session
					addSession(rtspServer, baseUrl+url, UnicastServerMediaSubsession::createNew(*env,replicator,format));
				}	
				if (out)
				{
					delete out;
				}
			}
		}

		if (nbSource>0)
		{
			// main loop
			signal(SIGINT,sighandler);
			env->taskScheduler().doEventLoop(&quit); 
			LOG(NOTICE) << "Exiting....";			
		}
		
		Medium::close(rtspServer);
	}
	
	env->reclaim();
	delete scheduler;	
	
	return 0;
}
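
Unlike Example #6, this version obtains its server from a createRTSPServer() helper. A minimal sketch of such a wrapper (an assumption; RTSPServer::createNew() does accept an authentication database and a reclamation timeout as its later parameters):

// Assumed wrapper: create the server, then optionally enable RTSP-over-HTTP tunneling.
RTSPServer* createRTSPServer(UsageEnvironment& env, unsigned short rtspPort, unsigned short rtspOverHTTPPort, int timeout)
{
	RTSPServer* rtspServer = RTSPServer::createNew(env, rtspPort, NULL /* no authDB */, timeout);
	if (rtspServer != NULL && rtspOverHTTPPort != 0)
	{
		rtspServer->setUpTunnelingOverHTTP(rtspOverHTTPPort);
	}
	return rtspServer;
}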
Example #8
void play() {
  // Open the file as a 'WAV' file:
  WAVAudioFileSource* pcmSource
    = WAVAudioFileSource::createNew(*env, inputFileName);
  if (pcmSource == NULL) {
    *env << "Unable to open file \"" << inputFileName
	 << "\" as a WAV audio file source: "
	 << env->getResultMsg() << "\n";
    exit(1);
  }

  // Get attributes of the audio source:
  unsigned char const bitsPerSample = pcmSource->bitsPerSample();
  if (bitsPerSample != 8 && bitsPerSample !=  16) {
    *env << "The input file contains " << bitsPerSample
	 << " bit-per-sample audio, which we don't handle\n";
    exit(1);
  }
  sessionState.source = pcmSource;
  unsigned const samplingFrequency = pcmSource->samplingFrequency();
  unsigned char const numChannels = pcmSource->numChannels();
  unsigned bitsPerSecond
    = samplingFrequency*bitsPerSample*numChannels;
  *env << "Audio source parameters:\n\t" << samplingFrequency << " Hz, ";
  *env << bitsPerSample << " bits-per-sample, ";
  *env << numChannels << " channels => ";
  *env << bitsPerSecond << " bits-per-second\n";

  // Add in any filter necessary to transform the data prior to streaming.
  // (This is where any audio compression would get added.)
  char const* mimeType;
  unsigned char payloadFormatCode;
  if (bitsPerSample == 16) {
#ifdef CONVERT_TO_ULAW
    // Add a filter that converts from raw 16-bit PCM audio (in little-endian order)
    // to 8-bit u-law audio:
    sessionState.source
      = uLawFromPCMAudioSource::createNew(*env, pcmSource, 1/*little-endian*/);
    if (sessionState.source == NULL) {
      *env << "Unable to create a u-law filter from the PCM audio source: "
	   << env->getResultMsg() << "\n";
      exit(1);
    }
    bitsPerSecond /= 2;
    mimeType = "PCMU";
    if (samplingFrequency == 8000 && numChannels == 1) {
      payloadFormatCode = 0; // a static RTP payload type
    } else {
      payloadFormatCode = 96; // a dynamic RTP payload type
    }
    *env << "Converting to 8-bit u-law audio for streaming => "
	 << bitsPerSecond << " bits-per-second\n";
#else
    // The 16-bit samples in WAV files are in little-endian order.
    // Add a filter that converts them to network (i.e., big-endian) order:
    sessionState.source = EndianSwap16::createNew(*env, pcmSource);
    if (sessionState.source == NULL) {
      *env << "Unable to create a little->bit-endian order filter from the PCM audio source: "
	   << env->getResultMsg() << "\n";
      exit(1);
    }
    mimeType = "L16";
    if (samplingFrequency == 44100 && numChannels == 2) {
      payloadFormatCode = 10; // a static RTP payload type
    } else if (samplingFrequency == 44100 && numChannels == 1) {
      payloadFormatCode = 11; // a static RTP payload type
    } else {
      payloadFormatCode = 96; // a dynamic RTP payload type
    }
    *env << "Converting to network byte order for streaming\n";
#endif
  } else { // bitsPerSample == 8
    // Don't do any transformation; send the 8-bit PCM data 'as is':
    mimeType = "L8";
    payloadFormatCode = 96; // a dynamic RTP payload type
  }

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 2222;
  const unsigned short rtcpPortNum = rtpPortNum+1;
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  sessionState.rtpGroupsock
    = new Groupsock(*env, destinationAddress, rtpPort, ttl);
  sessionState.rtpGroupsock->multicastSendOnly(); // we're a SSM source
  sessionState.rtcpGroupsock
    = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
  sessionState.rtcpGroupsock->multicastSendOnly(); // we're a SSM source

  // Create an appropriate audio RTP sink (using "SimpleRTPSink")
  // from the RTP 'groupsock':
  sessionState.sink
    = SimpleRTPSink::createNew(*env, sessionState.rtpGroupsock,
			       payloadFormatCode, samplingFrequency,
			       "audio", mimeType, numChannels);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = bitsPerSecond/1000;
      // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  sessionState.rtcpInstance
    = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock,
			      estimatedSessionBandwidth, CNAME,
			      sessionState.sink, NULL /* we're a server */,
			      True /* we're a SSM source*/);
  // Note: This starts RTCP running automatically

  // Create and start a RTSP server to serve this stream:
  sessionState.rtspServer = RTSPServer::createNew(*env, 8554);
  if (sessionState.rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
	   "Session streamed by \"testWAVAudiotreamer\"", True/*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance));
  sessionState.rtspServer->addServerMediaSession(sms);

  char* url = sessionState.rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Finally, start the streaming:
  *env << "Beginning streaming...\n";
  sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
}
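
The afterPlaying() handler passed to startPlaying() above is not shown. In the live555 WAV-streaming demo it simply tears the session state down and exits; approximately:

void afterPlaying(void* /*clientData*/) {
  *env << "...done reading from file\n";

  // End by closing the media (the event loop never returns, so exit explicitly):
  Medium::close(sessionState.rtspServer);
  Medium::close(sessionState.sink);
  delete sessionState.rtpGroupsock;
  Medium::close(sessionState.source);
  Medium::close(sessionState.rtcpInstance);
  delete sessionState.rtcpGroupsock;
  exit(0);
}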
Example #9
// -----------------------------------------
//    entry point
// -----------------------------------------
int main(int argc, char** argv) 
{

#if AUDIO_STREAM

	StreamReplicator* audio_replicator = NULL;
#endif
	// default parameters
	const char *dev_name = "/dev/video1";	
	int format = V4L2_PIX_FMT_H264;
	int width = 1280;
	int height = 720;
	int queueSize = 10;
	int fps = 30;
	int isp_fps = 30;
	int bitrate = 1024; //(Kbps)
	int mjpeg_qp = 120;
	int m2m_en = 1;
	int gop = fps;
	unsigned short rtpPortNum = 20000;
	unsigned short rtcpPortNum = rtpPortNum+1;
	unsigned char ttl = 5;
	struct in_addr destinationAddress;
	unsigned short rtspPort = 554;
	unsigned short rtspOverHTTPPort = 0;
	bool multicast = false;
	int verbose = 0;
	std::string outputFile;
	//bool useMmap = true;
	std::string url = "unicast";
	std::string murl = "multicast";
	bool useThread = true;
	in_addr_t maddr = INADDR_NONE;
	bool audio_en = false;

	// decode parameters
	int c = 0;     
	//while ((c = getopt (argc, argv, "hW:H:Q:P:F:v::O:T:m:u:rsM:")) != -1)
#if AUDIO_STREAM
	while ((c = getopt (argc, argv, "hb:W:H:g:Q:P:F:i:O:T:m:u:M:aj:")) != -1)
#else
	while ((c = getopt (argc, argv, "hb:W:H:g:Q:P:F:i:O:T:m:u:M:j:")) != -1)
#endif
	{
		switch (c)
		{
			case 'O':	outputFile = optarg; break;
			//case 'v':	verbose = 1; if (optarg && *optarg=='v') verbose++;  break;
			case 'm':	multicast = true; if (optarg) murl = optarg; break;
			case 'M':	multicast = true; if (optarg) maddr = inet_addr(optarg); break;
			case 'g':	gop = atoi(optarg); break; 
			case 'b':	bitrate = atoi(optarg); break; 
			case 'W':	width = atoi(optarg); break;
			case 'H':	height = atoi(optarg); break;
			case 'Q':	queueSize = atoi(optarg); break;
			case 'P':	rtspPort = atoi(optarg); break;
			case 'T':	rtspOverHTTPPort = atoi(optarg); break;
			case 'F':	fps = atoi(optarg); break;
			case 'i':	isp_fps = atoi(optarg); break;
			//case 'r':	useMmap =  false; break;
			//case 's':	useThread =  false; break;
			case 'u':	url = optarg; break;
#if AUDIO_STREAM
			case 'a':	audio_en = true; break;
#endif
			case 'j':	format = V4L2_PIX_FMT_MJPEG; mjpeg_qp = atoi(optarg);break;	
			case 'h':
			default:
			{
				std::cout << argv[0] << "Version:" << SNX_RTSP_SERVER_VERSION										<< std::endl;
				std::cout << "Usage :"                                                              				<< std::endl;
				std::cout << "\t " << argv[0] << " [-a] [-j mjpeg_qp] [-m] [-P RTSP port][-T RTSP/HTTP port][-Q queueSize] [-M groupaddress] [-b bitrate] [-W width] [-H height] [-F fps] [-i isp_fps] [device]" << std::endl;

				std::cout << "\t -Q length: Number of frame queue  (default "<< queueSize << ")"                   << std::endl;
				std::cout << "\t RTSP options :"                                                                   << std::endl;
				std::cout << "\t -u url     : unicast url (default " << url << ")"                                   << std::endl;
				std::cout << "\t -m url     : multicast url (default " << murl << ")"                                << std::endl;
				std::cout << "\t -M addr    : multicast group   (default is a random address)"                                << std::endl;
				std::cout << "\t -P port    : RTSP port (default "<< rtspPort << ")"                                 << std::endl;
				std::cout << "\t -T port    : RTSP over HTTP port (default "<< rtspOverHTTPPort << ")"               << std::endl;
				std::cout << "\t V4L2 options :"                                                                   << std::endl;
				//std::cout << "\t -r       : V4L2 capture using read interface (default use memory mapped buffers)" << std::endl;
				//std::cout << "\t -s       : V4L2 capture using live555 mainloop (default use a separated reading thread)" << std::endl;
				std::cout << "\t -F fps     : V4L2 capture framerate (default "<< fps << ")"                         << std::endl;
				std::cout << "\t -i isp_fps : ISP capture framerate (default "<< isp_fps << ")"                         << std::endl;
				std::cout << "\t -W width   : V4L2 capture width (default "<< width << ")"                           << std::endl;
				std::cout << "\t -H height  : V4L2 capture height (default "<< height << ")"                         << std::endl;
				
				std::cout << "\t V4L2 H264 options :"                                                              << std::endl;

				std::cout << "\t -b bitrate : V4L2 capture bitrate kbps(default "<< bitrate << " kbps)"				<< std::endl;
				std::cout << "\t -g gop     : V4L2 capture gop (default "<< gop << " )"									<< std::endl;
				std::cout << "\t device     : V4L2 capture device (default "<< dev_name << ")"                       << std::endl;

				std::cout << "\t V4L2 MJPEG options :"                                                              << std::endl;
				std::cout << "\t -j mjpeg_qp : MJPEG streaming and qp (default is 60)"							<< std::endl;

#if AUDIO_STREAM
				std::cout << "\t -a         : enable A-law pcm streaming "											 << std::endl;
				std::cout << "\t H264 example : "<< argv[0] << " -a -Q 5 -u media/stream1 -P 554"                       << std::endl;
#else
				std::cout << "\t H264 example : "<< argv[0] << " -Q 5 -u media/stream1 -P 554"                       << std::endl;
#endif
				std::cout << "\t MJPEG example : "<< argv[0] << " -W 640 -H 480 -j 120 -Q 5 -u media/stream1 -P 554"		<< std::endl;
				exit(0);
			}
		}
	}
	if (optind<argc)
	{
		dev_name = argv[optind];
	}
     
	// create live555 environment
	scheduler = BasicTaskScheduler::createNew();
	env = BasicUsageEnvironment::createNew(*scheduler);	
	
	// create RTSP server
	rtspServer = RTSPServer::createNew(*env, rtspPort);
	if (rtspServer == NULL) 
	{
		//LOG(ERROR) << "Failed to create RTSP server: " << env->getResultMsg();
		fprintf(stderr, "Failed to create RTSP server: %s \n", env->getResultMsg());
	}
	else
	{
		// set http tunneling
		if (rtspOverHTTPPort)
		{
			rtspServer->setUpTunnelingOverHTTP(rtspOverHTTPPort);
		}
		
		// Init capture
		//LOG(NOTICE) << "Create V4L2 Source..." << dev_name;
		fprintf(stderr, "create Video source = %s \n", dev_name);
		
		V4L2DeviceParameters param(dev_name,format,width,height,fps, isp_fps, verbose, bitrate, m2m_en, gop, mjpeg_qp, queueSize );
		videoCapture = createVideoCapure(param);

#if AUDIO_STREAM
		if (audio_en) {
				audioCapture = createAudioCapure();
		}
#endif
		if (videoCapture)
		{
			int outputFd = -1;
			//int outputFd = createOutput(outputFile, videoCapture->getFd());			
			//LOG(NOTICE) << "Start V4L2 Capture..." << dev_name;
			fprintf(stderr, "Start V4L2 Capture... %s \n",  dev_name);
			//videoCapture->captureStart();

			snx98600_video_start(videoCapture);
			printf("\n\n------- V4L2 Infomation -------- \n");
			printf("m2m_en: %d\n", videoCapture->m2m->m2m);
			printf("codec_dev: %s\n", videoCapture->m2m->codec_dev);
			printf("codec_fps: %d\n", videoCapture->m2m->codec_fps);
			if(videoCapture->m2m->m2m)
				printf("isp_fps: %d\n", videoCapture->m2m->isp_fps);
			printf("width: %d\n", videoCapture->m2m->width);
			printf("height: %d\n", videoCapture->m2m->height);
			printf("scale: %d\n", videoCapture->m2m->scale);
			printf("bit_rate: %d\n", videoCapture->m2m->bit_rate);
			printf("dyn_fps_en: %d\n", videoCapture->m2m->dyn_fps_en);
			if(videoCapture->m2m->dyn_fps_en) {
				printf("framerate: %d\n", videoCapture->rate_ctl->framerate);
			}
			printf("GOP: %d\n", videoCapture->rate_ctl->gop);
			printf("ds_font_num: %d\n", videoCapture->m2m->ds_font_num);
			printf("\n----------------------------- \n\n");

#if AUDIO_STREAM
			/* 
				Start Audio Device 

			*/
			if (audio_en) {
				int rc;
				if (audioCapture) {
					if ((rc = snx98600_record_audio_start(audioCapture))) {
						fprintf(stderr, "failed to start audio source: %s\n", strerror(rc));
					}
				}
			}
#endif
			/* Determind which Class to use */
			if (format == V4L2_PIX_FMT_H264)
				videoES =  H264_V4L2DeviceSource::createNew(*env, param, outputFd, useThread);
			else  {
				videoES = V4L2DeviceSource::createNew(*env, param, outputFd, useThread);
			}

			/*  check if create a Device source success */
			if (videoES == NULL)
			{
				//LOG(FATAL) << "Unable to create source for device " << dev_name;
				fprintf(stderr, "Unable to create source for device  %s \n",  dev_name);
			}
			else
			{

				videoCapture->devicesource = videoES;
				
				// Setup the outpacket size;
				if (m2m_en) {
					//OutPacketBuffer::maxSize = (unsigned int)videoCapture->m2m->isp_buffers->length;
					OutPacketBuffer::maxSize = bitrate << 8;    //2X Bitrate as the max packet size
					fprintf(stderr, "isp buffers: %u , outpack maxsize : %u\n", (unsigned int)videoCapture->m2m->isp_buffers->length, OutPacketBuffer::maxSize  );
				}else {

					OutPacketBuffer::maxSize = width * height * 3 / 2;
				}

#if AUDIO_STREAM
				/* 
					create Alsa Device source Class 
				*/
				if (audio_en && audioCapture) {
					audioES =  AlsaDeviceSource::createNew(*env, -1, queueSize, useThread);

					if (audioES == NULL) 
					{
						fprintf(stderr, "Unable to create audio devicesource \n");
					}
					else
					{
						audioCapture->devicesource = audioES;
					}
				}
#endif

				replicator = StreamReplicator::createNew(*env, videoES, false);

#if AUDIO_STREAM
				if (audio_en && audioCapture)
					audio_replicator = StreamReplicator::createNew(*env, audioES, false);
#endif
				// Create Server Multicast Session
				if (multicast)
				{
					ServerMediaSubsession * multicast_video_subSession = NULL;
					ServerMediaSubsession * multicast_audio_subSession = NULL;
					if (maddr == INADDR_NONE) maddr = chooseRandomIPv4SSMAddress(*env);	
					destinationAddress.s_addr = maddr;
					//LOG(NOTICE) << "Mutlicast address " << inet_ntoa(destinationAddress);
					fprintf(stderr, "Mutlicast address  %s \n",  inet_ntoa(destinationAddress));


					multicast_video_subSession = MulticastServerMediaSubsession::createNew(*env,destinationAddress, Port(rtpPortNum), Port(rtcpPortNum), ttl, replicator,format,param);
#if AUDIO_STREAM
					if (audio_en && audioCapture) 
						multicast_audio_subSession =  MulticastServerMediaSubsession::createNew(*env,destinationAddress, Port(rtpPortNum), Port(rtcpPortNum), ttl, audio_replicator,WA_PCMA,param);
#endif
					addSession(rtspServer, murl.c_str(), multicast_video_subSession, multicast_audio_subSession);
				
				}

				ServerMediaSubsession * video_subSession = NULL;
				ServerMediaSubsession * audio_subSession = NULL;

				video_subSession = UnicastServerMediaSubsession::createNew(*env,replicator,format, param);

#if AUDIO_STREAM
				if (audio_en && audioCapture) 
					audio_subSession = UnicastServerMediaSubsession::createNew(*env,audio_replicator,WA_PCMA, param);
#endif
				// Create Server Unicast Session
				addSession(rtspServer, url.c_str(), video_subSession, audio_subSession);

				// main loop
				signal(SIGINT,sighandler);
				env->taskScheduler().doEventLoop(&quit); 
	
				fprintf(stderr, "Exiting....  \n");		

#if AUDIO_STREAM
				if (audioES) 
				{
					Medium::close(audioES);
				}
#endif
				Medium::close(videoES);
			}
#if AUDIO_STREAM
			if (audio_en && audioCapture) 
				closeAudioCapure(audioCapture);
#endif
			if (videoCapture)
				closeVideoCapure(videoCapture);
			
			//delete videoCapture;
			if (outputFd != -1)
			{
				close(outputFd);
			}
		}
		Medium::close(rtspServer);
	}
	
	env->reclaim();
	delete scheduler;	
	
	return 0;
}
Example #10
static void RtspServerStart(RTSP_PARAM_INFO *cRtspParamInfo)
{
	enum {ENUM_UNICAST = 0, ENUM_MULTICAST};
	scheduler = BasicTaskScheduler::createNew();
	env = BasicUsageEnvironment::createNew(*scheduler);
	Boolean reuseFirstSource = true;
	OutPacketBuffer::maxSize = 500000;
	const unsigned short statusPort = 10086;
	Port RTSPStatusPort(statusPort);
	Port RTSPServerPort(cRtspParamInfo->iRTSPServerPort);
	RTPSink *videoSinkMajor = NULL;
	RTPSink *videoSinkMinor = NULL;
	char streamName[RTSP_STRING_LEN] = {0};
	char secondStreamName[RTSP_STRING_LEN] = {0};
	strncpy(streamName, cRtspParamInfo->rgStreamName[RTSP_STREAM_MAJOR], RTSP_STRING_LEN - 1);
	strncpy(secondStreamName, cRtspParamInfo->rgStreamName[RTSP_STREAM_MINOR], RTSP_STRING_LEN - 1);
	// Create 'groupsocks' for RTP and RTCP:
	struct in_addr destinationAddressMajor;
	struct in_addr destinationAddressMinor;
	UserAuthenticationDatabase *authDB = NULL;
//#ifdef ACCESS_CONTROL
	if(cRtspParamInfo->iAuthenticateEnable){
		authDB = new UserAuthenticationDatabase;
//		authDB->addUserRecord(cRtspParamInfo->rgUserName, cRtspParamInfo->rgPassword);
		authDB->addUserRecord("admin", "admin");
		fprintf(stdout, "%s %d Authentication Enable!\n", __FILE__, __LINE__);
	}
//#endif
	destinationAddressMajor.s_addr = chooseRandomIPv4SSMAddress(*env);
	destinationAddressMinor.s_addr = chooseRandomIPv4SSMAddress(*env);
	// Note: This is a multicast address.  If you wish instead to stream
	// using unicast, then you should use the "testOnDemandRTSPServer"
	// test program - not this test program - as a model.

	const unsigned short rtpPortNumMajor = 18888;
	const unsigned short rtcpPortNumMajor = rtpPortNumMajor + 1;
	const unsigned short rtpPortNumMinor = rtcpPortNumMajor + 1;
	const unsigned short rtcpPortNumMinor = rtpPortNumMinor + 1;
	const unsigned char ttl = 255;

	const Port rtpPortMajor(rtpPortNumMajor);
	const Port rtcpPortMajor(rtcpPortNumMajor);
	const Port rtpPortMinor(rtpPortNumMinor);
	const Port rtcpPortMinor(rtcpPortNumMinor);

	Groupsock rtpGroupsockMajor(*env, destinationAddressMajor, rtpPortMajor, ttl);
	rtpGroupsockMajor.multicastSendOnly(); // we're a SSM source
	Groupsock rtcpGroupsockMajor(*env, destinationAddressMajor, rtcpPortMajor, ttl);
	rtcpGroupsockMajor.multicastSendOnly(); // we're a SSM source
	Groupsock rtpGroupsockMinor(*env, destinationAddressMinor, rtpPortMinor, ttl);
	rtpGroupsockMinor.multicastSendOnly();
	Groupsock rtcpGroupsockMinor(*env, destinationAddressMinor, rtcpPortMinor, ttl);
	rtcpGroupsockMinor.multicastSendOnly();

	// Create a 'H264 Video RTP' sink from the RTP 'groupsock':
	OutPacketBuffer::maxSize = 1000000;

	// Create (and start) a 'RTCP instance' for this RTP sink:
	const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
	const unsigned maxCNAMElen = 100;
	unsigned char CNAME[maxCNAMElen+1];
	gethostname((char*)CNAME, maxCNAMElen);
	CNAME[maxCNAMElen] = '\0'; // just in case

	RTSPServer* rtspServer = RTSPServer::createNew(*env, RTSPServerPort, authDB);
	if (rtspServer == NULL) {
		*env << "Failed to create RTSP server: " 
			 << env->getResultMsg() << "\n";
		exit(1);
	}

	//first stream
	if(cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MAJOR] == ENUM_UNICAST){
		ServerMediaSession* smsMajor
			= ServerMediaSession::createNew(*env, streamName, 
											streamName, "Session streamed by \"testH264VideoStreamer\"");
		smsMajor->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, 
																			  firstInputFileName, reuseFirstSource));
		rtspServer->addServerMediaSession(smsMajor);
	}else if(cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MAJOR] == ENUM_MULTICAST){
		videoSinkMajor = H264VideoRTPSink::createNew(*env, &rtpGroupsockMajor, 96);
		RTCPInstance* rtcpMajor = RTCPInstance::createNew(*env, &rtcpGroupsockMajor,
														  estimatedSessionBandwidth, CNAME,
														  videoSinkMajor, NULL /* we're a server */,
														  True /* we're a SSM source */);
		ServerMediaSession* smsMajor = ServerMediaSession::createNew(*env, streamName, firstInputFileName,
																	 "Session streamed by \"swH264VideoStreamer\"",
																	 True /*SSM*/);
		smsMajor->addSubsession(PassiveServerMediaSubsession::createNew(*videoSinkMajor, rtcpMajor));
		rtspServer->addServerMediaSession(smsMajor);
		play(videoSinkMajor, firstInputFileName);
	}

	if((cRtspParamInfo->iRTSPStreamNum > 1) && 
	   (cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MINOR] == ENUM_UNICAST)){
		ServerMediaSession* smsMinor
			= ServerMediaSession::createNew(*env, secondStreamName, 
											secondStreamName,
											"Session streamed by \"testH264VideoStreamer\"");
		smsMinor->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, 
																			  secondInputFileName, reuseFirstSource));
		rtspServer->addServerMediaSession(smsMinor);
	}else if((cRtspParamInfo->iRTSPStreamNum > 1) && 
			 (cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MINOR] == ENUM_MULTICAST)){
		videoSinkMinor = H264VideoRTPSink::createNew(*env, &rtpGroupsockMinor, 96);
		RTCPInstance* rtcpMinor = RTCPInstance::createNew(*env, &rtcpGroupsockMinor,
														  estimatedSessionBandwidth, CNAME,
														  videoSinkMinor, NULL /* we're a server */,
														  True /* we're a SSM source */);
		ServerMediaSession *smsMinor = ServerMediaSession::createNew(*env, secondStreamName, secondInputFileName,
																	 "Session streamed by \"swH264VideoStreamer\"",
																	 True /*SSM*/);
		smsMinor->addSubsession(PassiveServerMediaSubsession::createNew(*videoSinkMinor, rtcpMinor));
		rtspServer->addServerMediaSession(smsMinor);
		play(videoSinkMinor, secondInputFileName);
	}

	rtspServer->setUpConnectionStatus(RTSPStatusPort);
	rtspServer->setStreamName(0, streamName, strlen(streamName));		//0 for major
	rtspServer->setStreamName(1, secondStreamName, strlen(secondStreamName));
	rtspServer->setAutoControlBitrate(cRtspParamInfo->iAutoControlBitrateEnable);

	if(rtspServer->setUpTunnelingOverHTTP(80) ||
	   rtspServer->setUpTunnelingOverHTTP(8000) ||
	   rtspServer->setUpTunnelingOverHTTP(8080)) {
		*env << "\n(We use port " << rtspServer->httpServerPortNum() 
			 << " for optional RTSP-over-HTTP tunneling.)\n";
	} else {
		*env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
	}

	env->taskScheduler().doEventLoop(); // does not return
	
}
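
The two-argument play() used above is not shown either. Given the H264VideoRTPSink usage, a sketch adapted from the "testH264VideoStreamer" pattern (an assumption; afterPlaying() is likewise assumed to restart or clean up the stream):

static void play(RTPSink* videoSink, const char* inputFileName)
{
	// Open the input file as a 'byte-stream file source':
	ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(*env, inputFileName);
	if (fileSource == NULL) {
		*env << "Unable to open file \"" << inputFileName
			 << "\" as a byte-stream file source\n";
		exit(1);
	}

	// Frame the raw byte stream into H.264 NAL units, then start streaming:
	FramedSource* videoSource = H264VideoStreamFramer::createNew(*env, fileSource);
	videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
}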
Example #11
int main(int argc, char** argv) {
  init_signals();
  setpriority(PRIO_PROCESS, 0, 0);
  int IsSilence = 0;
  int svcEnable = 0;
  int cnt=0;
  int activePortCnt=0;
  if( GetSampleRate() == 16000 )
  {
	audioOutputBitrate = 128000;
	audioSamplingFrequency = 16000;
  }else{
	audioOutputBitrate = 64000;
	audioSamplingFrequency = 8000;
  }
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
  int msg_type, video_type;
  APPROInput* MjpegInputDevice = NULL;
  APPROInput* H264InputDevice = NULL;
  APPROInput* Mpeg4InputDevice = NULL;
  static pid_t child[8] = {	// 8 entries: indices 4..7 are used when svcEnable is set
	-1,-1,-1,-1,-1,-1,-1,-1
  };

  StreamingMode streamingMode = STREAMING_UNICAST;
  netAddressBits multicastAddress = 0;//our_inet_addr("224.1.4.6");
  portNumBits videoRTPPortNum = 0;
  portNumBits audioRTPPortNum = 0;

  IsSilence = 0;
  svcEnable = 0;
  audioType = AUDIO_G711;
  streamingMode = STREAMING_UNICAST;

  for( cnt = 1; cnt < argc ;cnt++ )
  {
	if( strcmp( argv[cnt],"-m" )== 0  )
	{
		streamingMode = STREAMING_MULTICAST_SSM;
	}

	if( strcmp( argv[cnt],"-s" )== 0  )
	{
		IsSilence = 1;
	}

	if( strcmp( argv[cnt],"-a" )== 0  )
	{
		audioType = AUDIO_AAC;
	}

	if( strcmp( argv[cnt],"-v" )== 0  )
	{
		svcEnable = 1;
	}
  }

#if 0
  printf("###########IsSilence = %d ################\n",IsSilence);
  printf("###########streamingMode = %d ################\n",streamingMode);
  printf("###########audioType = %d ################\n",audioType);
  printf("###########svcEnable = %d ################\n",svcEnable);
#endif

  child[0] = fork();

  if( child[0] != 0 )
  {
	child[1] = fork();
  }

  if( child[0] != 0 && child[1] != 0 )
  {
	child[2] = fork();
  }

  if( child[0] != 0 && child[1] != 0 && child[2] != 0 )
  {
	child[3] = fork();
  }

  if(svcEnable) {
	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0)
	  {
		child[4] = fork();
	  }

	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0 && child[4] != 0)
	  {
		child[5] = fork();
	  }

	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0 && child[4] != 0 && child[5] != 0)
	  {
		child[6] = fork();
	  }

	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0 && child[4] != 0 && child[5] != 0 && child[6] != 0)
	  {
		child[7] = fork();
	  }
  }

  if( child[0] == 0 )
  {
	/* parent, success */
	msg_type = LIVE_MSG_TYPE4;
	video_type = VIDEO_TYPE_H264_CIF;
	rtspServerPortNum = 8556;
	H264VideoBitrate = 12000000;
	videoRTPPortNum = 6012;
	audioRTPPortNum = 6014;
  }
  if( child[1] == 0 )
  {
	/* parent, success */
	msg_type = LIVE_MSG_TYPE3;
	video_type = VIDEO_TYPE_MJPEG;
	rtspServerPortNum = 8555;
	MjpegVideoBitrate = 12000000;
	videoRTPPortNum = 6008;
	audioRTPPortNum = 6010;
  }
  if( child[2] == 0 )
  {
	/* parent, success */
	msg_type = LIVE_MSG_TYPE;
	video_type = VIDEO_TYPE_MPEG4;
	rtspServerPortNum = 8553;
	Mpeg4VideoBitrate = 12000000;
	videoRTPPortNum = 6000;
	audioRTPPortNum = 6002;
  }
  if( child[3] == 0 )
  {
	/* parent, success */
	msg_type = LIVE_MSG_TYPE2;
	video_type = VIDEO_TYPE_MPEG4_CIF;
	rtspServerPortNum = 8554;
	Mpeg4VideoBitrate = 12000000;
	videoRTPPortNum = 6004;
	audioRTPPortNum = 6006;
  }

  if(svcEnable) {
	  if( child[4] == 0 )
	  {
		/* parent, success */
		msg_type = LIVE_MSG_TYPE5;
		video_type = VIDEO_TYPE_H264_SVC_30FPS;
		rtspServerPortNum = 8601;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6016;
		audioRTPPortNum = 6018;
	  }
	  if( child[5] == 0 )
	  {
		/* parent, success */
		msg_type = LIVE_MSG_TYPE6;
		video_type = VIDEO_TYPE_H264_SVC_15FPS;
		rtspServerPortNum = 8602;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6020;
		audioRTPPortNum = 6022;
	  }
	  if( child[6] == 0 )
	  {
		/* parent, success */
		msg_type = LIVE_MSG_TYPE7;
		video_type = VIDEO_TYPE_H264_SVC_7FPS;
		rtspServerPortNum = 8603;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6024;
		audioRTPPortNum = 6026;
	  }
	  if( child[7] == 0 )
	  {
		/* parent, success */
		msg_type = LIVE_MSG_TYPE8;
		video_type = VIDEO_TYPE_H264_SVC_3FPS;
		rtspServerPortNum = 8604;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6028;
		audioRTPPortNum = 6030;
	  }
	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0 && child[4] != 0 && child[5] != 0 && child[6] != 0 && child[7] != 0)
	  {
		/* parent, success */
		msg_type = LIVE_MSG_TYPE9;
		video_type = VIDEO_TYPE_H264;
		rtspServerPortNum = 8557;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6032;
		audioRTPPortNum = 6034;
	  }
 }
 else {
  	  if( child[0] != 0 && child[1] != 0 && child[2] != 0 && child[3] != 0)
	  {
		/* parent, success */
		msg_type = LIVE_MSG_TYPE5;
		video_type = VIDEO_TYPE_H264;
		rtspServerPortNum = 8557;
		H264VideoBitrate = 12000000;
		videoRTPPortNum = 6032;
		audioRTPPortNum = 6034;
	  }
 }

  videoType = video_type;

  // Objects used for multicast streaming:
  static Groupsock* rtpGroupsockAudio = NULL;
  static Groupsock* rtcpGroupsockAudio = NULL;
  static Groupsock* rtpGroupsockVideo = NULL;
  static Groupsock* rtcpGroupsockVideo = NULL;
  static FramedSource* sourceAudio = NULL;
  static RTPSink* sinkAudio = NULL;
  static RTCPInstance* rtcpAudio = NULL;
  static FramedSource* sourceVideo = NULL;
  static RTPSink* sinkVideo = NULL;
  static RTCPInstance* rtcpVideo = NULL;

  share_memory_init(msg_type);

  //init_signals();

  *env << "Initializing...\n";


  // Initialize the WIS input device:
  if( video_type == VIDEO_TYPE_MJPEG)
  {
	  MjpegInputDevice = APPROInput::createNew(*env, VIDEO_TYPE_MJPEG);
	  if (MjpegInputDevice == NULL) {
	    err(*env) << "Failed to create MJPEG input device\n";
	    exit(1);
	  }
  }

  if( video_type == VIDEO_TYPE_H264 || video_type == VIDEO_TYPE_H264_CIF || video_type == VIDEO_TYPE_H264_SVC_30FPS ||
		video_type == VIDEO_TYPE_H264_SVC_15FPS || video_type == VIDEO_TYPE_H264_SVC_7FPS || video_type == VIDEO_TYPE_H264_SVC_3FPS)
  {
	  H264InputDevice = APPROInput::createNew(*env, video_type);
	  if (H264InputDevice == NULL) {
	    err(*env) << "Failed to create MJPEG input device\n";
	    exit(1);
	  }
  }

  if( video_type == VIDEO_TYPE_MPEG4 || video_type == VIDEO_TYPE_MPEG4_CIF )
  {
	  Mpeg4InputDevice = APPROInput::createNew(*env, video_type);
	  if (Mpeg4InputDevice == NULL) {
		err(*env) << "Failed to create MPEG4 input device\n";
		exit(1);
	  }
  }

  // Create the RTSP server:
  RTSPServer* rtspServer = NULL;
  // Normal case: Streaming from a built-in RTSP server:
  rtspServer = RTSPServer::createNew(*env, rtspServerPortNum, NULL);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }

  *env << "...done initializing\n";

  if( streamingMode == STREAMING_UNICAST )
  {
	  if( video_type == VIDEO_TYPE_MJPEG)
	  {
	    ServerMediaSession* sms
	      = ServerMediaSession::createNew(*env, MjpegStreamName, MjpegStreamName, streamDescription,streamingMode == STREAMING_MULTICAST_SSM);
	    sms->addSubsession(WISJPEGVideoServerMediaSubsession
				 ::createNew(sms->envir(), *MjpegInputDevice, MjpegVideoBitrate));
	    if( IsSilence == 0)
	    {
			sms->addSubsession(WISPCMAudioServerMediaSubsession::createNew(sms->envir(), *MjpegInputDevice));
	    }

	    rtspServer->addServerMediaSession(sms);

	    char *url = rtspServer->rtspURL(sms);
	    *env << "Play this stream using the URL:\n\t" << url << "\n";
	    delete[] url;
	  }

	  if( video_type == VIDEO_TYPE_H264 || video_type == VIDEO_TYPE_H264_CIF || video_type == VIDEO_TYPE_H264_SVC_30FPS ||
			video_type == VIDEO_TYPE_H264_SVC_15FPS || video_type == VIDEO_TYPE_H264_SVC_7FPS || video_type == VIDEO_TYPE_H264_SVC_3FPS)
	  {
	    ServerMediaSession* sms
	      = ServerMediaSession::createNew(*env, H264StreamName, H264StreamName, streamDescription, streamingMode == STREAMING_MULTICAST_SSM);
	    sms->addSubsession(WISH264VideoServerMediaSubsession
				 ::createNew(sms->envir(), *H264InputDevice, H264VideoBitrate));
	    if( IsSilence == 0)
	    {
	    	sms->addSubsession(WISPCMAudioServerMediaSubsession::createNew(sms->envir(), *H264InputDevice));
	    }
	    rtspServer->addServerMediaSession(sms);

	    char *url = rtspServer->rtspURL(sms);
	    *env << "Play this stream using the URL:\n\t" << url << "\n";
	    delete[] url;
	  }

	    // Create a record describing the media to be streamed:
	  if( video_type == VIDEO_TYPE_MPEG4 || video_type == VIDEO_TYPE_MPEG4_CIF )
	  {
	    ServerMediaSession* sms
	      = ServerMediaSession::createNew(*env, Mpeg4StreamName, Mpeg4StreamName, streamDescription, streamingMode == STREAMING_MULTICAST_SSM);
	    sms->addSubsession(WISMPEG4VideoServerMediaSubsession
				 ::createNew(sms->envir(), *Mpeg4InputDevice, Mpeg4VideoBitrate));
	    if( IsSilence == 0)
	    {
	    	sms->addSubsession(WISPCMAudioServerMediaSubsession::createNew(sms->envir(), *Mpeg4InputDevice));
	    }

	    rtspServer->addServerMediaSession(sms);

	    char *url = rtspServer->rtspURL(sms);
	    *env << "Play this stream using the URL:\n\t" << url << "\n";
	    delete[] url;
	  }
  } else {
	if (streamingMode == STREAMING_MULTICAST_SSM)
	{
		if (multicastAddress == 0)
			multicastAddress = chooseRandomIPv4SSMAddress(*env);
	} else if (multicastAddress != 0) {
		streamingMode = STREAMING_MULTICAST_ASM;
	}
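	// SSM mode with no configured address: pick a random source-specific
	// multicast address (LIVE555 chooses one from the 232.0.0.0/8 SSM block).
	// An explicitly configured address without SSM means any-source multicast.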

	struct in_addr dest;
	dest.s_addr = multicastAddress;
	const unsigned char ttl = 255;

	// For RTCP:
	const unsigned maxCNAMElen = 100;
	unsigned char CNAME[maxCNAMElen + 1];
	gethostname((char *) CNAME, maxCNAMElen);
	CNAME[maxCNAMElen] = '\0';      // just in case
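	// The CNAME (here just the host name) identifies this sender in the RTCP
	// SDES packets that the RTCPInstances created below transmit periodically.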

	ServerMediaSession* sms=NULL;

	if( video_type == VIDEO_TYPE_MJPEG)
	{
		sms = ServerMediaSession::createNew(*env, MjpegStreamName, MjpegStreamName, streamDescription, streamingMode == STREAMING_MULTICAST_SSM);

		sourceAudio = MjpegInputDevice->audioSource();
		sourceVideo = WISJPEGStreamSource::createNew(MjpegInputDevice->videoSource());
		// Create 'groupsocks' for RTP and RTCP:
	    const Port rtpPortVideo(videoRTPPortNum);
	    const Port rtcpPortVideo(videoRTPPortNum+1);
	    rtpGroupsockVideo = new Groupsock(*env, dest, rtpPortVideo, ttl);
	    rtcpGroupsockVideo = new Groupsock(*env, dest, rtcpPortVideo, ttl);
	    if (streamingMode == STREAMING_MULTICAST_SSM) {
	      rtpGroupsockVideo->multicastSendOnly();
	      rtcpGroupsockVideo->multicastSendOnly();
	    }
		setVideoRTPSinkBufferSize();
		sinkVideo = JPEGVideoRTPSink::createNew(*env, rtpGroupsockVideo);
	}

	if( video_type == VIDEO_TYPE_H264 || video_type == VIDEO_TYPE_H264_CIF ||
		video_type == VIDEO_TYPE_H264_SVC_30FPS || video_type == VIDEO_TYPE_H264_SVC_15FPS ||
			video_type == VIDEO_TYPE_H264_SVC_7FPS || video_type == VIDEO_TYPE_H264_SVC_3FPS)
	{
		sms = ServerMediaSession::createNew(*env, H264StreamName, H264StreamName, streamDescription, streamingMode == STREAMING_MULTICAST_SSM);

		sourceAudio = H264InputDevice->audioSource();
		sourceVideo = H264VideoStreamFramer::createNew(*env, H264InputDevice->videoSource());

		// Create 'groupsocks' for RTP and RTCP:
	    const Port rtpPortVideo(videoRTPPortNum);
	    const Port rtcpPortVideo(videoRTPPortNum+1);
	    rtpGroupsockVideo = new Groupsock(*env, dest, rtpPortVideo, ttl);
	    rtcpGroupsockVideo = new Groupsock(*env, dest, rtcpPortVideo, ttl);
	    if (streamingMode == STREAMING_MULTICAST_SSM) {
	      rtpGroupsockVideo->multicastSendOnly();
	      rtcpGroupsockVideo->multicastSendOnly();
	    }
		setVideoRTPSinkBufferSize();
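		// H264VideoRTPSink takes a dynamic RTP payload type (96), the stream's
		// profile-level-id (0x64001F = High profile, Level 3.1) and its
		// "sprop-parameter-sets" (base64-encoded SPS/PPS), which GetSprop()
		// presumably fetches from the encoder, for use in the SDP description.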
		{
			char BuffStr[200];
			extern int GetSprop(void *pBuff, char vType);
			GetSprop(BuffStr, video_type);
			sinkVideo = H264VideoRTPSink::createNew(*env, rtpGroupsockVideo, 96, 0x64001F, BuffStr);
		}
	}

	// Create a record describing the media to be streamed:
	if( video_type == VIDEO_TYPE_MPEG4 || video_type == VIDEO_TYPE_MPEG4_CIF )
	{
		sms = ServerMediaSession::createNew(*env, Mpeg4StreamName, Mpeg4StreamName, streamDescription, streamingMode == STREAMING_MULTICAST_SSM);

		sourceAudio = Mpeg4InputDevice->audioSource();
		sourceVideo = MPEG4VideoStreamDiscreteFramer::createNew(*env, Mpeg4InputDevice->videoSource());

		// Create 'groupsocks' for RTP and RTCP:
	    const Port rtpPortVideo(videoRTPPortNum);
	    const Port rtcpPortVideo(videoRTPPortNum+1);
	    rtpGroupsockVideo = new Groupsock(*env, dest, rtpPortVideo, ttl);
	    rtcpGroupsockVideo = new Groupsock(*env, dest, rtcpPortVideo, ttl);
	    if (streamingMode == STREAMING_MULTICAST_SSM) {
	      rtpGroupsockVideo->multicastSendOnly();
	      rtcpGroupsockVideo->multicastSendOnly();
	    }
		setVideoRTPSinkBufferSize();
		sinkVideo = MPEG4ESVideoRTPSink::createNew(*env, rtpGroupsockVideo, 97);
	}
	/* VIDEO channel initialization */
	{
		// Create (and start) a 'RTCP instance' for this RTP sink,
		// sizing its RTCP bandwidth share from the selected codec's bitrate:
		unsigned videoBitrate = Mpeg4VideoBitrate;
		if (video_type == VIDEO_TYPE_MJPEG)
			videoBitrate = MjpegVideoBitrate;
		else if (video_type != VIDEO_TYPE_MPEG4 && video_type != VIDEO_TYPE_MPEG4_CIF)
			videoBitrate = H264VideoBitrate;
		unsigned totalSessionBandwidthVideo = (videoBitrate + 500) / 1000; // in kbps; for RTCP b/w share
		rtcpVideo = RTCPInstance::createNew(*env, rtcpGroupsockVideo,
					totalSessionBandwidthVideo, CNAME,
					sinkVideo, NULL /* we're a server */ ,
					streamingMode == STREAMING_MULTICAST_SSM);
	    // Note: This starts RTCP running automatically
		sms->addSubsession(PassiveServerMediaSubsession::createNew(*sinkVideo, rtcpVideo));
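		// (A PassiveServerMediaSubsession just describes this already-streaming
		// RTP sink in the session's SDP; multicast receivers join the group
		// directly, with no per-client stream setup.)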

		// Start streaming:
		sinkVideo->startPlaying(*sourceVideo, NULL, NULL);
	}
	/* AUDIO Channel initial */
	if( IsSilence == 0)
	{
		// there's a separate RTP stream for audio
		// Create 'groupsocks' for RTP and RTCP:
		const Port rtpPortAudio(audioRTPPortNum);
		const Port rtcpPortAudio(audioRTPPortNum+1);

		rtpGroupsockAudio = new Groupsock(*env, dest, rtpPortAudio, ttl);
		rtcpGroupsockAudio = new Groupsock(*env, dest, rtcpPortAudio, ttl);

		if (streamingMode == STREAMING_MULTICAST_SSM)
		{
			rtpGroupsockAudio->multicastSendOnly();
			rtcpGroupsockAudio->multicastSendOnly();
		}
		if( audioSamplingFrequency == 16000 )
		{
			if( audioType == AUDIO_G711)
			{
				sinkAudio = SimpleRTPSink::createNew(*env, rtpGroupsockAudio, 96, audioSamplingFrequency, "audio", "PCMU", 1);
			}
			else
			{
				char const* encoderConfigStr = "1408";// (2<<3)|(8>>1) = 0x14 ; ((8<<7)&0xFF)|(1<<3)=0x08 ;
				sinkAudio = MPEG4GenericRTPSink::createNew(*env, rtpGroupsockAudio,
						       96,
						       audioSamplingFrequency,
						       "audio", "AAC-hbr",
						       encoderConfigStr, audioNumChannels);
			}
		}
		else {
			if( audioType == AUDIO_G711)
			{
				sinkAudio = SimpleRTPSink::createNew(*env, rtpGroupsockAudio, 0, audioSamplingFrequency, "audio", "PCMU", 1);
			}
			else {
				char const* encoderConfigStr = "1588";// (2<<3)|(11>>1) = 0x15 ; ((11<<7)&0xFF)|(1<<3)=0x88 ;
				sinkAudio = MPEG4GenericRTPSink::createNew(*env, rtpGroupsockAudio,
						       96,
						       audioSamplingFrequency,
						       "audio", "AAC-hbr",
						       encoderConfigStr, audioNumChannels);
			}
		}
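		// 'encoderConfigStr' is the AAC AudioSpecificConfig in hex: 5 bits
		// audioObjectType (2 = AAC-LC), 4 bits samplingFrequencyIndex
		// (8 = 16 kHz, 11 = 8 kHz), 4 bits channelConfiguration (1 = mono),
		// zero-padded -- hence "1408" for 16 kHz and "1588" for 8 kHz.
		// A sketch of how such a string could be derived:
		//   unsigned char cfg[2];
		//   cfg[0] = (aot << 3) | (freqIdx >> 1);            // (2<<3)|(8>>1)  = 0x14
		//   cfg[1] = ((freqIdx << 7) & 0xFF) | (chans << 3); // ((8<<7)&0xFF)|(1<<3) = 0x08
		//   sprintf(cfgStr, "%02x%02x", cfg[0], cfg[1]);     // -> "1408"
		// (G.711/PCMU at 8 kHz uses the static RTP payload type 0; at 16 kHz a
		//  dynamic payload type such as 96 is required.)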

		// Create (and start) a 'RTCP instance' for this RTP sink:
		unsigned totalSessionBandwidthAudio = (audioOutputBitrate+500)/1000; // in kbps; for RTCP b/w share
		rtcpAudio = RTCPInstance::createNew(*env, rtcpGroupsockAudio,
					  totalSessionBandwidthAudio, CNAME,
					  sinkAudio, NULL /* we're a server */,
					  streamingMode == STREAMING_MULTICAST_SSM);
		// Note: This starts RTCP running automatically
		sms->addSubsession(PassiveServerMediaSubsession::createNew(*sinkAudio, rtcpAudio));

		// Start streaming:
		sinkAudio->startPlaying(*sourceAudio, NULL, NULL);
    }

	rtspServer->addServerMediaSession(sms);
	{
		char *url = rtspServer->rtspURL(sms);
		*env << "Multicast: play this stream using the URL:\n\t" << url << "\n";
		delete[] url;
	}
  }


  // Begin the LIVE555 event loop:
  env->taskScheduler().doEventLoop(&watchVariable); // does not return


  if( streamingMode != STREAMING_UNICAST )
  {
	Medium::close(rtcpAudio);
	Medium::close(sinkAudio);
	Medium::close(sourceAudio);
	delete rtpGroupsockAudio;
	delete rtcpGroupsockAudio;

	Medium::close(rtcpVideo);
	Medium::close(sinkVideo);
	Medium::close(sourceVideo);
	delete rtpGroupsockVideo;
	delete rtcpGroupsockVideo;
  }

  Medium::close(rtspServer); // will also reclaim "sms" and its "ServerMediaSubsession"s
  if( MjpegInputDevice != NULL )
  {
	Medium::close(MjpegInputDevice);
  }

  if( H264InputDevice != NULL )
  {
	Medium::close(H264InputDevice);
  }

  if( Mpeg4InputDevice != NULL )
  {
	Medium::close(Mpeg4InputDevice);
  }

  env->reclaim();

  delete scheduler;

  ApproInterfaceExit();

  return 0; // only to prevent compiler warning

}
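
// NOTE: GetSprop() is declared 'extern' in main() above and comes from the
// board SDK; its source is not part of this listing. The block below is only
// a minimal sketch of what such a helper could look like, assuming the encoder
// exposes its current SPS/PPS NAL units (the getSpsPps() accessor is
// hypothetical, named here only for illustration). It uses LIVE555's
// base64Encode() from "Base64.hh" to build the "sprop-parameter-sets" value
// passed to H264VideoRTPSink::createNew():
#if 0
#include "Base64.hh"
#include <cstdio>

// Hypothetical SDK accessor (illustration only):
extern int getSpsPps(char vType, unsigned char* sps, unsigned* spsLen,
		     unsigned char* pps, unsigned* ppsLen);

int GetSprop(void *pBuff, char vType)
{
	unsigned char sps[128], pps[64];
	unsigned spsLen = 0, ppsLen = 0;

	if (getSpsPps(vType, sps, &spsLen, pps, &ppsLen) < 0)
		return -1;

	// "sprop-parameter-sets" is a comma-separated list of base64-encoded
	// parameter-set NAL units (RFC 6184):
	char* spsB64 = base64Encode((char const*)sps, spsLen);
	char* ppsB64 = base64Encode((char const*)pps, ppsLen);
	sprintf((char*)pBuff, "%s,%s", spsB64, ppsB64); // caller's BuffStr is 200 bytes
	delete[] spsB64;
	delete[] ppsB64;
	return 0;
}
#endif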