int main(int argc, char** argv) {
  // Increase the maximum size of video frames that we can 'proxy' without truncation.
  // (Such frames are unreasonably large; the back-end servers should really not be sending frames this large!)
  OutPacketBuffer::maxSize = 100000; // bytes

  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  *env << "LIVE555 Proxy Server\n"
       << "\t(LIVE555 Streaming Media library version "
       << LIVEMEDIA_LIBRARY_VERSION_STRING << "; licensed under the GNU LGPL)\n\n";

  // Check command-line arguments: optional parameters, then one or more rtsp:// URLs (of streams to be proxied):
  progName = argv[0];
  if (argc < 2) usage();
  while (argc > 1) {
    // Process initial command-line options (beginning with "-"):
    char* const opt = argv[1];
    if (opt[0] != '-') break; // the remaining parameters are assumed to be "rtsp://" URLs

    switch (opt[1]) {
    case 'v': { // verbose output
      verbosityLevel = 1;
      break;
    }
    case 'V': { // more verbose output
      verbosityLevel = 2;
      break;
    }
    case 't': {
      // Stream RTP and RTCP over the TCP 'control' connection.
      // (This is for the 'back end' (i.e., proxied) stream only.)
      streamRTPOverTCP = True;
      break;
    }
    case 'T': { // stream RTP and RTCP over an HTTP connection
      if (argc > 3 && argv[2][0] != '-') {
        // The next argument is the HTTP server port number:
        if (sscanf(argv[2], "%hu", &tunnelOverHTTPPortNum) == 1
            && tunnelOverHTTPPortNum > 0) {
          ++argv; --argc;
          break;
        }
      }
      // If we get here, the option was specified incorrectly:
      usage();
      break;
    }
    case 'p': { // specify an RTSP server port number
      if (argc > 3 && argv[2][0] != '-') {
        // The next argument is the RTSP server port number:
        if (sscanf(argv[2], "%hu", &rtspServerPortNum) == 1
            && rtspServerPortNum > 0) {
          ++argv; --argc;
          break;
        }
      }
      // If we get here, the option was specified incorrectly:
      usage();
      break;
    }
    case 'u': { // specify a username and password (to be used if the 'back end' (i.e., proxied) stream requires authentication)
      if (argc < 4) usage(); // there's no argv[3] (for the "password")
      username = argv[2];
      password = argv[3];
      argv += 2; argc -= 2;
      break;
    }
    case 'U': { // specify a username and password to use to authenticate incoming "REGISTER" commands
      if (argc < 4) usage(); // there's no argv[3] (for the "password")
      usernameForREGISTER = argv[2];
      passwordForREGISTER = argv[3];

      if (authDBForREGISTER == NULL) authDBForREGISTER = new UserAuthenticationDatabase;
      authDBForREGISTER->addUserRecord(usernameForREGISTER, passwordForREGISTER);
      argv += 2; argc -= 2;
      break;
    }
    case 'R': { // Handle incoming "REGISTER" requests by proxying the specified stream:
      proxyREGISTERRequests = True;
      break;
    }
    default: {
      usage();
      break;
    }
    }

    ++argv; --argc;
  }

  if (argc < 2 && !proxyREGISTERRequests) usage(); // there must be at least one "rtsp://" URL at the end
  // Make sure that the remaining arguments appear to be "rtsp://" URLs:
  int i;
  for (i = 1; i < argc; ++i) {
    if (strncmp(argv[i], "rtsp://", 7) != 0) usage();
  }

  // Do some additional checking for invalid command-line argument combinations:
  if (authDBForREGISTER != NULL && !proxyREGISTERRequests) {
    *env << "The '-U <username> <password>' option can be used only with -R\n";
    usage();
  }
  if (streamRTPOverTCP) {
    if (tunnelOverHTTPPortNum > 0) {
      *env << "The -t and -T options cannot both be used!\n";
      usage();
    } else {
      tunnelOverHTTPPortNum = (portNumBits)(~0); // hack to tell "ProxyServerMediaSession" to stream over TCP, but not using HTTP
    }
  }

#ifdef ACCESS_CONTROL
  // To implement client access control to the RTSP server, do the following:
  authDB = new UserAuthenticationDatabase;
  authDB->addUserRecord("username1", "password1"); // replace these with real strings
  // Repeat this line with each <username>, <password> that you wish to allow access to the server.
#endif

  // Create the RTSP server. Try first with the configured port number,
  // and then with the default port number (554) if different,
  // and then with the alternative port number (8554):
  RTSPServer* rtspServer;
  rtspServer = createRTSPServer(rtspServerPortNum);
  if (rtspServer == NULL) {
    if (rtspServerPortNum != 554) {
      *env << "Unable to create a RTSP server with port number " << rtspServerPortNum << ": " << env->getResultMsg() << "\n";
      *env << "Trying instead with the standard port numbers (554 and 8554)...\n";

      rtspServerPortNum = 554;
      rtspServer = createRTSPServer(rtspServerPortNum);
    }
  }
  if (rtspServer == NULL) {
    rtspServerPortNum = 8554;
    rtspServer = createRTSPServer(rtspServerPortNum);
  }
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }

  // Create a proxy for each "rtsp://" URL specified on the command line:
  for (i = 1; i < argc; ++i) {
    char const* proxiedStreamURL = argv[i];
    char streamName[30];
    if (argc == 2) {
      sprintf(streamName, "%s", "proxyStream"); // there's just one stream; give it this name
    } else {
      sprintf(streamName, "proxyStream-%d", i); // there's more than one stream; distinguish them by name
    }
    ServerMediaSession* sms
      = ProxyServerMediaSession::createNew(*env, rtspServer,
                                           proxiedStreamURL, streamName,
                                           username, password,
                                           tunnelOverHTTPPortNum, verbosityLevel);
    rtspServer->addServerMediaSession(sms);

    char* proxyStreamURL = rtspServer->rtspURL(sms);
    *env << "RTSP stream, proxying the stream \"" << proxiedStreamURL << "\"\n";
    *env << "\tPlay this stream using the URL: " << proxyStreamURL << "\n";
    delete[] proxyStreamURL;
  }

  if (proxyREGISTERRequests) {
    *env << "(We handle incoming \"REGISTER\" requests on port " << rtspServerPortNum << ")\n";
  }

  // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
  // Try first with the default HTTP port (80), and then with the alternative HTTP
  // port numbers (8000 and 8080).
  if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
    *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
  } else {
    *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
  }

  // Now, enter the event loop:
  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
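The main() above calls a createRTSPServer() helper that is not shown in this excerpt. In the live555ProxyServer source it is a small wrapper that creates either a plain RTSPServer or, when -R was given, a server that also proxies incoming "REGISTER" requests. A minimal sketch, assuming the surrounding globals (env, authDB, authDBForREGISTER, proxyREGISTERRequests, streamRTPOverTCP, verbosityLevel, username, password); the exact parameter list of RTSPServerWithREGISTERProxying::createNew() varies across library versions:

static RTSPServer* createRTSPServer(Port port) {
  if (proxyREGISTERRequests) {
    // Also accept incoming "REGISTER" requests, and proxy the streams they name:
    return RTSPServerWithREGISTERProxying::createNew(*env, port, authDB, authDBForREGISTER,
                                                     65 /* reclamation seconds */,
                                                     streamRTPOverTCP, verbosityLevel,
                                                     username, password);
  } else {
    return RTSPServer::createNew(*env, port, authDB);
  }
}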
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
  // To implement client access control to the RTSP server, do the following:
  authDB = new UserAuthenticationDatabase;
  authDB->addUserRecord("username1", "password1"); // replace these with real strings
  // Repeat the above with each <username>, <password> that you wish to allow
  // access to the server.
#endif

  // Create the RTSP server. Try first with the default port number (554),
  // and then with the alternative port number (8554):
  RTSPServer* rtspServer;
  portNumBits rtspServerPortNum = 554;
  rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB);
  if (rtspServer == NULL) {
    rtspServerPortNum = 8554;
    rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB);
  }
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }

  *env << "LIVE555 Media Server\n";
  *env << "\tversion " << MEDIA_SERVER_VERSION_STRING
       << " (LIVE555 Streaming Media library version "
       << LIVEMEDIA_LIBRARY_VERSION_STRING << ").\n";

  char* urlPrefix = rtspServer->rtspURLPrefix();
  *env << "Play streams from this server using the URL\n\t"
       << urlPrefix << "<filename>\nwhere <filename> is a file present in the current directory.\n";
  *env << "Each file's type is inferred from its name suffix:\n";
  *env << "\t\".264\" => a H.264 Video Elementary Stream file\n";
  *env << "\t\".aac\" => an AAC Audio (ADTS format) file\n";
  *env << "\t\".ac3\" => an AC-3 Audio file\n";
  *env << "\t\".amr\" => an AMR Audio file\n";
  *env << "\t\".dv\" => a DV Video file\n";
  *env << "\t\".m4e\" => a MPEG-4 Video Elementary Stream file\n";
  *env << "\t\".mkv\" => a Matroska audio+video+(optional)subtitles file\n";
  *env << "\t\".mp3\" => a MPEG-1 or 2 Audio file\n";
  *env << "\t\".mpg\" => a MPEG-1 or 2 Program Stream (audio+video) file\n";
  *env << "\t\".ts\" => a MPEG Transport Stream file\n";
  *env << "\t\t(a \".tsx\" index file - if present - provides server 'trick play' support)\n";
  *env << "\t\".wav\" => a WAV Audio file\n";
  *env << "\t\".webm\" => a WebM audio(Vorbis)+video(VP8) file\n";
  *env << "See http://www.live555.com/mediaServer/ for additional documentation.\n";

  // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
  // Try first with the default HTTP port (80), and then with the alternative HTTP
  // port numbers (8000 and 8080).
  if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
    *env << "(We use port " << rtspServer->httpServerPortNum()
         << " for optional RTSP-over-HTTP tunneling, or for HTTP live streaming (for indexed Transport Stream files only).)\n";
  } else {
    *env << "(RTSP-over-HTTP tunneling is not available.)\n";
  }

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warning
}
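Unlike the other examples here, this server does not register its streams up front: DynamicRTSPServer subclasses RTSPServer and builds a ServerMediaSession on demand, the first time a client requests a given filename. An abbreviated sketch of that idea, following the DynamicRTSPServer in the live555 media-server source (older library versions use this synchronous virtual; newer ones use an asynchronous variant, and createNewSMS() is the helper there that maps a filename suffix to the matching subsession type):

class DynamicRTSPServer : public RTSPServer {
  // ... constructors/boilerplate omitted ...
protected:
  virtual ServerMediaSession* lookupServerMediaSession(char const* streamName) {
    // Check whether a file by this name exists:
    FILE* fid = fopen(streamName, "rb");
    if (fid == NULL) return NULL; // no such file => no such stream

    ServerMediaSession* sms = RTSPServer::lookupServerMediaSession(streamName);
    if (sms == NULL) {
      // First request for this file: build a session for it, based on the
      // file's suffix (".264", ".mp3", ".ts", ...):
      sms = createNewSMS(envir(), streamName, fid);
      addServerMediaSession(sms);
    }
    fclose(fid);
    return sms;
  }
};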
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
  // To implement client access control to the RTSP server, do the following:
  authDB = new UserAuthenticationDatabase;
  authDB->addUserRecord("username1", "password1"); // replace these with real strings
  // Repeat the above with each <username>, <password> that you wish to allow
  // access to the server.
#endif

  // Create the RTSP server:
  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }

  char const* descriptionString = "Session streamed by \"testOnDemandRTSPServer\"";

  // Set up each of the possible streams that can be served by the
  // RTSP server. Each such stream is implemented using a
  // "ServerMediaSession" object, plus one or more
  // "ServerMediaSubsession" objects for each audio/video substream.

  // A MPEG-4 video elementary stream:
  {
    char const* streamName = "mpeg4ESVideoTest";
    char const* inputFileName = "test.m4e";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(MPEG4VideoFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A H.264 video elementary stream:
  {
    char const* streamName = "h264ESVideoTest";
    char const* inputFileName = "test.264";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(H264VideoFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A H.265 video elementary stream:
  {
    char const* streamName = "h265ESVideoTest";
    char const* inputFileName = "test.265";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(H265VideoFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-1 or 2 audio+video program stream:
  {
    char const* streamName = "mpeg1or2AudioVideoTest";
    char const* inputFileName = "test.mpg";
    // NOTE: This *must* be a Program Stream; not an Elementary Stream
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly));
    sms->addSubsession(demux->newAudioServerMediaSubsession());
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-1 or 2 video elementary stream:
  {
    char const* streamName = "mpeg1or2ESVideoTest";
    char const* inputFileName = "testv.mpg";
    // NOTE: This *must* be a Video Elementary Stream; not a Program Stream
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(MPEG1or2VideoFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource, iFramesOnly));
    rtspServer->addServerMediaSession(sms);
    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MP3 audio stream (actually, any MPEG-1 or 2 audio file will work):
  // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following:
//#define STREAM_USING_ADUS 1
  // To also reorder ADUs before streaming, uncomment the following:
//#define INTERLEAVE_ADUS 1
  // (For more information about ADUs and interleaving,
  // see <http://www.live555.com/rtp-mp3/>)
  {
    char const* streamName = "mp3AudioTest";
    char const* inputFileName = "test.mp3";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);

    Boolean useADUs = False;
    Interleaving* interleaving = NULL;
#ifdef STREAM_USING_ADUS
    useADUs = True;
#ifdef INTERLEAVE_ADUS
    unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own...
    unsigned const interleaveCycleSize
      = (sizeof interleaveCycle)/(sizeof (unsigned char));
    interleaving = new Interleaving(interleaveCycleSize, interleaveCycle);
#endif
#endif

    sms->addSubsession(MP3AudioFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource,
                                   useADUs, interleaving));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A WAV audio stream:
  {
    char const* streamName = "wavAudioTest";
    char const* inputFileName = "test.wav";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    // To convert 16-bit PCM data to 8-bit u-law, prior to streaming,
    // change the following to True:
    Boolean convertToULaw = False;
    sms->addSubsession(WAVAudioFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource, convertToULaw));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An AMR audio stream:
  {
    char const* streamName = "amrAudioTest";
    char const* inputFileName = "test.amr";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(AMRAudioFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A 'VOB' file (e.g., from an unencrypted DVD):
  {
    char const* streamName = "vobTest";
    char const* inputFileName = "test.vob";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    // Note: VOB files are MPEG-2 Program Stream files, but using AC-3 audio
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly));
    sms->addSubsession(demux->newAC3AudioServerMediaSubsession());
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-2 Transport Stream:
  {
    char const* streamName = "mpeg2TransportStreamTest";
    char const* inputFileName = "test.ts";
    char const* indexFileName = "test.tsx";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(MPEG2TransportFileServerMediaSubsession
                       ::createNew(*env, inputFileName, indexFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An AAC audio stream (ADTS-format file):
  {
    char const* streamName = "aacAudioTest";
    char const* inputFileName = "test.aac";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env,
                                      streamName, streamName, descriptionString);
    sms->addSubsession(ADTSAudioFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A DV video stream:
  {
    // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000).
    OutPacketBuffer::maxSize = 2000000;

    char const* streamName = "dvVideoTest";
    char const* inputFileName = "test.dv";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(DVVideoFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An AC-3 audio elementary stream:
  {
    char const* streamName = "ac3AudioTest";
    char const* inputFileName = "test.ac3";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(AC3AudioFileServerMediaSubsession
                       ::createNew(*env, inputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(sms);

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A Matroska ('.mkv') file, with video+audio+subtitle streams:
  {
    char const* streamName = "matroskaFileTest";
    char const* inputFileName = "test.mkv";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);

    newDemuxWatchVariable = 0;
    MatroskaFileServerDemux::createNew(*env, inputFileName, onMatroskaDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = matroskaDemux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A WebM ('.webm') file, with video(VP8)+audio(Vorbis) streams:
  // (Note: ".webm" files are special types of Matroska files, so we use the same code as the Matroska ('.mkv') file code above.)
  {
    char const* streamName = "webmFileTest";
    char const* inputFileName = "test.webm";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);

    newDemuxWatchVariable = 0;
    MatroskaFileServerDemux::createNew(*env, inputFileName, onMatroskaDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = matroskaDemux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.
    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An Ogg ('.ogg') file, with video and/or audio streams:
  {
    char const* streamName = "oggFileTest";
    char const* inputFileName = "test.ogg";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);

    newDemuxWatchVariable = 0;
    OggFileServerDemux::createNew(*env, inputFileName, onOggDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = oggDemux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // An Opus ('.opus') audio file:
  // (Note: ".opus" files are special types of Ogg files, so we use the same code as the Ogg ('.ogg') file code above.)
  {
    char const* streamName = "opusFileTest";
    char const* inputFileName = "test.opus";
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);

    newDemuxWatchVariable = 0;
    OggFileServerDemux::createNew(*env, inputFileName, onOggDemuxCreation, NULL);
    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);

    Boolean sessionHasTracks = False;
    ServerMediaSubsession* smss;
    while ((smss = oggDemux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
      sessionHasTracks = True;
    }
    if (sessionHasTracks) {
      rtspServer->addServerMediaSession(sms);
    }
    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.

    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // A MPEG-2 Transport Stream, coming from a live UDP (raw-UDP or RTP/UDP) source:
  {
    char const* streamName = "mpeg2TransportStreamFromUDPSourceTest";
    char const* inputAddressStr = "239.255.42.42";
        // This causes the server to take its input from the stream sent by the "testMPEG2TransportStreamer" demo application.
        // (Note: If the input UDP source is unicast rather than multicast, then change this to NULL.)
    portNumBits const inputPortNum = 1234;
        // This causes the server to take its input from the stream sent by the "testMPEG2TransportStreamer" demo application.
    Boolean const inputStreamIsRawUDP = False;
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    sms->addSubsession(MPEG2TransportUDPServerMediaSubsession
                       ::createNew(*env, inputAddressStr, inputPortNum, inputStreamIsRawUDP));
    rtspServer->addServerMediaSession(sms);

    char* url = rtspServer->rtspURL(sms);
    *env << "\n\"" << streamName << "\" stream, from a UDP Transport Stream input source \n\t(";
    if (inputAddressStr != NULL) {
      *env << "IP multicast address " << inputAddressStr << ",";
    } else {
      *env << "unicast;";
    }
    *env << " port " << inputPortNum << ")\n";
    *env << "Play this stream using the URL \"" << url << "\"\n";
    delete[] url;
  }

  // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
  // Try first with the default HTTP port (80), and then with the alternative HTTP
  // port numbers (8000 and 8080).
  if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
    *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
  } else {
    *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
  }

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warning
}
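This main() relies on a few helpers that the excerpt omits: announceStream(), and the onMatroskaDemuxCreation()/onOggDemuxCreation() callbacks that set the watch variable used to run the event loop until a demux is ready. Sketches following the testOnDemandRTSPServer originals (the globals matroskaDemux, oggDemux, and newDemuxWatchVariable are assumed to be declared at file scope):

static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
                           char const* streamName, char const* inputFileName) {
  char* url = rtspServer->rtspURL(sms);
  UsageEnvironment& env = rtspServer->envir();
  env << "\n\"" << streamName << "\" stream, from the file \"" << inputFileName << "\"\n";
  env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;
}

static void onMatroskaDemuxCreation(MatroskaFileServerDemux* newDemux, void* /*clientData*/) {
  matroskaDemux = newDemux;
  newDemuxWatchVariable = 1; // unblocks doEventLoop(&newDemuxWatchVariable)
}

static void onOggDemuxCreation(OggFileServerDemux* newDemux, void* /*clientData*/) {
  oggDemux = newDemux;
  newDemuxWatchVariable = 1;
}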
int myRTSPServer() {
  Boolean bFlag;

  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  UserAuthenticationDatabase* authDB = NULL;

  // Create the RTSP server:
  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }

  char const* descriptionString = "Session streamed by \"testOnDemandRTSPServer\"";

  // Set up each of the possible streams that can be served by the
  // RTSP server. Each such stream is implemented using a
  // "ServerMediaSession" object, plus one or more
  // "ServerMediaSubsession" objects for each audio/video substream.

  // A H.264 video elementary stream:
  {
    char const* streamName = "BackChannelTest";
    char const* inputFileName = "slamtv10.264";
    char const* audioFileName = "slamtv10.aac";
    char const* outputFileName = "receive.pcm";
    reuseFirstSource = True;

    // Check that the test files exist:
    {
      FILE* fp = NULL;
      fp = fopen(inputFileName, "r");
      if (fp == NULL) printf("File %s does not exist\n", inputFileName);
      else fclose(fp);

      fp = fopen(audioFileName, "r");
      if (fp == NULL) printf("File %s does not exist\n", audioFileName);
      else fclose(fp);
    }

    // Stream 1: H.264 video
    ServerMediaSession* sms
      = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
    H264VideoFileServerMediaSubsession* sub
      = H264VideoFileServerMediaSubsession::createNew(*env, inputFileName, reuseFirstSource);
    bFlag = sms->addSubsession(sub);
    if (bFlag == False) printf("addSubsession for %s error\n", inputFileName);

    // Stream 2: AAC audio stream (ADTS-format file):
    ADTSAudioFileServerMediaSubsession* sub2
      = ADTSAudioFileServerMediaSubsession::createNew(*env, audioFileName, reuseFirstSource);
    bFlag = sms->addSubsession(sub2);
    if (bFlag == False) printf("addSubsession for %s error\n", audioFileName);

    // Stream 3: backchannel AAC audio
    // TODO: modify here to support a backchannel:
    // implement a new class named ADTSBackChannelAudioFileServerMediaSubsession;
    // use an RTPSource to receive data, and an ADTSAudioFileSink to save the data to a file.
    //ADTSBackChannelAudioFileServerMediaSubsession* sub3 = ADTSBackChannelAudioFileServerMediaSubsession
    WaveBackChannelAudioFileServerMediaSubsession* sub3
      = WaveBackChannelAudioFileServerMediaSubsession::createNew(*env, outputFileName, reuseFirstSource);
    sub3->setSubsessionAsBackChannel();
    bFlag = sms->addSubsession(sub3);
    if (bFlag == False) printf("addSubsession for %s error\n", outputFileName);

    rtspServer->addServerMediaSession(sms);

    // 20140703 albert.liao modified start
    // We should notify OnDemandServerMediaSubsession or ServerMediaSubsession that we have already created a
    // backchannel subsession, so that ServerMediaSubsession can:
    // 1. create an SDP description with a backchannel
    // 2. create an RTPSource to read data from the RTP client
    // 3. create a FileSink to save the received data to a file
    // 20140703 albert.liao modified end
    announceStream(rtspServer, sms, streamName, inputFileName);
  }

  // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
  // Try first with the default HTTP port (80), and then with the alternative HTTP
  // port numbers (8000 and 8080).
  if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
    *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
  } else {
    *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
  }

  env->taskScheduler().doEventLoop(); // does not return
  return 0; // only to prevent compiler warning
}
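Note that WaveBackChannelAudioFileServerMediaSubsession is custom code, not part of stock live555; the TODO above describes the receiving half of such a subsession. A minimal sketch of that receive chain using stock live555 classes: read incoming RTP audio from a Groupsock and write the payload to a file. The function name, payload type, and address/port parameters are illustrative assumptions; in a real backchannel subsession these would come from the SETUP transport negotiation, and the SDP/session integration is the part the custom class must add:

static void startBackChannelReceive(UsageEnvironment& env,
                                    struct in_addr destAddr, Port rtpPort) {
  unsigned char const ttl = 255;
  Groupsock* rtpGroupsock = new Groupsock(env, destAddr, rtpPort, ttl);

  // Receive RTP packets from the client:
  RTPSource* source
    = SimpleRTPSource::createNew(env, rtpGroupsock,
                                 96 /* dynamic RTP payload type */,
                                 8000 /* RTP timestamp frequency */,
                                 "audio/L16");

  // Write each received frame's payload to a file:
  FileSink* sink = FileSink::createNew(env, "receive.pcm");
  sink->startPlaying(*source, NULL, NULL);
}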
int main(int argc, char** argv) {
  //int ret = 0;
  PTZControlInit();

  demo_setting* ext_gSettings = NULL;

  // Allocate the "global" settings
  ext_gSettings = (demo_setting*)malloc(sizeof(demo_setting));
  if (NULL == ext_gSettings) {
    printf("main::out of memory!\n");
    return -1;
  }

  sig_init();
  atexit(appExit);

  // Initialize the settings struct
  Settings_Initialize(ext_gSettings);
  read_Parse(ext_gSettings);
  //printf("video type = %d \n", ext_gSettings->video_types);
  //...do your job

  // Turn off the LED
  setled_off();
  // Initialize DMA memory
  akuio_pmem_init();
  encode_init();
  printf("encode_init ok\n");

  // Open the camera
  camera_open(ext_gSettings->width, ext_gSettings->height);
  printf("camera_open ok\n");

  // encode_open
  T_ENC_INPUT encInput;
  encInput.width = ext_gSettings->width;    // actual encoded image width; must be divisible by 4
  encInput.height = ext_gSettings->height;  // actual encoded image height; must be divisible by 2
  encInput.kbpsmode = ext_gSettings->kbpsmode;
  encInput.qpHdr = ext_gSettings->qpHdr;    // initial QP value
  encInput.iqpHdr = ext_gSettings->iqpHdr;  // initial I-frame QP value
  encInput.bitPerSecond = ext_gSettings->bitPerSecond; // target bps
  encInput.minQp = ext_gSettings->minQp;
  encInput.maxQp = ext_gSettings->maxQp;
  encInput.framePerSecond = ext_gSettings->framePerSecond;
  encInput.video_tytes = ext_gSettings->video_types;
  encode_open(&encInput);
  printf("encode_open ok\n");

  // Set up the muxer
  mux_input.rec_path = ext_gSettings->rec_path;
  mux_input.m_MediaRecType = MEDIALIB_REC_AVI_NORMAL;

  if (ext_gSettings->bhasAudio) {
    bHasAudio = 1;
    //mux_input.m_bCaptureAudio = 1;
  } else {
    bHasAudio = 0;
    //mux_input.m_bCaptureAudio = 0;
  }
  mux_input.m_bCaptureAudio = 1;

  // Mux video
  if (parse.format2 == 0) {
    mux_input.m_eVideoType = MEDIALIB_VIDEO_H264;
  } else if (parse.format2 == 1) {
    mux_input.m_eVideoType = MEDIALIB_VIDEO_MJPEG;
  }
  mux_input.m_nWidth = parse.width2;
  mux_input.m_nHeight = parse.height2;

  // Mux audio
  mux_input.m_eAudioType = MEDIALIB_AUDIO_AAC;
  mux_input.m_nSampleRate = 8000;
  //mux_input.abitsrate = ext_gSettings->abitsrate;
  printf("mux_open ok\n");

  //if (ext_gSettings->bhasAudio)
  {
    T_AUDIO_INPUT audioInput;
    audioInput.enc_type = (AUDIO_ENCODE_TYPE_CC)ext_gSettings->audioType;
    audioInput.nBitsRate = ext_gSettings->abitsrate;
    audioInput.nBitsPerSample = 16;
    audioInput.nChannels = 1;
    audioInput.nSampleRate = ext_gSettings->aSamplerate;
    audio_open(&audioInput);
    printf("audio_open ok\n");
    audio_start();
  }

  // Start the FTP server
  //startFTPSrv();

  Init_photograph();
  //PTZControlInit();

  // Start video processing
  video_process_start();
  InitMotionDetect();
  DemuxForLiveSetCallBack();

  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  UserAuthenticationDatabase* authDB = NULL;
#ifdef ACCESS_CONTROL
  // To implement client access control to the RTSP server, do the following:
  authDB = new UserAuthenticationDatabase;
  authDB->addUserRecord("username1", "password1"); // replace these with real strings
  // Repeat the above with each <username>, <password> that you wish to allow
  // access to the server.
#endif

  // Create the RTSP server:
  RTSPServer* rtspServer = AKRTSPServer::createNew(*env, RTSPPORT, authDB);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    appExit();
    exit(1);
  }

  char const* descriptionString = "Session streamed by \"testOnDemandRTSPServer\"";

  // Set up each of the possible streams that can be served by the
  // RTSP server. Each such stream is implemented using a
  // "ServerMediaSession" object, plus one or more
  // "ServerMediaSubsession" objects for each audio/video substream.
  int vsIndex = 0;
  VIDEO_MODE vm[2] = {VIDEO_MODE_VGA, VIDEO_MODE_VGA};
  const char* streamName1 = "vs1";
  const char* streamName2 = "vs2";
  ((AKRTSPServer*)rtspServer)->SetStreamName(streamName1, streamName2);

  if (ext_gSettings->video_types == 1) { // MJPEG main stream
    if (ext_gSettings->width == 640) {
      vm[0] = VIDEO_MODE_VGA;
    } else if (ext_gSettings->width == 320) {
      vm[0] = VIDEO_MODE_QVGA;
    } else if (ext_gSettings->width == 720) {
      vm[0] = VIDEO_MODE_D1;
    }

    AKIPCMJPEGFramedSource* ipcMJPEGSourcecam = NULL;
    ServerMediaSession* smsMJPEGcam
      = ServerMediaSession::createNew(*env, streamName1, 0, descriptionString);
    AKIPCMJPEGOnDemandMediaSubsession* subsMJPEGcam
      = AKIPCMJPEGOnDemandMediaSubsession::createNew(*env, ipcMJPEGSourcecam,
                                                     ext_gSettings->width, ext_gSettings->height, vsIndex);
    smsMJPEGcam->addSubsession(subsMJPEGcam);
    subsMJPEGcam->getframefunc = video_process_get_buf;
    subsMJPEGcam->setledstart = setled_view_start;
    subsMJPEGcam->setledexit = setled_view_stop;

    if (bHasAudio)
      smsMJPEGcam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env, True, getAACBuf, vsIndex));

    rtspServer->addServerMediaSession(smsMJPEGcam);
    char* url1 = rtspServer->rtspURL(smsMJPEGcam);
    *env << "using url \"" << url1 << "\"\n";
    delete[] url1;
  } else if (ext_gSettings->video_types == 0) { // H.264 main stream
    if (ext_gSettings->width == 1280) {
      vm[0] = VIDEO_MODE_720P;
    } else if (ext_gSettings->width == 640) {
      vm[0] = VIDEO_MODE_VGA;
    } else if (ext_gSettings->width == 320) {
      vm[0] = VIDEO_MODE_QVGA;
    } else if (ext_gSettings->width == 720) {
      vm[0] = VIDEO_MODE_D1;
    }

    AKIPCH264FramedSource* ipcSourcecam = NULL;
    ServerMediaSession* smscam
      = ServerMediaSession::createNew(*env, streamName1, 0, descriptionString);
    AKIPCH264OnDemandMediaSubsession* subscam
      = AKIPCH264OnDemandMediaSubsession::createNew(*env, ipcSourcecam, 0, vsIndex);
    smscam->addSubsession(subscam);

    if (bHasAudio)
      smscam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env, True, getAACBuf, vsIndex));

    subscam->getframefunc = video_process_get_buf;
    subscam->setledstart = setled_view_start;
    subscam->setledexit = setled_view_stop;

    rtspServer->addServerMediaSession(smscam);
    char* url1 = rtspServer->rtspURL(smscam);
    *env << "using url \"" << url1 << "\"\n";
    delete[] url1;
  }

  vsIndex = 1;

  if (parse.format2 == 0) { // H.264 second stream
    if (parse.width2 == 1280) {
      vm[1] = VIDEO_MODE_720P;
    } else if (parse.width2 == 640) {
      vm[1] = VIDEO_MODE_VGA;
    } else if (parse.width2 == 320) {
      vm[1] = VIDEO_MODE_QVGA;
    } else if (parse.width2 == 720) {
      vm[1] = VIDEO_MODE_D1;
    }

    AKIPCH264FramedSource* ipcSourcecam = NULL;
    ServerMediaSession* smscam
      = ServerMediaSession::createNew(*env, streamName2, 0, descriptionString);
    AKIPCH264OnDemandMediaSubsession* subscam
      = AKIPCH264OnDemandMediaSubsession::createNew(*env, ipcSourcecam, 0, vsIndex);
    smscam->addSubsession(subscam);

    if (bHasAudio)
      smscam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env, True, getAACBuf, vsIndex));

    subscam->getframefunc = video_process_get_buf;
    subscam->setledstart = setled_view_start;
    subscam->setledexit = setled_view_stop;

    rtspServer->addServerMediaSession(smscam);
    char* url2 = rtspServer->rtspURL(smscam);
    *env << "using url \"" << url2 << "\"\n";
    delete[] url2;
  } else if (parse.format2 == 1) { // MJPEG second stream
    if (parse.width2 == 640) {
      vm[1] = VIDEO_MODE_VGA;
    } else if (parse.width2 == 320) {
      vm[1] = VIDEO_MODE_QVGA;
    } else if (parse.width2 == 720) {
      vm[1] = VIDEO_MODE_D1;
    }

    AKIPCMJPEGFramedSource* ipcMJPEGSourcecam = NULL;
    ServerMediaSession* smsMJPEGcam
      = ServerMediaSession::createNew(*env, streamName2, 0, descriptionString);
    AKIPCMJPEGOnDemandMediaSubsession*
      subsMJPEGcam = AKIPCMJPEGOnDemandMediaSubsession::createNew(*env, ipcMJPEGSourcecam,
                                                                  parse.width2, parse.height2, vsIndex);
    smsMJPEGcam->addSubsession(subsMJPEGcam);
    subsMJPEGcam->getframefunc = video_process_get_buf;
    subsMJPEGcam->setledstart = setled_view_start;
    subsMJPEGcam->setledexit = setled_view_stop;

    if (bHasAudio)
      smsMJPEGcam->addSubsession(AKIPCAACAudioOnDemandMediaSubsession::createNew(*env, True, getAACBuf, vsIndex));

    rtspServer->addServerMediaSession(smsMJPEGcam);
    char* url2 = rtspServer->rtspURL(smsMJPEGcam);
    *env << "using url \"" << url2 << "\"\n";
    delete[] url2;
  }

#if 0
  if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
    *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
  } else {
    *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
  }
#endif

  //printf("streamName:%s,Port:%d\n", streamName1, RTSPPORT);

  NetCtlSrvPar ncsp;
  memset(&ncsp, 0, sizeof(ncsp));
  getDeviceID(ncsp.strDeviceID);
  printf("device id:**%s**\n", ncsp.strDeviceID);
  strcpy(ncsp.strStreamName1, streamName1);
  strcpy(ncsp.strStreamName2, streamName2);
  ncsp.vm1 = vm[0];
  ncsp.vm2 = vm[1];
  ncsp.nRtspPort = RTSPPORT;
  ncsp.nMainFps = parse.fps1;
  ncsp.nSubFps = parse.fps2;

  // Start the net-command server
  startNetCtlServer(&ncsp);

  printf("[##]start record...\n");
  auto_record_file();
  printf("[##]auto_record_file() called..\n");

  // Finally, enter the RTSP event loop:
  env->taskScheduler().doEventLoop(); // does not return

  return 0;
}
static void RtspServerStart(RTSP_PARAM_INFO* cRtspParamInfo) {
  enum {ENUM_UNICAST = 0, ENUM_MULTICAST};

  scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);
  Boolean reuseFirstSource = true;
  OutPacketBuffer::maxSize = 500000;

  const unsigned short statusPort = 10086;
  Port RTSPStatusPort(statusPort);
  Port RTSPServerPort(cRtspParamInfo->iRTSPServerPort);
  RTPSink* videoSinkMajor = NULL;
  RTPSink* videoSinkMinor = NULL;

  char streamName[RTSP_STRING_LEN] = {0};
  char secondStreamName[RTSP_STRING_LEN] = {0};
  strncpy(streamName, cRtspParamInfo->rgStreamName[RTSP_STREAM_MAJOR], RTSP_STRING_LEN - 1);
  strncpy(secondStreamName, cRtspParamInfo->rgStreamName[RTSP_STREAM_MINOR], RTSP_STRING_LEN - 1);

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddressMajor;
  struct in_addr destinationAddressMinor;

  UserAuthenticationDatabase* authDB = NULL;
//#ifdef ACCESS_CONTROL
  if (cRtspParamInfo->iAuthenticateEnable) {
    authDB = new UserAuthenticationDatabase;
//  authDB->addUserRecord(cRtspParamInfo->rgUserName, cRtspParamInfo->rgPassword);
    authDB->addUserRecord("admin", "admin");
    fprintf(stdout, "%s %d Authentication Enable!\n", __FILE__, __LINE__);
  }
//#endif

  destinationAddressMajor.s_addr = chooseRandomIPv4SSMAddress(*env);
  destinationAddressMinor.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address. If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNumMajor = 18888;
  const unsigned short rtcpPortNumMajor = rtpPortNumMajor + 1;
  const unsigned short rtpPortNumMinor = rtcpPortNumMajor + 1;
  const unsigned short rtcpPortNumMinor = rtpPortNumMinor + 1;
  const unsigned char ttl = 255;

  const Port rtpPortMajor(rtpPortNumMajor);
  const Port rtcpPortMajor(rtcpPortNumMajor);
  const Port rtpPortMinor(rtpPortNumMinor);
  const Port rtcpPortMinor(rtcpPortNumMinor);

  // Note: these groupsocks are stack locals; that is safe here only because
  // doEventLoop() below never returns.
  Groupsock rtpGroupsockMajor(*env, destinationAddressMajor, rtpPortMajor, ttl);
  rtpGroupsockMajor.multicastSendOnly(); // we're a SSM source
  Groupsock rtcpGroupsockMajor(*env, destinationAddressMajor, rtcpPortMajor, ttl);
  rtcpGroupsockMajor.multicastSendOnly(); // we're a SSM source
  Groupsock rtpGroupsockMinor(*env, destinationAddressMinor, rtpPortMinor, ttl);
  rtpGroupsockMinor.multicastSendOnly();
  Groupsock rtcpGroupsockMinor(*env, destinationAddressMinor, rtcpPortMinor, ttl);
  rtcpGroupsockMinor.multicastSendOnly();

  // Create a 'H264 Video RTP' sink from the RTP 'groupsock':
  OutPacketBuffer::maxSize = 1000000;

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen + 1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case

  RTSPServer* rtspServer = RTSPServer::createNew(*env, RTSPServerPort, authDB);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }

  // First stream:
  if (cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MAJOR] == ENUM_UNICAST) {
    ServerMediaSession* smsMajor
      = ServerMediaSession::createNew(*env, streamName, streamName,
                                      "Session streamed by \"testH264VideoStreamer\"");
    smsMajor->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, firstInputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(smsMajor);
  } else if (cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MAJOR] == ENUM_MULTICAST) {
    videoSinkMajor
      = H264VideoRTPSink::createNew(*env, &rtpGroupsockMajor, 96);
    RTCPInstance* rtcpMajor
      = RTCPInstance::createNew(*env, &rtcpGroupsockMajor,
                                estimatedSessionBandwidth, CNAME,
                                videoSinkMajor, NULL /* we're a server */,
                                True /* we're a SSM source */);
    ServerMediaSession* smsMajor
      = ServerMediaSession::createNew(*env, streamName, firstInputFileName,
                                      "Session streamed by \"swH264VideoStreamer\"", True /*SSM*/);
    smsMajor->addSubsession(PassiveServerMediaSubsession::createNew(*videoSinkMajor, rtcpMajor));
    rtspServer->addServerMediaSession(smsMajor);
    play(videoSinkMajor, firstInputFileName);
  }

  // Second stream (if configured):
  if ((cRtspParamInfo->iRTSPStreamNum > 1) && (cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MINOR] == ENUM_UNICAST)) {
    ServerMediaSession* smsMinor
      = ServerMediaSession::createNew(*env, secondStreamName, secondStreamName,
                                      "Session streamed by \"testH264VideoStreamer\"");
    smsMinor->addSubsession(H264VideoFileServerMediaSubsession::createNew(*env, secondInputFileName, reuseFirstSource));
    rtspServer->addServerMediaSession(smsMinor);
  } else if ((cRtspParamInfo->iRTSPStreamNum > 1) && (cRtspParamInfo->rgCommunicationMode[RTSP_STREAM_MINOR] == ENUM_MULTICAST)) {
    videoSinkMinor = H264VideoRTPSink::createNew(*env, &rtpGroupsockMinor, 96);
    RTCPInstance* rtcpMinor
      = RTCPInstance::createNew(*env, &rtcpGroupsockMinor,
                                estimatedSessionBandwidth, CNAME,
                                videoSinkMinor, NULL /* we're a server */,
                                True /* we're a SSM source */);
    ServerMediaSession* smsMinor
      = ServerMediaSession::createNew(*env, secondStreamName, secondInputFileName,
                                      "Session streamed by \"swH264VideoStreamer\"", True /*SSM*/);
    smsMinor->addSubsession(PassiveServerMediaSubsession::createNew(*videoSinkMinor, rtcpMinor));
    rtspServer->addServerMediaSession(smsMinor);
    play(videoSinkMinor, secondInputFileName);
  }

  rtspServer->setUpConnectionStatus(RTSPStatusPort);
  rtspServer->setStreamName(0, streamName, strlen(streamName)); // 0 for major
  rtspServer->setStreamName(1, secondStreamName, strlen(secondStreamName));
  rtspServer->setAutoControlBitrate(cRtspParamInfo->iAutoControlBitrateEnable);

  if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
    *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
  } else {
    *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
  }

  env->taskScheduler().doEventLoop(); // does not return
}
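The multicast branches call a play() helper that this excerpt omits. In the testH264VideoStreamer model it opens the input file as a byte stream, wraps it in an H264VideoStreamFramer, and starts the RTP sink playing; when the file ends, an afterPlaying() callback typically closes the source and restarts playback to loop the file. A sketch adapted to the two-argument signature used above (the global env is assumed; the looping behavior is noted but not implemented, since this signature does not carry the filename into the callback):

static void afterPlaying(void* clientData) {
  RTPSink* sink = (RTPSink*)clientData;
  *env << "...done reading from file\n";
  sink->stopPlaying();
  // In the testH264VideoStreamer model, the framer/source is then closed and
  // play() is called again to loop the input file.
}

static void play(RTPSink* videoSink, char const* inputFileName) {
  // Open the input file as a 'byte-stream file source':
  ByteStreamFileSource* fileSource
    = ByteStreamFileSource::createNew(*env, inputFileName);
  if (fileSource == NULL) {
    *env << "Unable to open file \"" << inputFileName << "\" as a byte-stream file source\n";
    exit(1);
  }

  // Create a framer that parses the H.264 Video Elementary Stream into NAL units:
  FramedSource* videoSource = H264VideoStreamFramer::createNew(*env, fileSource);

  // Finally, start playing:
  *env << "Beginning to read from file...\n";
  videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
}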