AVIFileSink::AVIFileSink(UsageEnvironment& env,
                         MediaSession& inputSession,
                         char const* outputFileName,
                         unsigned bufferSize,
                         unsigned short movieWidth, unsigned short movieHeight,
                         unsigned movieFPS,
                         Boolean packetLossCompensate)
  : Medium(env), fInputSession(inputSession),
    fIndexRecordsHead(NULL), fIndexRecordsTail(NULL), fNumIndexRecords(0),
    fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate),
    fAreCurrentlyBeingPlayed(False), fNumSubsessions(0), fNumBytesWritten(0),
    fHaveCompletedOutputFile(False),
    fMovieWidth(movieWidth), fMovieHeight(movieHeight), fMovieFPS(movieFPS) {
  fOutFid = OpenOutputFile(env, outputFileName);
  if (fOutFid == NULL) return;

  // Set up I/O state for each input subsession:
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    // Ignore subsessions without a data source:
    FramedSource* subsessionSource = subsession->readSource();
    if (subsessionSource == NULL) continue;

    // If "subsession's" SDP description specified screen dimension
    // or frame rate parameters, then use these:
    if (subsession->videoWidth() != 0) {
      fMovieWidth = subsession->videoWidth();
    }
    if (subsession->videoHeight() != 0) {
      fMovieHeight = subsession->videoHeight();
    }
    if (subsession->videoFPS() != 0) {
      fMovieFPS = subsession->videoFPS();
    }

    AVISubsessionIOState* ioState
      = new AVISubsessionIOState(*this, *subsession);
    subsession->miscPtr = (void*)ioState;

    // Also set a 'BYE' handler for this subsession's RTCP instance:
    if (subsession->rtcpInstance() != NULL) {
      subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState);
    }

    ++fNumSubsessions;
  }

  // Begin by writing an AVI header:
  addFileHeader_AVI();
}
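// A minimal usage sketch (not part of the original excerpt): in live555, an
// AVIFileSink is normally obtained through the static createNew() factory
// rather than by invoking this constructor directly, and recording is driven
// by startPlaying(). The output file name, dimensions, FPS and the
// afterRecording()/beginAVIRecording() helpers below are illustrative
// placeholders.
#include "liveMedia.hh"

// Hypothetical completion callback: close the sink once recording finishes.
static void afterRecording(void* clientData) {
  AVIFileSink* sink = (AVIFileSink*)clientData;
  Medium::close(sink);
}

void beginAVIRecording(UsageEnvironment& env, MediaSession& session) {
  // Create the sink; as the constructor above shows, any width/height/FPS
  // present in the session's SDP description will override the values passed here.
  AVIFileSink* aviSink
    = AVIFileSink::createNew(env, session, "capture.avi",
                             100000 /*bufferSize*/,
                             640, 480 /*movieWidth, movieHeight*/,
                             25 /*movieFPS*/,
                             False /*packetLossCompensate*/);
  if (aviSink == NULL) {
    env << "Failed to create AVI sink: " << env.getResultMsg() << "\n";
    return;
  }

  aviSink->startPlaying(afterRecording, aviSink);
}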
int CAimer39RTSPClient::GetVideoFPS(unsigned int nStreamNum) {
  // Look up the subsession for this stream; fail if it doesn't exist
  // or isn't a video subsession:
  MediaSubsession* subsession = findSubSessionByStreamNum(nStreamNum);
  if (NULL == subsession) return -1;
  IS_VIDEO_SUBS_R(subsession, -1);

  // Return the frame rate advertised in the subsession's SDP description:
  return (int)subsession->videoFPS();
}
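// A hypothetical caller sketch (the rest of the CAimer39RTSPClient API is not
// shown in this excerpt): query the FPS of a stream and fall back to a default
// when the stream is missing, not video, or its SDP specified no frame rate.
// The stream number and fallback value are assumptions.
#include <stdio.h>

void printStreamFPS(CAimer39RTSPClient& client) {
  int fps = client.GetVideoFPS(0 /*nStreamNum, assumed to be the video stream*/);
  if (fps <= 0) {
    // -1 means "no such stream" or "not a video subsession";
    // 0 means the SDP description carried no frame-rate attribute.
    fps = 25;
  }
  printf("Using %d frames per second\n", fps);
}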