Example #1
void FileSink::afterGettingFrame(unsigned frameSize,
				 unsigned numTruncatedBytes,
				 struct timeval presentationTime) {
  if (numTruncatedBytes > 0) {
    envir() << "FileSink::afterGettingFrame(): The input frame data was too large for our buffer size ("
	    << fBufferSize << ").  "
            << numTruncatedBytes << " bytes of trailing data was dropped!  Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call to at least "
            << fBufferSize + numTruncatedBytes << "\n";
  }
  addData(fBuffer, frameSize, presentationTime);

  if (fOutFid == NULL || fflush(fOutFid) == EOF) {
    // The output file has closed.  Handle this the same way as if the input source had closed:
    if (fSource != NULL) fSource->stopGettingFrames();
    onSourceClosure();
    return;
  }

  if (fPerFrameFileNameBuffer != NULL) {
    if (fOutFid != NULL) { fclose(fOutFid); fOutFid = NULL; }
  }

  // Then try getting the next frame:
  continuePlaying();
}
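The truncation message above points at the bufferSize argument of FileSink::createNew(). A minimal sketch of creating the sink with a larger receive buffer (the output file name and the 2 MB size are placeholder values, and an existing UsageEnvironment& env is assumed):

#include "liveMedia.hh"

// Ask for a 2,000,000-byte receive buffer instead of the default,
// so large frames are no longer truncated.
FileSink* fileSink = FileSink::createNew(env, "out.h264",
                                         2000000 /* bufferSize */,
                                         False   /* oneFilePerFrame */);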
Example #2
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
    
    // We've just received a frame of data.  (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
    if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
    envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
    if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
    char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
    sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
    envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
    if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
        envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
    }
    envir() << "\n";
#endif

#ifdef DEBUG_PRINT_NPT
    envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime)<< "\n";
#endif

    //envir()<<"*";

    // Then continue, to request the next frame of data:
    continuePlaying();
}
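For context, member callbacks like the one above are normally reached through a static trampoline passed to getNextFrame(), and the loop is re-armed by continuePlaying(); a sketch along the lines of live555's testRTSPClient (the buffer-size constant and the fReceiveBuffer member are assumed):

#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 100000

void DummySink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime, unsigned durationInMicroseconds) {
    // Static trampoline: recover the sink object and forward to the member function above.
    DummySink* sink = (DummySink*)clientData;
    sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}

Boolean DummySink::continuePlaying() {
    if (fSource == NULL) return False; // sanity check (should not happen)

    // Request the next frame of data from our input source; "afterGettingFrame()"
    // will get called later, when it arrives:
    fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
                          afterGettingFrame, this,
                          onSourceClosure, this);
    return True;
}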
Example #3
	void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
	struct timeval presentationTime, unsigned durationInMicroseconds)
	{
		if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
		envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";

		QByteArray frameBuffer((char*)fReceiveBuffer, frameSize);
		
		
		// Insert the SPS/PPS parameter sets so the H.264 decoder can decode correctly
		QByteArray sps = sprop_parameter_sets;
		QByteArray extraData;
		QList<QByteArray> recodList = sps.split(',');

		for (int i = 0; i < recodList.size(); ++i)
		{
			extraData.append(char(0x00));
			extraData.append(char(0x00));
			extraData.append(char(0x00));
			extraData.append(char(0x01));

			extraData += QByteArray::fromBase64(recodList.at(i));
		}
		// 4-byte Annex-B start code (00 00 00 01) to place in front of the frame's NAL unit
		QByteArray endMark = QByteArray(4, 0);
		endMark[3] = 0x01;


		frameBuffer.insert(0, extraData);
		frameBuffer.insert(extraData.size(), endMark);

		m_ffmpeg->decodeFrame((uint8_t*)frameBuffer.data(), frameBuffer.size(), presentationTime.tv_sec, presentationTime.tv_usec);

		// Then continue, to request the next frame of data:
		continuePlaying();
	}
Example #4
void CMemoryStreamSink::afterGettingFrame1(unsigned frameSize,struct timeval presentationTime) 
{
	CAutoLock BufferLock(&m_BufferLock);

	OnRawData(fBuffer, frameSize);
	//addData(fBuffer, frameSize, presentationTime);
	// Then try getting the next frame:
	continuePlaying();
}
Example #5
void MFSD_DummySink::afterGettingFrame1() {
  if (fReturnFirstSeenCode && fOurDemux.lastSeenSCR().isValid) {
    // We were asked to return the first SCR that we saw, and we've seen one,
    // so we're done.  (Handle this as if the input source had closed.)
    onSourceClosure(this);
    return;
  }

  continuePlaying();
}
Example #6
void DefaultSink::onAfterGettingFrame(unsigned frame_size,
                                      unsigned truncated_bytes,
                                      struct timeval const & presentation_time,
                                      unsigned UNUSED_PARAM(duration_in_microseconds))
{
    crLogIfD(_verbose, getFrameInfo(frame_size, truncated_bytes, presentation_time));

    if (!_have_written_first_frame) {
        // If we have NAL units encoded in "sprop parameter strings",
        // prepend these to the file:

        for (auto & param : _sprop_parameter_sets) {
            unsigned int sprop_records_size = 0;

            // Returns the binary value of each 'parameter set' specified in a "sprop-parameter-sets" string
            // (in the SDP description for a H.264/RTP stream).
            //
            // The value is returned as an array (length "numSPropRecords") of "SPropRecord"s.
            // This array is dynamically allocated by this routine, and must be delete[]d by the caller.
            SPropRecord * sprop_records = parseSPropParameterSets(param.data(), sprop_records_size);
            for (unsigned int i = 0; i < sprop_records_size; ++i) {
                write(NAL_START_CODE, sizeof(NAL_START_CODE), presentation_time);
                write(sprop_records[i].sPropBytes, sprop_records[i].sPropLength, presentation_time);
            }
            delete [] sprop_records;
        }
        _have_written_first_frame = true;
    }

    if (truncated_bytes > 0) {
        auto const BUFFER_SIZE = _receive_buffer.size();
        crLogW("DefaultSink::onAfterGettingFrame() The input frame data was too large for our buffer size ({})"
               "{}bytes of trailing data was dropped!"
               "Correct this by increasing the 'bufferSize' parameter in the 'createNew()' call to at least {}",
               BUFFER_SIZE, truncated_bytes, BUFFER_SIZE + truncated_bytes);
    }

    // Write the input data to the file, with the start code in front:
    write(NAL_START_CODE, sizeof(NAL_START_CODE), presentation_time);
    write(_receive_buffer.data(), frame_size, presentation_time);

    if (isClosed()) {
        // The output file has closed.
        // Handle this the same way as if the input source had closed:
        if (fSource != nullptr) {
            fSource->stopGettingFrames();
        }
        onSourceClosure();
        return;
    }

    // Then continue, to request the next frame of data:
    continuePlaying();
}
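NAL_START_CODE itself is not shown in this example; given the sizeof() usage above, a plausible definition is the 4-byte Annex-B start code:

// Assumed definition: the Annex-B start code written in front of every NAL unit.
static unsigned char const NAL_START_CODE[] = { 0x00, 0x00, 0x00, 0x01 };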
Example #7
void QueueSink::afterGettingFrame(unsigned frameSize, struct timeval presentationTime)
{
    if (frame != NULL){
        frame->setLength(frameSize);
        frame->newOriginTime();
        frame->setPresentationTime(std::chrono::system_clock::now());
        frame->setSequenceNumber(++seqNum);
        fWriter->addFrame();
    }

    continuePlaying();
}
Example #8
Boolean AVIFileSink::startPlaying(afterPlayingFunc* afterFunc,
				  void* afterClientData) {
  // Make sure we're not already being played:
  if (fAreCurrentlyBeingPlayed) {
    envir().setResultMsg("This sink has already been played");
    return False;
  }

  fAreCurrentlyBeingPlayed = True;
  fAfterFunc = afterFunc;
  fAfterClientData = afterClientData;

  return continuePlaying();
}
Example #9
void CH264Frame::afterGettingFrame(unsigned frameSize,
								   unsigned numTruncatedBytes,
								   timeval presentationTime) 
{
	static int nFrameTotailNum = 100;
	
	// Check whether the receive buffer was too small (data truncated)
	if (numTruncatedBytes > 0) 
	{
		envir() << "H264Frame::afterGettingFrame(): The input frame data was too large for our buffer size ("
			<< nBufferSize << ").  "
			<< numTruncatedBytes << " bytes of trailing data was dropped!  Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call to at least "
			<< nBufferSize + numTruncatedBytes << "\n";
	}

	// Process the frame data, invoke the callback, then clear the buffer
	bool bIFrame = false;
	nUsedBuffSize += frameSize;

	if (((pWriteBuffer[0] & I_FRAME_HEAD_NO) == I_FRAME_HEAD_FIRST) 
		|| ((pWriteBuffer[0] & I_FRAME_HEAD_NO) == I_FRAME_HEAD_SECOND))
	{
			pWriteBuffer += frameSize;		// do not write to file until 0x65 (i.e. an I-frame) arrives
	}
	else
	{
		if ((pWriteBuffer[0] & I_FRAME_HEAD_NO) == I_FRAME_HEAD_THIRD)
		{
			bIFrame = true;		// I-frame
		}
		++nFrameNum;

		if (NULL != m_pFunCallBack)
		{
			m_pFunCallBack(fBuffer, nUsedBuffSize, bIFrame, nFrameNum, presentationTime, false, pFunCallBackData);
		}
		Clean();
	}

	// Test code: stop playing proactively
// 	if (--nFrameTotailNum < 1)
// 	{
// 		this->stopPlaying();
// 	}

	continuePlaying();
}
Example #10
void ProxyMediaSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                       struct timeval presentationTime,
                                       unsigned durationInMicroseconds)
{
    if (numTruncatedBytes == 0)
    {
        bool isRtcpSynced =
            _subsession.rtpSource() && _subsession.rtpSource()->hasBeenSynchronizedUsingRTCP();
        _mediaPacketQueue.push(
            MediaPacketSample(_receiveBuffer, frameSize, presentationTime, isRtcpSynced));
    }
    // Truncated frames are dropped: nothing is queued for them.

    continuePlaying();
}
Example #11
void ssc::ConsoleDataSink::afterGettingFrame(unsigned int frameSize, unsigned int numTruncatedBytes,
    timeval presentationTime, unsigned int durationInMicroseconds)
{
    envir() << mSubSession.mediumName() << "/" << mSubSession.codecName() << ":\tReceived " << frameSize << " bytes";
    if (numTruncatedBytes > 0)
        envir() << " (with " << numTruncatedBytes << " bytes truncated)";

    char uSecsStr[6 + 1]; // used to output the 'microseconds' part of the presentation time
    std::snprintf(uSecsStr, sizeof(uSecsStr), "%06u", (unsigned)presentationTime.tv_usec);

    envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr << "\n";
    if (mSubSession.rtpSource() && !mSubSession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
        envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
    }

    // Then continue, to request the next frame of data
    continuePlaying();
}
Example #12
void CH264StreamDecodeSink::afterGettingFrame1(unsigned frameSize, timeval presentationTime)
{
//	if ( !log_file )
//	{
//		log_file = fopen(".\\lzxlog.txt", "w");
//	}
//	if ( log_file )
//	{
////		fprintf( log_file, "afterGettingFrame1\n" );
//	}
	// Process the data.
	m_nDataLen = frameSize + ( m_pData - m_pBuffer );

	m_timeStamp = presentationTime;

	DecodeData();

	continuePlaying();
}
Example #13
Boolean MediaSink::startPlaying(MediaSource& source,
				afterPlayingFunc* afterFunc,
				void* afterClientData) {
  // Make sure we're not already being played:
  if (fSource != NULL) {
    envir().setResultMsg("This sink is already being played");
    return False;
  }

  // Make sure our source is compatible:
  if (!sourceIsCompatibleWithUs(source)) {
    envir().setResultMsg("MediaSink::startPlaying(): source is not compatible!");
    return False;
  }
  fSource = (FramedSource*)&source;

  fAfterFunc = afterFunc;
  fAfterClientData = afterClientData;
  return continuePlaying();
}
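A minimal usage sketch in the style of the live555 test programs (fileSink, videoSource and afterPlaying are placeholder names):

void afterPlaying(void* clientData) {
    // Called once the input source signals end-of-stream.
    MediaSink* sink = (MediaSink*)clientData;
    sink->stopPlaying();
}

// ... once the sink and the source have been created:
fileSink->startPlaying(*videoSource, afterPlaying, fileSink);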
Example #14
void HTTPSink::afterGettingFrame1(unsigned frameSize,
				 struct timeval /*presentationTime*/) {
  // Write the data back to our client socket (if we have one):
  if (fClientSocket >= 0 && isUseableFrame(fBuffer, frameSize)) {
    int sendResult
      = send(fClientSocket, (char*)(&fBuffer[0]), frameSize, 0);
    if (sendResult < 0) {
      int err = envir().getErrno();
      if (err != EWOULDBLOCK) {
	// The client appears to have gone; close him down,
	// and consider ourselves done:
	ourOnSourceClosure(this);
	return;
      }
    }
  }

  // Then try getting the next frame:
  continuePlaying();
}
Example #15
void FileSink::afterGettingFrame1(unsigned frameSize,
				  struct timeval presentationTime) {
  addData(fBuffer, frameSize, presentationTime);

  if (fOutFid == NULL || fflush(fOutFid) == EOF) {
    // The output file has closed.  Handle this the same way as if the
    // input source had closed:
    onSourceClosure(this);

    stopPlaying();
    return;
  }
 
  if (fPerFrameFileNameBuffer != NULL) {
    if (fOutFid != NULL) { fclose(fOutFid); fOutFid = NULL; }
  }

  // Then try getting the next frame:
  continuePlaying();
}
Example #16
// If you don't want to see debugging output for each received frame, then comment out the following line:
//#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
				  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) 
{
	// We've just received a frame of data.  (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
	if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
	envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
	if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
	char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
	sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
	envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
	if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
		envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
	}
#ifdef DEBUG_PRINT_NPT
	envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
#endif
	envir() << "\n";
#endif

	calcNPTandInform();
	
	fAimerClient->m_nRecvBytes += frameSize;
	fAimerClient->m_nRecvBytesSec += frameSize;

	if ( strcmp( fSubsession.mediumName(), "VIDEO" ) == 0 ||
		 strcmp( fSubsession.mediumName(), "video" ) == 0 ) {
#if 0
		LARGE_INTEGER stfreq;
		QueryPerformanceFrequency(&stfreq);
		static double m_dfFreq = (double)stfreq.QuadPart;
		
		LARGE_INTEGER startTime;
		static LONGLONG m_nStartTime = 0;
		static LONGLONG nNowTime = 0;
		
		QueryPerformanceCounter(&startTime);
		m_nStartTime = (LONGLONG)startTime.QuadPart;
		
		double dfMinus = (double)(m_nStartTime - nNowTime);
		LONGLONG nMs = (uint64_t)((dfMinus / m_dfFreq) * 1000);

		fprintf(stderr, "diif = %lld\n", nMs);

		nNowTime = m_nStartTime;
#endif
		fAimerClient->m_nRecvVideoBytes += frameSize;
		vector<IDataSink *>::iterator iter = fAimerClient->m_aVecDataSink[STREAM_VIDEO].begin();
		for ( ; iter != fAimerClient->m_aVecDataSink[STREAM_VIDEO].end(); ++iter ) {
			(*iter)->SendData( fReceiveBuffer, frameSize, presentationTime, 
							   fSubsession.mediumName(), fSubsession.codecName() );
		}

		//fprintf(stderr, "Video time %ds, %dms\n", presentationTime.tv_sec, (presentationTime.tv_usec / 1000));
	}

	if ( strcmp( fSubsession.mediumName(), "AUDIO" ) == 0 ||
		 strcmp( fSubsession.mediumName(), "audio" ) == 0 ) {
		vector<IDataSink *>::iterator iter = fAimerClient->m_aVecDataSink[STREAM_AUDIO].begin();
		for ( ; iter != fAimerClient->m_aVecDataSink[STREAM_AUDIO].end(); ++iter ) {
			(*iter)->SendData( fReceiveBuffer, frameSize, presentationTime, 
							fSubsession.mediumName(), fSubsession.codecName(), fSubsession.fmtp_config() );
		}

		//fprintf(stderr, "Audio time %ds, %dms\n", presentationTime.tv_sec, (presentationTime.tv_usec / 1000));
	}
	// Then continue, to request the next frame of data:
	continuePlaying();
}
Example #17
void StreamMediaSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime, unsigned /*durationInMicroseconds*/)
{
	// We've just received a frame of data.  (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
	if (m_fStreamId != NULL) {
		envir() << "Stream \"" << m_fStreamId << "\"; ";
	}

	envir() << m_fSubsession.mediumName() << "/" << m_fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";

	if (numTruncatedBytes > 0) {
		envir() << " (with " << numTruncatedBytes << " bytes truncated)";
	}

	char uSecsStr[6 + 1]; // used to output the 'microseconds' part of the presentation time
	sprintf(uSecsStr, "%06u", (unsigned) presentationTime.tv_usec);
	envir() << ".\tPresentation time: " << (int) presentationTime.tv_sec << "." << uSecsStr;

	if (m_fSubsession.rtpSource() != NULL && !m_fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
		envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
	}

#ifdef DEBUG_PRINT_NPT
	envir() << "\tNPT: " << m_fSubsession.getNormalPlayTime(presentationTime);
#endif
	envir() << "\n";
#endif

	m_avPacket.size = frameSize + 4;
	m_avPacket.data = m_buffer;
	int gotFrame = 0;
	int len = 0;

	while (m_avPacket.size > 0) {
		len = avcodec_decode_video2(m_avCodecContext, m_avFrame, &gotFrame, &m_avPacket);
		if (len < 0) {
			break;
		}
		if (gotFrame) {
			envir() << "Decoded Frame: " << ++m_idx << " Picture Type: " << av_get_picture_type_char(m_avFrame->pict_type) << " Key Frame: " << m_avFrame->key_frame << "\n";
			envir() << "showFrame: " << showFrame() << "\n";

			SDL_PollEvent(&m_event);
			switch (m_event.type) {
				case SDL_QUIT:
					SDL_Quit();
					exit(0);
					break;
				default:
					break;
			}
#if defined(WRITE_RAW)
			if (m_avFrame->key_frame) {
				writeRaw(m_idx);
			}
#endif
#if defined(WRITE_JPEG)
			//if (m_avFrame->pict_type == AV_PICTURE_TYPE_I) {
			writeJPEG(m_idx);
			//}
#endif
		}
		if (m_avPacket.data) {
			m_avPacket.size -= len;
			m_avPacket.data += len;
		}
	}

	// Then continue, to request the next frame of data:
	continuePlaying();
}
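avcodec_decode_video2(), used above, is deprecated in current FFmpeg releases; a minimal sketch of the replacement send/receive API (codecCtx, data and size are assumed to exist, error handling is elided):

extern "C" {
#include <libavcodec/avcodec.h>
}

// Decode one encoded packet with the post-3.1 FFmpeg API: one packet in, zero or more frames out.
static void decodePacket(AVCodecContext* codecCtx, uint8_t* data, int size)
{
    AVPacket* pkt = av_packet_alloc();
    pkt->data = data;
    pkt->size = size;

    if (avcodec_send_packet(codecCtx, pkt) == 0) {
        AVFrame* frm = av_frame_alloc();
        while (avcodec_receive_frame(codecCtx, frm) == 0) {
            // ... use frm here, where "gotFrame" was handled above ...
        }
        av_frame_free(&frm);
    }
    av_packet_free(&pkt);
}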
Example #18
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
				  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
  // We've just received a frame of data.  (Optionally) print out information about it:
  if (numTruncatedBytes > 0)
  {
    printf("============== warning: live555 truncated %d bytes =================\n", numTruncatedBytes);
  }
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
  sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
  envir() << ".\tPresentation time: " << (unsigned)presentationTime.tv_sec << "." << uSecsStr;
  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
  }
  envir() << "\n";
#endif

  // The resolution has changed; stop reporting data upstream
	if (_drop)
	{
		printf("############################### drop frame\n");
		return;
	}


  if(_cb)
  {
    RtspFrameInfo	info;
	info.videoFPS = fSubsession.videoFPS();
	info.videoWidth = fSubsession.videoWidth();
	info.videoHeight = fSubsession.videoHeight();
	info.frequency = fSubsession.rtpTimestampFrequency();
	info.channels = fSubsession.numChannels();
	info.profile_level_id = fSubsession.fmtp_profile_level_id();
	
	strncpy((char*)&(info.mediaName), fSubsession.mediumName(), sizeof(info.mediaName));
	strncpy((char*)&(info.codecName), fSubsession.codecName(), sizeof(info.codecName));
	info.timestamp	= presentationTime;
  	if(fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) 
	{
		info.syncUseRTCP	= false;
	}
	else
	{
		info.syncUseRTCP	= true;
	}
 
	if(strcmp(fSubsession.mediumName(), "audio") == 0)
	{
		if (strcmp(fSubsession.codecName(), "MPEG4-GENERIC") == 0)
		{
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
		else if (strcmp(fSubsession.codecName(), "L16") == 0)
		{
			int i = fSubsession.numChannels();
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
	}
	else if(strcmp(fSubsession.mediumName(), "video") == 0)
	{
		if(strcmp(fSubsession.codecName(), "H264") == 0)
		{
			unsigned char start_code[4] = {0x00, 0x00, 0x00, 0x01};

			if(!_sentHeader)
			{
				_sentHeader	= true;
	
				unsigned numSpropRecords;
				if(fSubsession.fmtp_spropparametersets() && 0 < strlen(fSubsession.fmtp_spropparametersets()))
				{
					SPropRecord* sPropRecords = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), numSpropRecords);
					printf("====================== proparamset: [%d]%s =================\n", numSpropRecords, fSubsession.fmtp_spropparametersets());
					if(numSpropRecords > 0)
					{
						int 	headerLen		= 0;
						int 	validRecordNum	= 0;
						for(unsigned int i = 0; i < numSpropRecords; i++)
						{
							printf("spropparameter first byte = %x\n", sPropRecords[i].sPropBytes[0]);
							if(((sPropRecords[i].sPropBytes[0] & 0x1f) == 7) || ((sPropRecords[i].sPropBytes[0] & 0x1f) == 8))
							{
								headerLen += sPropRecords[i].sPropLength;
								validRecordNum	+= 1;
							}
						}

						headerLen		+= sizeof(start_code) * validRecordNum;
						char*	headerData	= new char[headerLen];
						int		offset		= 0;
						for(unsigned int i = 0; i < numSpropRecords; i++)
						{
							if(((sPropRecords[i].sPropBytes[0] & 0x1f) == 7) || ((sPropRecords[i].sPropBytes[0] & 0x1f) == 8))
							{
								memcpy(headerData + offset, start_code, 4);
								offset				+= 4;
								memcpy(headerData + offset, sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength);
								offset				+= sPropRecords[i].sPropLength;
							}
						}

						uint16_t w = 0;
						uint16_t h = 0;
						if (H264Parse::GetResolution((uint8_t*)headerData, headerLen, &w, &h))
						{
							_w = w;
							_h = h;
						}

						info.isHeader	= 1;
						_cb(_channel, headerLen, headerData, info);

						delete [] headerData;
					}
				}
			}
			else
			{
				if ((fReceiveBuffer[0] & 0x1f) == 7)
				{
					uint16_t w = 0;
					uint16_t h = 0;
					if (H264Parse::GetResolution((uint8_t*)fReceiveBuffer, frameSize, &w, &h))
					{
						if (_w == 0 || _h == 0)
						{
							_w = w;
							_h = h;
						}
						else if ((_w != w) || (_h != h))
						{
							printf("=====33333333========= %dx%d,   %dx%d\n", _w, _h, w, h);
							_drop = true;
						}
					}
				}
			}

			if (!_drop)
			{
				info.isHeader	= 0;

				char* newData	= new char[sizeof(start_code) + frameSize];
				memcpy(newData, start_code, sizeof(start_code));
				memcpy(newData + sizeof(start_code), (char*)fReceiveBuffer, frameSize);
 				_cb(_channel, frameSize + sizeof(start_code), newData, info);
			
				delete [] newData;
			}
		}
		else if(strcmp(fSubsession.codecName(), "MP4V-ES") == 0)
		{
#ifdef SEND_CONFIG_HEADER
			unsigned configLen;
			unsigned char* configData = parseGeneralConfigStr(fSubsession.fmtp_config(), configLen);
				
			info.isHeader	= 1;
			_cb(_channel, configLen, (char*)configData, info);
#endif
			
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
		else
		{
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
	}
  }
  
  // Then continue, to request the next frame of data:
  continuePlaying();
}
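The checks of the form (fReceiveBuffer[0] & 0x1f) == 7 above test the H.264 NAL unit type of the first payload byte; a small helper makes the intent explicit (the helper name is illustrative):

// H.264 NAL unit types used above (ITU-T H.264, Table 7-1): 5 = IDR slice, 7 = SPS, 8 = PPS.
static inline unsigned h264NalUnitType(unsigned char const* nal)
{
    return nal[0] & 0x1F;
}

// e.g.: if (h264NalUnitType(fReceiveBuffer) == 7) { /* SPS: parse the resolution */ }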
Example #19
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
				  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
  // We've just received a frame of data.  (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
  sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
  envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
  }
#ifdef DEBUG_PRINT_NPT
  envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
#endif
  envir() << "\n";
#endif
	if (strcmp(fSubsession.codecName(),"H264") == 0) {
	avpkt.data = fReceiveBufferAV;
//	r2sprop();
//	r2sprop2();
//	avpkt.size = (int)fReceiveBuffer[0];
	avpkt.size = frameSize + 4;
//	avpkt.size = frameSize;
	if (avpkt.size != 0) {
		memcpy (fReceiveBufferAV + 4, fReceiveBuffer, frameSize);	
		avpkt.data = fReceiveBufferAV; //+2;
//		avpkt.data = fReceiveBuffer; //+2;
		len = avcodec_decode_video2 (c, picture, &got_picture, &avpkt);
		if (len < 0) {
			envir() << "Error while decoding frame" << frame;
//			exit(6);
		}
		if (got_picture) {
			// do something with it
			SDL_LockYUVOverlay(bmp);
			
			AVPicture pict;
			pict.data[0] = bmp->pixels[0];
			pict.data[1] = bmp->pixels[2];
			pict.data[2] = bmp->pixels[1];

			pict.linesize[0] = bmp->pitches[0];
			pict.linesize[1] = bmp->pitches[2];
			pict.linesize[2] = bmp->pitches[1];

			struct SwsContext *sws;
			sws = sws_getContext(
				c->width,
				c->height,
				PIX_FMT_YUV420P,
				c->width,
				c->height,
				PIX_FMT_YUV420P,
				SWS_BICUBIC,
				NULL,
				NULL,
				NULL
			);
			sws_scale(
				sws,
				picture->data,
				picture->linesize,
				0,
				c->height,
				pict.data,
				pict.linesize
			);
				
				

			SDL_UnlockYUVOverlay(bmp);

			rect.x = 0;
			rect.y = 0;
			rect.w = c->width;
			rect.h = c->height;
			SDL_DisplayYUVOverlay(bmp, &rect);


/*
			char fname[256]={0};
			sprintf(fname, "OriginalYUV%d.pgm",frame);
			pgm_save (
				picture->data[0],
				picture->linesize[0],
				c->width,
				c->height,
				fname
			);
*/
			frame ++;
		} else {
			envir() << "no picture :( !\n";
		}
	}

	}

  // Then continue, to request the next frame of data:
  continuePlaying();
}