Example 1
H264VideoRTPSink* H264VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
            char const* sPropParameterSetsStr) {
  u_int8_t* sps = NULL; unsigned spsSize = 0;
  u_int8_t* pps = NULL; unsigned ppsSize = 0;

  unsigned numSPropRecords;
  SPropRecord* sPropRecords = parseSPropParameterSets(sPropParameterSetsStr, numSPropRecords);
  for (unsigned i = 0; i < numSPropRecords; ++i) {
    if (sPropRecords[i].sPropLength == 0) continue; // bad data
    u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0])&0x1F;
    if (nal_unit_type == 7/*SPS*/) {
      sps = sPropRecords[i].sPropBytes;
      spsSize = sPropRecords[i].sPropLength;
    } else if (nal_unit_type == 8/*PPS*/) {
      pps = sPropRecords[i].sPropBytes;
      ppsSize = sPropRecords[i].sPropLength;
    }
  }

  H264VideoRTPSink* result
    = new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat, sps, spsSize, pps, ppsSize);
  delete[] sPropRecords;

  return result;
}
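For reference, the parsing contract all of these examples rely on can be exercised standalone. A minimal sketch (the Base64 "sprop-parameter-sets" value below is illustrative, not taken from a real stream):

#include "liveMedia.hh" // declares parseSPropParameterSets() and SPropRecord
#include <cstdio>

static void dumpSPropNALTypes(char const* spropStr) {
  unsigned numRecords = 0;
  SPropRecord* records = parseSPropParameterSets(spropStr, numRecords);
  for (unsigned i = 0; i < numRecords; ++i) {
    if (records[i].sPropLength == 0) continue; // bad data
    // H.264 keeps the NAL unit type in the low 5 bits of the first byte:
    std::printf("record %u: NAL type %d, %u bytes\n",
                i, records[i].sPropBytes[0] & 0x1F, records[i].sPropLength);
  }
  delete[] records; // the array is heap-allocated by parseSPropParameterSets()
}

// e.g. dumpSPropNALTypes("Z0IACpZTBYmI,aMljiA=="); // illustrative "SPS,PPS" pair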
Example 2
H265VideoRTPSink* H265VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
	    char const* sPropVPSStr, char const* sPropSPSStr, char const* sPropPPSStr) {
  u_int8_t* vps = NULL; unsigned vpsSize = 0;
  u_int8_t* sps = NULL; unsigned spsSize = 0;
  u_int8_t* pps = NULL; unsigned ppsSize = 0;

  // Parse each 'sProp' string, extracting and then classifying the NAL unit(s) from each one.
  // We're 'liberal in what we accept'; it's OK if the strings don't contain the NAL unit type
  // implied by their names (or if one or more of the strings encode multiple NAL units).
  SPropRecord* sPropRecords[3];
  unsigned numSPropRecords[3];
  sPropRecords[0] = parseSPropParameterSets(sPropVPSStr, numSPropRecords[0]);
  sPropRecords[1] = parseSPropParameterSets(sPropSPSStr, numSPropRecords[1]);
  sPropRecords[2] = parseSPropParameterSets(sPropPPSStr, numSPropRecords[2]);

  for (unsigned j = 0; j < 3; ++j) {
    SPropRecord* records = sPropRecords[j];
    unsigned numRecords = numSPropRecords[j];

    for (unsigned i = 0; i < numRecords; ++i) {
      if (records[i].sPropLength == 0) continue; // bad data
      u_int8_t nal_unit_type = ((records[i].sPropBytes[0])&0x7E)>>1;
      if (nal_unit_type == 32/*VPS*/) {
	vps = records[i].sPropBytes;
	vpsSize = records[i].sPropLength;
      } else if (nal_unit_type == 33/*SPS*/) {
	sps = records[i].sPropBytes;
	spsSize = records[i].sPropLength;
      } else if (nal_unit_type == 34/*PPS*/) {
	pps = records[i].sPropBytes;
	ppsSize = records[i].sPropLength;
      }
    }
  }

  H265VideoRTPSink* result = new H265VideoRTPSink(env, RTPgs, rtpPayloadFormat,
						  vps, vpsSize, sps, spsSize, pps, ppsSize);
  delete[] sPropRecords[0]; delete[] sPropRecords[1]; delete[] sPropRecords[2];

  return result;
}
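The different bitmask compared to Example 1 reflects the HEVC NAL unit header: H.265 stores its six-bit nal_unit_type just below the most-significant (forbidden-zero) bit of the first header byte, whereas H.264 uses the low five bits. A quick check with the VPS value tested above:

// HEVC NAL header byte 0: F(1) | nal_unit_type(6) | high bit of nuh_layer_id(1).
u_int8_t firstByte = 0x40;                    // 0100 0000: a typical VPS header byte
u_int8_t nalType = (firstByte & 0x7E) >> 1;   // == 32 (VPS), matching the check above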
Example 3
void DefaultSink::onAfterGettingFrame(unsigned frame_size,
                                      unsigned truncated_bytes,
                                      struct timeval const & presentation_time,
                                      unsigned UNUSED_PARAM(duration_in_microseconds))
{
    crLogIfD(_verbose, getFrameInfo(frame_size, truncated_bytes, presentation_time));

    if (!_have_written_first_frame) {
        // If we have NAL units encoded in "sprop parameter strings",
        // prepend these to the file:

        for (auto & param : _sprop_parameter_sets) {
            unsigned int sprop_records_size = 0;

            // Returns the binary value of each 'parameter set' specified in a "sprop-parameter-sets" string
            // (in the SDP description for a H.264/RTP stream).
            //
            // The value is returned as an array (length "numSPropRecords") of "SPropRecord"s.
            // This array is dynamically allocated by this routine, and must be delete[]d by the caller.
            SPropRecord * sprop_records = parseSPropParameterSets(param.data(), sprop_records_size);
            for (unsigned int i = 0; i < sprop_records_size; ++i) {
                write(NAL_START_CODE, sizeof(NAL_START_CODE), presentation_time);
                write(sprop_records[i].sPropBytes, sprop_records[i].sPropLength, presentation_time);
            }
            delete [] sprop_records;
        }
        _have_written_first_frame = true;
    }

    if (truncated_bytes > 0) {
        auto const BUFFER_SIZE = _receive_buffer.size();
        crLogW("DefaultSink::onAfterGettingFrame() The input frame data was too large for our buffer size ({})"
               "{}bytes of trailing data was dropped!"
               "Correct this by increasing the 'bufferSize' parameter in the 'createNew()' call to at least {}",
               BUFFER_SIZE, truncated_bytes, BUFFER_SIZE + truncated_bytes);
    }

    // Write the input data to the file, with the start code in front:
    write(NAL_START_CODE, sizeof(NAL_START_CODE), presentation_time);
    write(_receive_buffer.data(), frame_size, presentation_time);

    if (isClosed()) {
        // The output file has closed.
        // Handle this the same way as if the input source had closed:
        if (fSource != nullptr) {
            fSource->stopGettingFrames();
        }
        onSourceClosure();
        return;
    }

    // Then continue, to request the next frame of data:
    continuePlaying();
}
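The SPropRecord type returned by parseSPropParameterSets() is, as far as these examples use it, a plain length/pointer pair. A sketch of the shape implied by the calls above (see live555's H264VideoRTPSource.hh for the authoritative declaration, which also frees sPropBytes in its destructor):

// Shape implied by the usage in these examples:
struct SPropRecord {
  unsigned sPropLength;       // size, in bytes, of "sPropBytes"
  unsigned char* sPropBytes;  // one NAL unit, without any start-code prefix
};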
Example 4
void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
  do {
    UsageEnvironment& env = rtspClient->envir(); // alias
    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias

    if (resultCode != 0) {
      env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n";
      break;
    }

    env << *rtspClient << "Set up the \"" << *scs.subsession
	<< "\" subsession (client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1 << ")\n";

    // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
    // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
    // after we've sent a RTSP "PLAY" command.)

    scs.subsession->sink = DummySink::createNew(env, *scs.subsession, rtspClient->url());
      // perhaps use your own custom "MediaSink" subclass instead
    if (scs.subsession->sink == NULL) {
      env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
	  << "\" subsession: " << env.getResultMsg() << "\n";
      break;
    }

    const char* spandPp = scs.subsession->fmtp_spropparametersets();
    unsigned numOfRecords = 0;
    SPropRecord* rec = parseSPropParameterSets(spandPp, numOfRecords);

    for (unsigned i = 0; i < numOfRecords; ++i) {
      unsigned nalUnitSize = rec[i].sPropLength;
      unsigned char* nalUnitBytes = rec[i].sPropBytes; // a byte array of size "nalUnitSize"
      // ... do whatever you like with this NAL unit data ...
    }
    delete[] rec; // the array is heap-allocated by parseSPropParameterSets()

    env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n";
    scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession
    scs.subsession->sink->startPlaying(*(scs.subsession->readSource()), subsessionAfterPlaying, scs.subsession);
    // Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
    if (scs.subsession->rtcpInstance() != NULL) {
      scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession);
    }
  } while (0);
  delete[] resultString; // response handlers are responsible for freeing this

  // Set up the next subsession, if any:
  setupNextSubsession(rtspClient);
}
Example 5
void H264VideoStreamFramer::setSPSandPPS(char const* sPropParameterSetsStr) {
  unsigned numSPropRecords;
  SPropRecord* sPropRecords = parseSPropParameterSets(sPropParameterSetsStr, numSPropRecords);
  for (unsigned i = 0; i < numSPropRecords; ++i) {
    if (sPropRecords[i].sPropLength == 0) continue; // bad data
    u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0])&0x1F;
    if (nal_unit_type == 7/*SPS*/) {
      saveCopyOfSPS(sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength);
    } else if (nal_unit_type == 8/*PPS*/) {
      saveCopyOfPPS(sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength);
    }
  }
  delete[] sPropRecords;
}
Example 6
		bool Init(VirtualSink* sink, AVCodec* avCodec, const char* sprops){
			//if it has been initialized before, we should do cleanup first
			Cleanup();

			avCodecContext = avcodec_alloc_context();
			if (!avCodecContext) {
				//failed to allocate codec context
				Cleanup();
				return false;
			}
			uint8_t startCode[] = {0x00, 0x00, 0x01};
			if(sprops != NULL){
				unsigned spropCount;
				SPropRecord* spropRecords = parseSPropParameterSets(sprops, spropCount);
				try{
					for (unsigned i = 0; i < spropCount; ++i) {
						AddExtraData(startCode, sizeof(startCode));
						AddExtraData(spropRecords[i].sPropBytes, spropRecords[i].sPropLength);
					}
				}catch(void*){
					//extradata exceeds size limit
					delete[] spropRecords;
					Cleanup();
					return false;
				}
				delete[] spropRecords;

				avCodecContext->extradata = extraDataBuffer;
				avCodecContext->extradata_size = extraDataSize;
			}
			AddExtraData(startCode, sizeof(startCode));
			avCodecContext->flags = 0;

			if (avcodec_open(avCodecContext, avCodec) < 0) {
				//failed to open codec
				Cleanup();
				return false;
			}
			if (avCodecContext->codec_id == CODEC_ID_H264){
				avCodecContext->flags2 |= CODEC_FLAG2_CHUNKS;
				//avCodecContext->flags2 |= CODEC_FLAG2_SHOW_ALL;
			}
			avFrame = avcodec_alloc_frame();
			if (!avFrame){
				//failed to allocate frame
				Cleanup();
				return false;
			}
			return true;
		}
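AddExtraData() is this codebase's own helper rather than an ffmpeg or live555 call, and its body is not shown. A plausible sketch consistent with how Init() uses it (the buffer members and the capacity limit are assumptions; the thrown void* matches the catch(void*) clause above):

#include <cstring>
#include <stdint.h>

static const unsigned EXTRA_DATA_CAPACITY = 1024; // assumed limit
static uint8_t extraDataBuffer[EXTRA_DATA_CAPACITY];
static unsigned extraDataSize = 0;

// Appends bytes to the buffer that Init() later hands to avCodecContext->extradata.
static void AddExtraData(const uint8_t* data, unsigned size) {
	if (extraDataSize + size > EXTRA_DATA_CAPACITY)
		throw (void*)0; // "extradata exceeds size limit", caught in Init()
	std::memcpy(extraDataBuffer + extraDataSize, data, size);
	extraDataSize += size;
}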
Example 7
void H264VideoFileSink::afterGettingFrame1(unsigned frameSize, struct timeval presentationTime) {
    unsigned char const start_code[4] = {0x00, 0x00, 0x00, 0x01};

    if (!fHaveWrittenFirstFrame) {
        // If we have PPS/SPS NAL units encoded in a "sprop parameter string", prepend these to the file:
        unsigned numSPropRecords;
        SPropRecord* sPropRecords = parseSPropParameterSets(fSPropParameterSetsStr, numSPropRecords);
        for (unsigned i = 0; i < numSPropRecords; ++i) {
            addData(start_code, 4, presentationTime);
            addData(sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength, presentationTime);
        }
        delete[] sPropRecords;
        fHaveWrittenFirstFrame = True; // for next time
    }

    // Write the input data to the file, with the start code in front:
    addData(start_code, 4, presentationTime);

    // Call the parent class to complete the normal file write with the input data:
    FileSink::afterGettingFrame1(frameSize, presentationTime);
}
Example 8
void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
  do {
    UsageEnvironment& env = rtspClient->envir(); // alias
    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias

    if (resultCode != 0) {
      env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString << "\n";
      break;
    }

    env << *rtspClient << "Set up the \"" << *scs.subsession
	<< "\" subsession (client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1 << ")\n";

    const char* sprop = scs.subsession->fmtp_spropparametersets();
    uint8_t const* sps = NULL;
    unsigned spsSize = 0;
    uint8_t const* pps = NULL;
    unsigned ppsSize = 0;
    SPropRecord* sPropRecords = NULL;

    if (sprop != NULL) {
      unsigned numSPropRecords;
      sPropRecords = parseSPropParameterSets(sprop, numSPropRecords);
      for (unsigned i = 0; i < numSPropRecords; ++i) {
        if (sPropRecords[i].sPropLength == 0) continue; // bad data
        u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0])&0x1F;
        if (nal_unit_type == 7/*SPS*/) {
          sps = sPropRecords[i].sPropBytes;
          spsSize = sPropRecords[i].sPropLength;
        } else if (nal_unit_type == 8/*PPS*/) {
          pps = sPropRecords[i].sPropBytes;
          ppsSize = sPropRecords[i].sPropLength;
        }
      }
    }

    // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
    // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
    // after we've sent a RTSP "PLAY" command.)
    scs.subsession->sink = DummySink::createNew(env, *scs.subsession, rtspClient->url());
      // perhaps use your own custom "MediaSink" subclass instead
    if (scs.subsession->sink == NULL) {
      env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
	  << "\" subsession: " << env.getResultMsg() << "\n";
      delete[] sPropRecords; // avoid leaking the parsed records on this early exit
      break;
    }

    env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n";
    scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession
    if (sps != NULL) {
      ((DummySink*)scs.subsession->sink)->setSprop(sps, spsSize);
    }
    if (pps != NULL) {
      ((DummySink*)scs.subsession->sink)->setSprop(pps, ppsSize);
    }
    // "sps"/"pps" point into "sPropRecords"; assuming "setSprop()" copies its
    // input (see the sketch after this example), the records can be freed now:
    delete[] sPropRecords;
    scs.subsession->sink->startPlaying(*(scs.subsession->readSource()),
				       subsessionAfterPlaying, scs.subsession);
    // Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
    if (scs.subsession->rtcpInstance() != NULL) {
      scs.subsession->rtcpInstance()->setByeHandler(subsessionByeHandler, scs.subsession);
    }
  } while (0);
  delete[] resultString;

  // Set up the next subsession, if any:
  setupNextSubsession(rtspClient);
}
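DummySink::setSprop() above is part of the example's custom sink, not part of live555. A minimal sketch of an implementation consistent with how it is called, assuming the sink copies the bytes (which is what makes freeing the SPropRecord array immediately afterwards safe; "fSprops" is a hypothetical member):

#include <vector>

// Hypothetical member: std::vector<std::vector<u_int8_t> > fSprops;
void DummySink::setSprop(u_int8_t const* data, unsigned size) {
  // Copy the parameter-set bytes so the caller may free its SPropRecord array:
  fSprops.push_back(std::vector<u_int8_t>(data, data + size));
}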
Example 9
StreamMediaSink::StreamMediaSink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
: MediaSink(env)
, m_fSubsession(subsession)
, m_idx(0)
, m_avCodec(NULL)
, m_avCodecContext(NULL)
, m_avFrame(NULL)
, m_bmp(NULL)
, m_screen(NULL)
, img_convert_ctx(NULL)
{
	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER)) {
		std::stringstream ss;
		ss << "Could not initialize SDL - " << SDL_GetError();
		throw std::runtime_error(ss.str().c_str());
	}
	m_fStreamId = strDup(streamId);
	m_buffer = new u_int8_t[MEDIA_SINK_RECEIVE_BUFFER_SIZE + 4];

	av_init_packet(&m_avPacket);
	//m_avPacket.flags |= AV_PKT_FLAG_KEY;
	//m_avPacket.pts = m_avPacket.dts = 0;

	m_avCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
	if (!m_avCodec) {
		throw std::runtime_error("Failed to find H264 ffmpeg codec");
	}

	m_avCodecContext = avcodec_alloc_context3(m_avCodec);
	if (!m_avCodecContext) {
		throw std::runtime_error("Failed to allocate codec context");
	}
	m_avCodecContext->pix_fmt = PIX_FMT_YUV420P;
	//m_avCodecContext->flags |= CODEC_FLAG2_CHUNKS;
	//m_avCodecContext->thread_count = 4;

	if (m_avCodec->capabilities & CODEC_CAP_TRUNCATED) {
		m_avCodecContext->flags |= CODEC_FLAG_TRUNCATED;
	}

	if (avcodec_open2(m_avCodecContext, m_avCodec, NULL) < 0) {
		throw std::runtime_error("Failed to open codec");
	}

	m_avFrame = av_frame_alloc();
	if (!m_avFrame) {
		throw std::runtime_error("Failed to allocate video frame");
	}

	m_screen = SDL_SetVideoMode(m_fSubsession.videoWidth(), m_fSubsession.videoHeight(), 0, 0);
	if (!m_screen) {
		throw std::runtime_error("SDL: could not set video mode - exiting");
	}

	// Allocate a place to put our YUV image on that screen
	m_bmp = SDL_CreateYUVOverlay(m_screen->w, m_screen->h, SDL_YV12_OVERLAY, m_screen);

	if (img_convert_ctx == NULL) {
		int w = m_screen->w;
		int h = m_screen->h;
		img_convert_ctx = sws_getContext(w, h, m_avCodecContext->pix_fmt, w, h, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	}

	const u_int8_t start_code[] = {0x00, 0x00, 0x00, 0x01};
	unsigned idx = 0;
#if 0
	unsigned int n_records = 0;
	const char* sps = subsession.fmtp_spropparametersets();
	envir() << "SPS: " << sps << "\n";
	SPropRecord* pSPropRecord = parseSPropParameterSets(sps, n_records);

	for (unsigned i = 0; i < n_records; ++i) {
		memcpy(&m_buffer[idx], start_code, 4);
		memcpy(&m_buffer[idx + 4], pSPropRecord[i].sPropBytes, pSPropRecord[i].sPropLength);
		idx += 4 + pSPropRecord[i].sPropLength;
		m_avPacket.size += 4 + pSPropRecord[i].sPropLength;
	}

	m_avPacket.data = m_buffer;

	int p = 0;
	int l = avcodec_decode_video2(m_avCodecContext, m_avFrame, &p, &m_avPacket);
	delete[] pSPropRecord; // heap-allocated by parseSPropParameterSets()
#endif
	memcpy(&m_buffer[idx], start_code, 4);
	idx += 4;
	m_fReceiveBuffer = &m_buffer[idx];
}
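The constructor deliberately leaves the first four bytes of m_buffer holding a start code, with m_fReceiveBuffer pointing just past it, so each received NAL unit lands pre-prefixed. A sketch of how a later frame callback could use that layout (frameSize is assumed to come from the usual afterGettingFrame() parameters; avcodec_decode_video2() matches the API era of the #if 0 block):

// Decoding a received frame using the pre-placed start code: m_buffer
// already holds "00 00 00 01" followed by the NAL unit just received
// at m_fReceiveBuffer (== m_buffer + 4).
m_avPacket.data = m_buffer;
m_avPacket.size = 4 + frameSize;   // start code + received NAL unit
int gotPicture = 0;
avcodec_decode_video2(m_avCodecContext, m_avFrame, &gotPicture, &m_avPacket);
if (gotPicture) {
	// hand m_avFrame to the SDL overlay via sws_scale()/img_convert_ctx
}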
Example 10
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
				  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
  // We've just received a frame of data.  (Optionally) print out information about it:
  if (numTruncatedBytes > 0) {
    printf("============== warning, live555 truncated %u bytes =================\n", numTruncatedBytes);
  }
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
  sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
  envir() << ".\tPresentation time: " << (unsigned)presentationTime.tv_sec << "." << uSecsStr;
  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
  }
  envir() << "\n";
#endif

  // The resolution has changed; stop passing frames to the callback:
  if (_drop) {
    printf("############################### drop frame\n");
    return;
  }


  if(_cb)
  {
    RtspFrameInfo	info;
	info.videoFPS = fSubsession.videoFPS();
	info.videoWidth = fSubsession.videoWidth();
	info.videoHeight = fSubsession.videoHeight();
	info.frequency = fSubsession.rtpTimestampFrequency();
	info.channels = fSubsession.numChannels();
	info.profile_level_id = fSubsession.fmtp_profile_level_id();
	
	strncpy((char*)&(info.mediaName), fSubsession.mediumName(), sizeof(info.mediaName));
	strncpy((char*)&(info.codecName), fSubsession.codecName(), sizeof(info.codecName));
	info.timestamp	= presentationTime;
  	if(fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) 
	{
		info.syncUseRTCP	= false;
	}
	else
	{
		info.syncUseRTCP	= true;
	}
 
	if(strcmp(fSubsession.mediumName(), "audio") == 0)
	{
		if (strcmp(fSubsession.codecName(), "MPEG4-GENERIC") == 0)
		{
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
		else if (strcmp(fSubsession.codecName(), "L16") == 0)
		{
			int i = fSubsession.numChannels();
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
	}
	else if(strcmp(fSubsession.mediumName(), "video") == 0)
	{
		if(strcmp(fSubsession.codecName(), "H264") == 0)
		{
			unsigned char start_code[4] = {0x00, 0x00, 0x00, 0x01};

			if(!_sentHeader)
			{
				_sentHeader	= true;
	
				unsigned numSpropRecords;
				if(fSubsession.fmtp_spropparametersets() && 0 < strlen(fSubsession.fmtp_spropparametersets()))
				{
					SPropRecord* sPropRecords = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), numSpropRecords);
					printf("====================== proparamset: [%d]%s =================\n", numSpropRecords, fSubsession.fmtp_spropparametersets());
					if(numSpropRecords > 0)
					{
						int 	headerLen		= 0;
						int 	validRecordNum	= 0;
						for(unsigned int i = 0; i < numSpropRecords; i++)
						{
							printf("spropparameter first byte = %x\n", sPropRecords[i].sPropBytes[0]);
							if(((sPropRecords[i].sPropBytes[0] & 0x1f) == 7) || ((sPropRecords[i].sPropBytes[0] & 0x1f) == 8))
							{
								headerLen += sPropRecords[i].sPropLength;
								validRecordNum	+= 1;
							}
						}

						headerLen		+= sizeof(start_code) * validRecordNum;
						char*	headerData	= new char[headerLen];
						int		offset		= 0;
						for(unsigned int i = 0; i < numSpropRecords; i++)
						{
							if(((sPropRecords[i].sPropBytes[0] & 0x1f) == 7) || ((sPropRecords[i].sPropBytes[0] & 0x1f) == 8))
							{
								memcpy(headerData + offset, start_code, 4);
								offset				+= 4;
								memcpy(headerData + offset, sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength);
								offset				+= sPropRecords[i].sPropLength;
							}
						}

						uint16_t w = 0;
						uint16_t h = 0;
						if (H264Parse::GetResolution((uint8_t*)headerData, headerLen, &w, &h))
						{
							_w = w;
							_h = h;
						}

						info.isHeader	= 1;
						_cb(_channel, headerLen, headerData, info);

						delete [] headerData;
					}
					delete [] sPropRecords;	// parseSPropParameterSets() heap-allocates this array
				}
			}
			else
			{
				if ((fReceiveBuffer[0] & 0x1f) == 7)
				{
					uint16_t w = 0;
					uint16_t h = 0;
					if (H264Parse::GetResolution((uint8_t*)fReceiveBuffer, frameSize, &w, &h))
					{
						if (_w == 0 || _h == 0)
						{
							_w = w;
							_h = h;
						}
						else if ((_w != w) || (_h != h))
						{
							printf("=====33333333========= %dx%d,   %dx%d\n", _w, _h, w, h);
							_drop = true;
						}
					}
				}
			}

			if (!_drop)
			{
				info.isHeader	= 0;

				char* newData	= new char[sizeof(start_code) + frameSize];
				memcpy(newData, start_code, sizeof(start_code));
				memcpy(newData + sizeof(start_code), (char*)fReceiveBuffer, frameSize);
 				_cb(_channel, frameSize + sizeof(start_code), newData, info);
			
				delete [] newData;
			}
		}
		else if(strcmp(fSubsession.codecName(), "MP4V-ES") == 0)
		{
#ifdef SEND_CONFIG_HEADER
			unsigned configLen;
			unsigned char* configData = parseGeneralConfigStr(fSubsession.fmtp_config(), configLen);
				
			info.isHeader	= 1;
			_cb(_channel, configLen, (char*)configData, info);
#endif
			
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
		else
		{
			info.isHeader	= 0;
 			_cb(_channel, frameSize, (char*)fReceiveBuffer, info);
		}
	}
  }
  
  // Then continue, to request the next frame of data:
  continuePlaying();
}