MP4Duration MP4AV_GetAudioSampleDuration(
	MP4FileHandle mp4File, 
	MP4TrackId mediaTrackId)
{
	MP4SampleId sampleId = 1;
	MP4SampleId numSamples = 
		MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);

	// find first non-zero size sample
	// we need to search in case an empty audio sample has been added
	// at the beginning of the track to achieve sync with video
	for (; sampleId <= numSamples; sampleId++) {
		if (MP4GetSampleSize(mp4File, mediaTrackId, sampleId) > 0) {
			break;
		}
	}
	if (sampleId > numSamples) {
		// no non-empty sample found
		return MP4_INVALID_DURATION;
	}

	// get sample duration
	return MP4GetSampleDuration(mp4File, mediaTrackId, sampleId);

	// OPTION may want to scan all non-zero sized samples
	// and check that sample durations are +/-1 the same value
}
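A minimal usage sketch (not part of the original sources), assuming the mpeg4ip-style <mp4.h>/<mp4av.h> headers and a hypothetical input file name "audio.mp4"; it opens the file read-only, locates the first audio track and reports the duration returned by the function above:

#include <stdio.h>
#include <mp4.h>	// MP4Read, MP4FindTrackId, MP4Close
#include <mp4av.h>	// MP4AV_GetAudioSampleDuration

int main()
{
	// hypothetical input file
	MP4FileHandle mp4File = MP4Read("audio.mp4");
	if (mp4File == MP4_INVALID_FILE_HANDLE)
		return 1;

	// first audio track in the file
	MP4TrackId audioTrackId =
		MP4FindTrackId(mp4File, 0, MP4_AUDIO_TRACK_TYPE);
	if (audioTrackId == MP4_INVALID_TRACK_ID) {
		MP4Close(mp4File);
		return 1;
	}

	MP4Duration duration =
		MP4AV_GetAudioSampleDuration(mp4File, audioTrackId);

	if (duration == MP4_INVALID_DURATION)
		printf("no non-empty audio sample found\n");
	else
		// the duration is expressed in track timescale ticks
		printf("audio sample duration: %llu ticks\n",
		       (unsigned long long)duration);

	MP4Close(mp4File);
	return 0;
}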
Example #2
extern "C" u_int8_t MP4AV_AudioGetChannels(
	MP4FileHandle mp4File, 
	MP4TrackId audioTrackId)
{
	u_int8_t audioType = 
		MP4GetTrackEsdsObjectTypeId(mp4File, audioTrackId);

	if (audioType == MP4_INVALID_AUDIO_TYPE) {
		return 0;
	}

	if (MP4_IS_MP3_AUDIO_TYPE(audioType)) {
		MP4AV_Mp3Header mp3Hdr =
			GetMp3Header(mp4File, audioTrackId);

		if (mp3Hdr == 0) {
			return 0;
		}
		return MP4AV_Mp3GetChannels(mp3Hdr);

	} else if (MP4_IS_AAC_AUDIO_TYPE(audioType)) {
		u_int8_t* pAacConfig = NULL;
		u_int32_t aacConfigLength;

		MP4GetTrackESConfiguration(
			mp4File, 
			audioTrackId,
			&pAacConfig,
			&aacConfigLength);

		if (pAacConfig == NULL || aacConfigLength < 2) {
			// free(NULL) is a no-op, so this also covers the NULL case
			free(pAacConfig);
			return 0;
		}

		u_int8_t channels =
			MP4AV_AacConfigGetChannels(pAacConfig);

		free(pAacConfig);

		return channels;

	} else if ((audioType == MP4_PCM16_LITTLE_ENDIAN_AUDIO_TYPE) ||
	(audioType == MP4_PCM16_BIG_ENDIAN_AUDIO_TYPE)) {
		u_int32_t samplesPerFrame =
			MP4GetSampleSize(mp4File, audioTrackId, 1) / 2;

		MP4Duration frameDuration =
			MP4GetSampleDuration(mp4File, audioTrackId, 1);

		if (frameDuration == 0) {
			return 0;
		}

		// assumes track time scale == sampling rate
		return samplesPerFrame / frameDuration;
	}

	return 0;
}
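For the PCM16 branch above, a worked example with hypothetical numbers: a 3840-byte first sample holds 3840 / 2 = 1920 16-bit values; if that sample's duration is 960 track ticks and the track timescale equals the 48 kHz sampling rate, the function returns 1920 / 960 = 2 channels. This only works under the assumption the inline comment flags, namely that the track time scale equals the sampling rate.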
QWORD MP4TextTrack::Read(Listener *listener)
{
	int next = 0;
	int last = 0;
	int first = 0;

	// Get the sample duration (number of samples for this frame)
	frameSamples = MP4GetSampleDuration(mp4, track, sampleId);

	// Get size of sample
	frameSize = MP4GetSampleSize(mp4, track, sampleId);

	// Get sample timestamp
	frameTime = MP4GetSampleTime(mp4, track, sampleId);
	//Convert to milliseconds
	frameTime = MP4ConvertFromTrackTimestamp(mp4, track, frameTime, 1000);

	// Allocate buffer for the sample data
	BYTE *data = (BYTE*)malloc(frameSize);
	//Get max data length
	DWORD dataLen = frameSize;

	MP4Timestamp	startTime;
	MP4Duration	duration;
	MP4Duration	renderingOffset;

	// Read the text sample
	if (!MP4ReadSample(
				mp4,				// MP4FileHandle hFile
				track,				// MP4TrackId trackId
				sampleId++,			// MP4SampleId sampleId,
				(u_int8_t **) &data,		// u_int8_t** ppBytes
				(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
				&startTime,			// MP4Timestamp* pStartTime
				&duration,			// MP4Duration* pDuration
				&renderingOffset,		// MP4Duration* pRenderingOffset
				NULL				// bool* pIsSyncSample
	))
	{
		//Free buffer and signal the last sample
		free(data);
		return MP4_INVALID_TIMESTAMP;
	}

	//Log("Got text frame [time:%d,start:%d,duration:%d,length:%d,offset:%d\n",frameTime,startTime,duration,dataLen,renderingOffset);
	//Dump(data,dataLen);
	//Get length
	if (dataLen>2)
	{
		//Get string length
		DWORD len = data[0]<<8 | data[1];
		//Set frame
		frame.SetFrame(startTime,data+2+renderingOffset,len-renderingOffset-2);
		//call listener
		if (listener)
			//Call it
			listener->onTextFrame(frame);
	}
	
	//Free buffer
	free(data);

	// Return the next send time
	return GetNextFrameTime();
}
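The sample read above follows the 3GPP timed-text layout: a 16-bit big-endian text length followed by the text bytes (optionally trailed by modifier boxes, which are ignored here). A standalone sketch of that extraction, using a hypothetical helper name ExtractTextPayload:

#include <stdint.h>
#include <string>

// Hypothetical helper: pull the UTF-8 text out of a 3GPP timed-text sample.
// data/dataLen are the raw sample bytes as returned by MP4ReadSample.
static std::string ExtractTextPayload(const uint8_t* data, uint32_t dataLen)
{
	if (data == NULL || dataLen < 2)
		return std::string();

	// 16-bit big-endian length of the text that follows
	uint32_t textLen = (data[0] << 8) | data[1];

	// clamp against the actual sample size
	if (textLen > dataLen - 2)
		textLen = dataLen - 2;

	return std::string(reinterpret_cast<const char*>(data + 2), textLen);
}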
Example #4
//#define DEBUG_G711 1
extern "C" bool G711Hinter (MP4FileHandle mp4file, 
			    MP4TrackId trackid,
			    uint16_t maxPayloadSize)
{
  uint32_t numSamples;
  uint8_t audioType;
  MP4SampleId sampleId;
  uint32_t sampleSize;
  MP4TrackId hintTrackId;
  uint8_t payload;
  uint32_t bytes_this_hint;
  uint32_t sampleOffset;

  numSamples = MP4GetTrackNumberOfSamples(mp4file, trackid);

  if (numSamples == 0) return false;

  audioType = MP4GetTrackEsdsObjectTypeId(mp4file, trackid);

  if (audioType != MP4_ALAW_AUDIO_TYPE &&
      audioType != MP4_ULAW_AUDIO_TYPE) return false;

  hintTrackId = MP4AddHintTrack(mp4file, trackid);

  if (hintTrackId == MP4_INVALID_TRACK_ID) {
    return false;
  }
  const char *type;

  if (audioType == MP4_ALAW_AUDIO_TYPE) {
    payload = 8;
    type = "PCMA";
  } else {
    payload = 0;
    type = "PCMU";
  }

  MP4SetHintTrackRtpPayload(mp4file, hintTrackId, type, &payload, 0,NULL,
			    false);

  MP4Duration sampleDuration;
  bool have_skip;
  sampleId = 1;
  sampleSize = MP4GetSampleSize(mp4file, trackid, sampleId);
  sampleDuration = MP4GetSampleDuration(mp4file, trackid, sampleId);
  have_skip = sampleDuration != sampleSize;
  sampleOffset = 0;
  bytes_this_hint = 0;

  if (maxPayloadSize > 160) maxPayloadSize = 160;

  while (1) {
    if (bytes_this_hint == 0) {
#ifdef DEBUG_G711
      printf("Adding hint/packet\n");
#endif
      MP4AddRtpHint(mp4file, hintTrackId);
      MP4AddRtpPacket(mp4file, hintTrackId, false); // marker bit 0
    }
    uint16_t bytes_left_this_packet;
    bytes_left_this_packet = maxPayloadSize - bytes_this_hint;
    if (sampleSize >= bytes_left_this_packet) {
      MP4AddRtpSampleData(mp4file, hintTrackId, 
			  sampleId, sampleOffset, bytes_left_this_packet);
      bytes_this_hint += bytes_left_this_packet;
      sampleSize -= bytes_left_this_packet;
      sampleOffset += bytes_left_this_packet;
#ifdef DEBUG_G711
      printf("Added sample with %u bytes\n", bytes_left_this_packet);
#endif
    } else {
      MP4AddRtpSampleData(mp4file, hintTrackId, 
			  sampleId, sampleOffset, sampleSize);
      bytes_this_hint += sampleSize;
#ifdef DEBUG_G711
      printf("Added sample with %u bytes\n", sampleSize);
#endif
      sampleSize = 0;
    }

    if (bytes_this_hint >= maxPayloadSize) {
      // Write the hint
      // duration is bytes written
      MP4WriteRtpHint(mp4file, hintTrackId, bytes_this_hint);
#ifdef DEBUG_G711
      printf("Finished packet - bytes %u\n", bytes_this_hint);
#endif
      bytes_this_hint = 0;
    }
    if (sampleSize == 0) {
      // next sample
      if (have_skip && bytes_this_hint != 0) {
#ifdef DEBUG_G711
	printf("duration - ending packet - bytes %u\n", bytes_this_hint);
#endif
	MP4WriteRtpHint(mp4file, hintTrackId, bytes_this_hint);
	bytes_this_hint = 0;
      }
      sampleId++;
      if (sampleId > numSamples) {
	// finish it and exit
	if (bytes_this_hint != 0) {
	  MP4WriteRtpHint(mp4file, hintTrackId, bytes_this_hint);
	}
	return true;
      }
      sampleSize = MP4GetSampleSize(mp4file, trackid, sampleId);
      sampleDuration = MP4GetSampleDuration(mp4file, trackid, sampleId);
      have_skip = sampleDuration != sampleSize;
#ifdef DEBUG_G711
      printf("Next sample %u - size %u %u\n", sampleId, sampleSize,
	     have_skip);
#endif
      sampleOffset = 0;
    }
  }
	
  return true; // will never reach here
}
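A hinter like this is run against a file that is open for modification. A minimal driver sketch (not part of the original sources), assuming the mpeg4ip-style MP4Modify entry point and a hypothetical file name; 160 bytes of payload is 20 ms of G.711 at 8 kHz:

#include <mp4.h>

// Hypothetical driver: hint the first audio track of a G.711 file for RTP.
bool HintG711File(const char* fileName)
{
  MP4FileHandle mp4file = MP4Modify(fileName);
  if (mp4file == MP4_INVALID_FILE_HANDLE)
    return false;

  MP4TrackId trackId = MP4FindTrackId(mp4file, 0, MP4_AUDIO_TRACK_TYPE);
  if (trackId == MP4_INVALID_TRACK_ID) {
    MP4Close(mp4file);
    return false;
  }

  // G711Hinter itself rejects tracks that are not alaw/ulaw
  bool ok = G711Hinter(mp4file, trackId, 160);

  MP4Close(mp4file);
  return ok;
}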
Example #5
bool MP4AV_RfcIsmaConcatenator(
	MP4FileHandle mp4File, 
	MP4TrackId mediaTrackId, 
	MP4TrackId hintTrackId,
	u_int8_t samplesThisHint, 
	MP4SampleId* pSampleIds, 
	MP4Duration hintDuration,
	u_int16_t maxPayloadSize)
{
  // handle degenerate case
  if (samplesThisHint == 0) {
    return true;
  }

  u_int8_t auPayloadHdrSize;

  // LATER would be more efficient if this were a parameter
  u_int8_t mpeg4AudioType =
    MP4GetTrackAudioMpeg4Type(mp4File, mediaTrackId);

  if (mpeg4AudioType == MP4_MPEG4_CELP_AUDIO_TYPE) {
    auPayloadHdrSize = 1;
  } else {
    auPayloadHdrSize = 2;
  }

  // construct the new hint
  if (MP4AddRtpHint(mp4File, hintTrackId) == false ||
      MP4AddRtpPacket(mp4File, hintTrackId, true) == false) return false;

  u_int8_t payloadHeader[2];

  u_int16_t numHdrBits = samplesThisHint * auPayloadHdrSize * 8;
  payloadHeader[0] = numHdrBits >> 8;
  payloadHeader[1] = numHdrBits & 0xFF;

  if (MP4AddRtpImmediateData(mp4File, hintTrackId,
			     (u_int8_t*)&payloadHeader, 
			     sizeof(payloadHeader)) == false) return false;

  u_int8_t i;

  // first the headers
  for (i = 0; i < samplesThisHint; i++) {
    MP4SampleId sampleId = pSampleIds[i];

    u_int32_t sampleSize = 
      MP4GetSampleSize(mp4File, mediaTrackId, sampleId);

    if (auPayloadHdrSize == 1) {
      // AU payload header is 6 bits of size
      // followed by 2 bits of the difference between sampleIds - 1
      payloadHeader[0] = sampleSize << 2;

    } else { // auPayloadHdrSize == 2
      // AU payload header is 13 bits of size
      // followed by 3 bits of the difference between sampleIds - 1
      payloadHeader[0] = sampleSize >> 5;
      payloadHeader[1] = (sampleSize & 0x1F) << 3;
    }

    if (i > 0) {
      payloadHeader[auPayloadHdrSize - 1] 
	|= ((sampleId - pSampleIds[i-1]) - 1); 
    }
#if 0
    printf("sample %u size %u %02x %02x prev sample %d\n", 
	   sampleId, sampleSize, payloadHeader[0],
	   payloadHeader[1], pSampleIds[i-1]);
#endif

    if (MP4AddRtpImmediateData(mp4File, hintTrackId,
			       (u_int8_t*)&payloadHeader, 
			       auPayloadHdrSize) == false) 
      return false;
  }

  // then the samples
  for (i = 0; i < samplesThisHint; i++) {
    MP4SampleId sampleId = pSampleIds[i];

    u_int32_t sampleSize = 
      MP4GetSampleSize(mp4File, mediaTrackId, sampleId);

    if (MP4AddRtpSampleData(mp4File, hintTrackId, 
			    sampleId, 0, sampleSize) == false) return false;
  }

  // write the hint
  return MP4WriteRtpHint(mp4File, hintTrackId, hintDuration);
}
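A worked example of the 2-byte AAC AU header built above, for a hypothetical 371-byte sample: payloadHeader[0] = 371 >> 5 = 0x0B and payloadHeader[1] = (371 & 0x1F) << 3 = 0x98, i.e. a 13-bit AU-size field of 371 with the low 3 bits left at 0. For every sample after the first, those low bits carry (sampleId - previous sampleId) - 1, which is 0 when the samples are consecutive.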
Example #6
extern "C" bool MP4AV_Rfc3016LatmHinter (MP4FileHandle mp4File,
        MP4TrackId mediaTrackId,
        u_int16_t maxPayloadSize)
{
    u_int32_t numSamples = MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);
    u_int32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);
    MP4Duration sampleDuration =
        MP4AV_GetAudioSampleDuration(mp4File, mediaTrackId);

    if (sampleDuration == MP4_INVALID_DURATION) {
        return false;
    }

    if (numSamples == 0 || maxSampleSize == 0) {
        return false;
    }


    /* get the mpeg4 audio configuration */
    u_int8_t* pAudioSpecificConfig;
    u_int32_t AudioSpecificConfigSize;

    if (MP4GetTrackESConfiguration(mp4File, mediaTrackId,
                                   &pAudioSpecificConfig,
                                   &AudioSpecificConfigSize) == false)
        return false;

    if (pAudioSpecificConfig == NULL ||
            AudioSpecificConfigSize == 0) {
        CHECK_AND_FREE(pAudioSpecificConfig);
        return false;
    }

    uint8_t channels = MP4AV_AacConfigGetChannels(pAudioSpecificConfig);
    uint32_t freq = MP4AV_AacConfigGetSamplingRate(pAudioSpecificConfig);
    uint8_t type = MP4AV_AacConfigGetAudioObjectType(pAudioSpecificConfig);

    uint8_t *pConfig;
    uint32_t configSize;

    MP4AV_LatmGetConfiguration(&pConfig, &configSize,
                               pAudioSpecificConfig, AudioSpecificConfigSize);
    free(pAudioSpecificConfig);

    if (pConfig == NULL || configSize == 0) {
        CHECK_AND_FREE(pConfig);
        return false;
    }

    MP4TrackId hintTrackId =
        MP4AddHintTrack(mp4File, mediaTrackId);

    if (hintTrackId == MP4_INVALID_TRACK_ID) {
        free(pConfig);
        return false;
    }
    u_int8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;

    char buffer[10];
    if (channels != 1) {
        snprintf(buffer, sizeof(buffer), "%u", channels);
    }

    /* convert it into ASCII form */
    char* sConfig = MP4BinaryToBase16(pConfig, configSize);
    free(pConfig);
    if (sConfig == NULL ||
            MP4SetHintTrackRtpPayload(mp4File, hintTrackId,
                                      "MP4A-LATM", &payloadNumber, 0,
                                      channels != 1 ? buffer : NULL) == false) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }

    uint32_t profile_level;
    // from gpac code
    switch (type) {
    case 2:
        if (channels <= 2) profile_level = freq <= 24000 ? 0x28 : 0x29;
        else profile_level = freq <= 48000 ? 0x2a : 0x2b;
        break;
    case 5:
        if (channels <= 2) profile_level = freq < 24000 ? 0x2c : 0x2d;
        else profile_level = freq <= 48000 ? 0x2e : 0x2f;
        break;
    default:
        if (channels <= 2) profile_level = freq < 24000 ? 0x0e : 0x0f;
        else profile_level = 0x10;
        break;
    }

    /* create the appropriate SDP attribute */
    char* sdpBuf = (char*)malloc(strlen(sConfig) + 128);

    if (sdpBuf == NULL) {
        free(sConfig);
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }
    snprintf(sdpBuf,
             strlen(sConfig) + 128,
             "a=fmtp:%u profile-level-id=%u; cpresent=0; config=%s;\015\012",
             payloadNumber,
             profile_level,
             sConfig);

    /* add this to the track's sdp */
    bool val = MP4AppendHintTrackSdp(mp4File, hintTrackId, sdpBuf);

    free(sConfig);
    free(sdpBuf);
    if (val == false) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }

    for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
        uint8_t buffer[32];
        uint32_t offset = 0;
        uint32_t sampleSize =
            MP4GetSampleSize(mp4File, mediaTrackId, sampleId);
        uint32_t size_left = sampleSize;

        // LATM PayloadLengthInfo: each 0xff byte means 255 more payload bytes
        // follow; the final byte (< 0xff) carries the remainder, which is 0
        // when the sample size is an exact multiple of 255
        while (size_left >= 0xff) {
            buffer[offset++] = 0xff;
            size_left -= 0xff;
        }
        buffer[offset++] = size_left;
        if (MP4AddRtpHint(mp4File, hintTrackId) == false ||
                MP4AddRtpPacket(mp4File, hintTrackId, true) == false ||
                MP4AddRtpImmediateData(mp4File, hintTrackId,
                                       buffer, offset) == false ||
                MP4AddRtpSampleData(mp4File, hintTrackId,
                                    sampleId, 0, sampleSize) == false ||
                MP4WriteRtpHint(mp4File, hintTrackId, sampleDuration) == false) {
            MP4DeleteTrack(mp4File, hintTrackId);
            return false;
        }
    }
    return true;

}
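The per-sample while loop above builds the LATM PayloadLengthInfo: each 0xff byte stands for 255 bytes of payload and the final byte (less than 0xff) carries the remainder. For a hypothetical 600-byte AAC sample the immediate data is 0xff 0xff 0x5a (255 + 255 + 90 = 600), after which the whole sample is attached as RTP sample data and the hint is written with the constant sample duration obtained earlier.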
static void DumpTrack (MP4FileHandle mp4file, MP4TrackId tid, 
		       bool dump_off, bool dump_rend)
{
  uint32_t numSamples;
  MP4SampleId sid;
  uint8_t *buffer;
  uint32_t max_frame_size;
  uint32_t timescale;
  uint64_t msectime;
  const char *media_data_name;
  uint32_t len_size = 0;
  uint8_t video_type = 0;
  numSamples = MP4GetTrackNumberOfSamples(mp4file, tid);
  max_frame_size = MP4GetTrackMaxSampleSize(mp4file, tid) + 4;
  media_data_name = MP4GetTrackMediaDataName(mp4file, tid);
  if (strcasecmp(media_data_name, "avc1") == 0) {
    MP4GetTrackH264LengthSize(mp4file, tid, &len_size);
  } else if (strcasecmp(media_data_name, "mp4v") == 0) {
    video_type = MP4GetTrackEsdsObjectTypeId(mp4file, tid);
  }
  buffer = (uint8_t *)malloc(max_frame_size);
  if (buffer == NULL) {
    printf("couldn't get buffer\n");
    return;
  }

  timescale = MP4GetTrackTimeScale(mp4file, tid);
  printf("mp4file %s, track %d, samples %d, timescale %d\n", 
	 Mp4FileName, tid, numSamples, timescale);

  for (sid = 1; sid <= numSamples; sid++) {
    MP4Timestamp sampleTime;
    MP4Duration sampleDuration, sampleRenderingOffset;
    bool isSyncSample = FALSE;
    bool ret;
    u_int8_t *temp;
    uint32_t this_frame_size = max_frame_size;
    temp = buffer;
    ret = MP4ReadSample(mp4file, 
			tid,
			sid,
			&temp,
			&this_frame_size,
			&sampleTime,
			&sampleDuration,
			&sampleRenderingOffset,
			&isSyncSample);

    // skip samples that fail to read rather than printing garbage values
    if (ret == false) {
      printf("sample %u read failed\n", sid);
      continue;
    }

    msectime = sampleTime;
    msectime *= TO_U64(1000);
    msectime /= timescale;

    printf("sampleId %6d, size %5u time "U64"("U64")",
	  sid,  MP4GetSampleSize(mp4file, tid, sid), 
	   sampleTime, msectime);
    if (dump_rend) printf(" %6"U64F, sampleRenderingOffset);
    if (strcasecmp(media_data_name, "mp4v") == 0) {
      if (MP4_IS_MPEG4_VIDEO_TYPE(video_type))
	ParseMpeg4(temp, this_frame_size, dump_off);
    } else if (strcasecmp(media_data_name, "avc1") == 0) {
      ParseH264(temp, this_frame_size, len_size, dump_off);
    }
    printf("\n");
  }
}
QWORD MP4TextTrack::ReadPrevious(QWORD time,Listener *listener)
{
	//Check it is the first
	if (sampleId==1)
	{
		//Set empty frame
		frame.SetFrame(time,(wchar_t*)NULL,0);
		//call listener
		if (listener)
			//Call it
			listener->onTextFrame(frame);
		//Exit
		return 1;
	}

	//The previous one
	MP4SampleId prevId = sampleId-1;

	//If it was not found
	if (sampleId==MP4_INVALID_SAMPLE_ID)
		//The latest
		prevId = MP4GetTrackNumberOfSamples(mp4,track);

	// Get size of sample
	frameSize = MP4GetSampleSize(mp4, track, prevId);

	// Allocate buffer for the sample data
	BYTE *data = (BYTE*)malloc(frameSize);
	//Get max data length
	DWORD dataLen = frameSize;

	MP4Timestamp	startTime;
	MP4Duration	duration;
	MP4Duration	renderingOffset;

	// Read the previous text sample
	if (!MP4ReadSample(
				mp4,				// MP4FileHandle hFile
				track,				// MP4TrackId trackId
				prevId,				// MP4SampleId sampleId,
				(u_int8_t **) &data,		// u_int8_t** ppBytes
				(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
				&startTime,			// MP4Timestamp* pStartTime
				&duration,			// MP4Duration* pDuration
				&renderingOffset,		// MP4Duration* pRenderingOffset
				NULL				// bool* pIsSyncSample
	))
	{
		//Free buffer and signal the last sample
		free(data);
		return MP4_INVALID_TIMESTAMP;
	}

	//Get length
	if (dataLen>2)
	{
		//Get string length
		DWORD len = data[0]<<8 | data[1];
		//Set frame
		frame.SetFrame(time,data+2+renderingOffset,len-renderingOffset-2);
		//call listener
		if (listener)
			//Call it
			listener->onTextFrame(frame);
	}

	//Free buffer
	free(data);

	// Exit
	return 1;
}
QWORD MP4RtpTrack::Read(Listener *listener)
{
	int last = 0;
	uint8_t* data;
	bool isSyncSample;

	// If it's the first packet of a frame
	if (!numHintSamples)
	{
		// Get number of rtp packets for this sample
		if (!MP4ReadRtpHint(mp4, hint, sampleId, &numHintSamples))
		{
			//Print error
			Error("Error reading hintt");
			//Exit
			return MP4_INVALID_TIMESTAMP;
		}

		// Get the sample duration (number of samples for this frame)
		frameSamples = MP4GetSampleDuration(mp4, hint, sampleId);

		// Get size of sample
		frameSize = MP4GetSampleSize(mp4, hint, sampleId);

		// Get sample timestamp
		frameTime = MP4GetSampleTime(mp4, hint, sampleId);
		//Convert to milliseconds
		frameTime = MP4ConvertFromTrackTimestamp(mp4, hint, frameTime, 1000);

		// Check if it is H264 and it is a Sync frame
		if (codec==VideoCodec::H264 && MP4GetSampleSync(mp4,track,sampleId))
			// Send SEI info
			SendH263SEI(listener);

		//Get max data length
		BYTE *data = NULL;
		DWORD dataLen = 0;
		MP4Timestamp	startTime;
		MP4Duration	duration;
		MP4Duration	renderingOffset;

		//Get values
		data	= frame->GetData();
		dataLen = frame->GetMaxMediaLength();
		
		// Read next rtp packet
		if (!MP4ReadSample(
			mp4,				// MP4FileHandle hFile
			track,				// MP4TrackId trackId
			sampleId,			// MP4SampleId sampleId,
			(u_int8_t **) &data,		// u_int8_t** ppBytes
			(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
			&startTime,			// MP4Timestamp* pStartTime
			&duration,			// MP4Duration* pDuration
			&renderingOffset,		// MP4Duration* pRenderingOffset
			&isSyncSample			// bool* pIsSyncSample
			))
		{
			Error("Error reading sample");
			//Last
			return MP4_INVALID_TIMESTAMP;
		}

		//Check type
		if (media == MediaFrame::Video)
		{
			//Get video frame
			VideoFrame *video = (VideoFrame*)frame;
			//Set length
			video->SetLength(dataLen);
			//Timestamp
			video->SetTimestamp(startTime*90000/timeScale);
			//Set intra
			video->SetIntra(isSyncSample);
		} else {
			//Get Audio frame
			AudioFrame *audio = (AudioFrame*)frame;
			//Set length
			audio->SetLength(dataLen);
			//Timestamp
			audio->SetTimestamp(startTime*8000/timeScale);
		}

		//Check listener
		if (listener)
			//Frame callback
			listener->onMediaFrame(*frame);
	}

	// if it's the last
	if (packetIndex + 1 == numHintSamples)
		//Set last mark
		last = 1;
	
	// Set mark bit
	rtp.SetMark(last);

	// Get data pointer
	data = rtp.GetMediaData();
	//Get max data lenght
	DWORD dataLen = rtp.GetMaxMediaLength();

	// Read next rtp packet
	if (!MP4ReadRtpPacket(
				mp4,				// MP4FileHandle hFile
				hint,				// MP4TrackId hintTrackId
				packetIndex++,			// u_int16_t packetIndex
				(u_int8_t **) &data,		// u_int8_t** ppBytes
				(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
				0,				// u_int32_t ssrc DEFAULT(0)
				0,				// bool includeHeader DEFAULT(true)
				1				// bool includePayload DEFAULT(true)
	))
	{
		//Error
		Error("Error reading packet [%d,%d,%d]\n", hint, track,packetIndex);
		//Exit
		return MP4_INVALID_TIMESTAMP;
	}
		

	//Check
	if (dataLen>rtp.GetMaxMediaLength())
	{
		//Error
		Error("RTP packet too big [%u,%u]\n",dataLen,rtp.GetMaxMediaLength());
		//Exit
		return MP4_INVALID_TIMESTAMP;
	}
	
	//Set length
	rtp.SetMediaLength(dataLen);
	// Write frame
	listener->onRTPPacket(rtp);

	// Are we the last packet in a hint?
	if (last)
	{
		// Reset the packet index for the next hint
		packetIndex = 0;
		// Go for next sample
		sampleId++;
		numHintSamples = 0;
		//Return next frame time
		return GetNextFrameTime();
	}

	// Not the last packet of the hint; the next packet goes out at the same frame time
	return frameTime;
}
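A worked example of the timestamp conversion above, with hypothetical numbers: a video sample whose startTime is 4500 ticks on a 1000-tick-per-second track maps to 4500 * 90000 / 1000 = 405000 on the 90 kHz RTP clock; an audio sample at the same startTime maps to 4500 * 8000 / 1000 = 36000 on an 8 kHz clock. The frameTime returned to the caller is separately converted to milliseconds via MP4ConvertFromTrackTimestamp.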