Example #1
void AudioGrabber::audioReceived(float * input, int bufferSize, int nChannels){
    // Wrap the raw buffer in a reference-counted frame, notify listeners,
    // then drop this function's reference.
    AudioFrame * frame = new AudioFrame(input,bufferSize,nChannels);
    newFrameEvent.notify(this,*frame);
    frame->release();

    //cout << "audio: " << frame->getTimestamp().epochTime() << "\n";
}
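The event consumer is not shown here; below is a minimal sketch of one, assuming a Poco-style event (matching the notify(this, *frame) call above) and the retain()/release() reference counting that AudioFrame uses in the later examples. The class and member names are assumptions.

#include <vector>

// Hypothetical listener; not part of the original code.
class AudioConsumer {
public:
    void onNewFrame(const void* sender, AudioFrame& frame) {
        frame.retain();              // keep the frame alive past the callback
        pending_.push_back(&frame);  // consume (and release) later
    }
private:
    std::vector<AudioFrame*> pending_;
};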
Example #2
void OpenALRenderer::queueFrame(const AudioFrame& frame)
{
    assert(frame.getFrameData());
    alBufferData(m_AudioBuffers[m_CurrentBuffer], m_AudioFormat, frame.getFrameData(), frame.getDataSize(), m_Frequency);
    alSourceQueueBuffers(m_AudioSource, 1, &m_AudioBuffers[m_CurrentBuffer]);
    m_PtsQueue.push_back(frame.getPts());

    play();

    ++m_CurrentBuffer;
    m_CurrentBuffer %= NUM_BUFFERS;

    assert(alGetError() == AL_NO_ERROR);
}
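queueFrame() only ever queues; with OpenAL, buffers that have finished playing must be unqueued before they can be refilled, or the fixed pool of NUM_BUFFERS is exhausted. A sketch of that counterpart, assuming the same members as above:

// Hypothetical counterpart: reclaim buffers OpenAL has finished playing.
void OpenALRenderer::unqueueProcessedBuffers()
{
    ALint processed = 0;
    alGetSourcei(m_AudioSource, AL_BUFFERS_PROCESSED, &processed);
    while (processed-- > 0) {
        ALuint buffer = 0;
        alSourceUnqueueBuffers(m_AudioSource, 1, &buffer);
    }
    assert(alGetError() == AL_NO_ERROR);
}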
Example #3
void
MediaDecoder::writeToRingBuffer(const AudioFrame& decodedFrame,
                                RingBuffer& rb, const AudioFormat outFormat)
{
    const auto libav_frame = decodedFrame.pointer();
    decBuff_.setFormat(AudioFormat{
        (unsigned) libav_frame->sample_rate,
        (unsigned) decoderCtx_->channels
    });
    decBuff_.resize(libav_frame->nb_samples);

    if ( decoderCtx_->sample_fmt == AV_SAMPLE_FMT_FLTP ) {
        decBuff_.convertFloatPlanarToSigned16(libav_frame->extended_data,
                                              libav_frame->nb_samples,
                                              decoderCtx_->channels);
    } else if ( decoderCtx_->sample_fmt == AV_SAMPLE_FMT_S16 ) {
        decBuff_.deinterleave(reinterpret_cast<const AudioSample*>(libav_frame->data[0]),
                              libav_frame->nb_samples, decoderCtx_->channels);
    }
    if ((unsigned)libav_frame->sample_rate != outFormat.sample_rate) {
        if (!resampler_) {
            RING_DBG("Creating audio resampler");
            resampler_.reset(new Resampler(outFormat));
        }
        resamplingBuff_.setFormat({(unsigned) outFormat.sample_rate, (unsigned) decoderCtx_->channels});
        resamplingBuff_.resize(libav_frame->nb_samples);
        resampler_->resample(decBuff_, resamplingBuff_);
        rb.put(resamplingBuff_);
    } else {
        rb.put(decBuff_);
    }
}
Example #4
AudioFrame * myAudioBuffer::getAudioFrame(pmTimeDiff time) {
    AudioFrame * frame = NULL;
    if(size()>0) {
        // Map the elapsed time (microseconds) to a frame counted back from the newest.
        int frameback = CLAMP((int)((float)time/1000000.0*(float)fps),1,size());
        int currentPos = size()-frameback;
        frame = frames[currentPos];

        /*if(((float)time/1000000.0*(float)fps)<size() && ((float)time/1000000.0*(float)fps)>=0)
            frame= frames[frames.size()-1-(int)((float)time/1000000.0*(float)fps)];
        else if(((float)time/1000000.0*(float)fps)<0)
            frame= frames[size()-1];
        else
            frame= frames[0];*/
        frame->retain();
    }
    return frame;
}
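The retain() above hands the caller a reference it must balance. A hypothetical call site; the buffer and helper names are assumptions, not from the original:

// Hypothetical usage: the caller owns one reference and must release it.
AudioFrame * frame = audioBuffer.getAudioFrame(elapsedMicros);
if (frame != NULL) {
    processSamples(frame->getAudioFrame(), frame->getBufferSize(), frame->getChannels());
    frame->release();  // balance the retain() taken inside getAudioFrame()
}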
Example #5
bool AudioEncoderFFmpeg::encode(const AudioFrame &frame)
{
    DPTR_D(AudioEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        const AudioFormat fmt(frame.format());
        f->format = fmt.sampleFormatFFmpeg();
        f->channel_layout = fmt.channelLayoutFFmpeg();
        // f->channels = fmt.channels(); //remove? not available in libav9
        // must be (not the last frame) exactly frame_size unless CODEC_CAP_VARIABLE_FRAME_SIZE is set (frame_size==0)
        // TODO: mpv use pcmhack for avctx.frame_size==0. can we use input frame.samplesPerChannel?
        f->nb_samples = d.frame_size;
        /// f->quality = d.avctx->global_quality; //TODO
        // TODO: record last pts. mpv compute pts internally and also use playback time
        f->pts = int64_t(frame.timestamp()*fmt.sampleRate()); // TODO
        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        // bytes between 2 samples on a plane. TODO: add to AudioFormat? what about bytesPerFrame?
        const int sample_stride = fmt.isPlanar() ? fmt.bytesPerSample() : fmt.bytesPerSample()*fmt.channels();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = f->nb_samples * sample_stride;// frame.bytesPerLine(i); //
            f->extended_data[i] = (uint8_t*)frame.constBits(i);
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_audio2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        //qWarning("error avcodec_encode_audio2: %s", av_err2str(ret));
        return false;
    }
    if (!got_packet) {
        qWarning("no packet got");
        return false; //false
    }
    // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
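avcodec_encode_audio2() has been deprecated in FFmpeg for some time; the same step with the send/receive API looks roughly like this. A sketch, not a drop-in replacement for the class above:

// Sketch of the newer FFmpeg encode API. 'avctx' corresponds to d.avctx and
// 'f' to the AVFrame built above (a NULL frame flushes the encoder).
static int encodeWithSendReceive(AVCodecContext *avctx, AVFrame *f, AVPacket *pkt)
{
    int ret = avcodec_send_frame(avctx, f);
    if (ret < 0)
        return ret;
    // AVERROR(EAGAIN) here means the encoder needs more input before it
    // can emit a packet; that is not a failure.
    return avcodec_receive_packet(avctx, pkt);
}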
Example #6
void AudioStretcher::write( AudioFrame* frame ) {
    int readFrames = frame->size / (audio.channels*2);
    //convert s16 to float
    for( int i=0; i < readFrames; i++ )
        for( int c=0; c<audio.channels; c++ )
            buffer[c][i] = frame->data[i*audio.channels+c] / 32768.f;
    rubberband->process( buffer, readFrames, false );

    if( !audioOutput.empty() ) {
        int actualFrames = rubberband->retrieve( buffer, audio.frequency );
        // Separate name so the 'frame' parameter is not shadowed.
        AudioFrame* outFrame = new AudioFrame( actualFrames * (2*audio.channels) );
        //convert float to s16
        for( int i=0; i<actualFrames; i++ )
            for( int c=0; c<audio.channels; c++ )
                outFrame->data[i*audio.channels+c] = buffer[c][i] * 32768.f;
        foreach( AudioOutput* output, audioOutput ) output->write( outFrame->copy() );
        outFrame->free();
    }
    frame->free();
}
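A single retrieve() call may not drain the stretcher: Rubber Band buffers output internally and available() reports how many frames are ready. A sketch of a complete drain (inside write(), after process()); bufferCapacity, the frame capacity of buffer, is an assumption, not a member of the original class, and std::min comes from <algorithm>:

// Drain everything the stretcher has ready rather than making one retrieve() call.
int available = rubberband->available();
while( available > 0 ) {
    int got = rubberband->retrieve( buffer, std::min( available, bufferCapacity ) );
    // ...convert 'got' frames to s16 and forward them as above...
    available = rubberband->available();
}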
Example #7
	//-------------------------------------------------------------------------------
	void AudioBuffer::newAudioFrame(AudioFrame &frame)
	{
		if(size()==0)initTime=frame.getTimestamp();
	
		// AudioFrame management: store the AudioFrame in the queue.
		totalFrames++;
		frames.push_back(&frame);
		if(size()>maxSize){
			frames.erase(frames.begin());
		}
		
		newFrameEvent.notify(this,frame);
	}
Example #8
	//----------------------------------------------------------------------------------------
	void AudioBufferSamples::newAudioFrame(AudioFrame &frame)
	{		
		if(size()==0)initTime=frame.getTimestamp();
		
		// AudioFrame management: store the AudioFrame in the queue.
		frames.push_back(frame);

		if(size()>maxSize)
		{
			frames.erase(frames.begin());
		}
		// Notify listeners of the newly stored frame.
		newFrameEvent.notify(this,frame);
	}
Example #9
AudioFrame * AudioHeader::getAudioFrame(int position,float density){
    position = CLAMP(position,0,buffer->size());
    AudioFrame * currentFrame = buffer->getAudioFrame(position);
    vector<AudioFrame*> prevFrames;
    vector<AudioFrame*> nextFrames;

    for(int i=0;i<density+1;i++){
        if(buffer->size()>position+i+1){
            AudioFrame * frame = buffer->getAudioFrame(position+i);
            if(frame){
                nextFrames.push_back(frame);
            }
        }
    }
    for(int i=0;i>-density-1;i--){
        if(position+i>0){
            AudioFrame * frame = buffer->getAudioFrame(position+i);
            if(frame){
                nextFrames.push_back(frame);
            }
        }
    }

    int currentFrameSize=currentFrame->getBufferSize()*currentFrame->getChannels();
    std::vector<float> resultBuffer(currentFrameSize);   // a VLA here is non-standard C++
    memcpy(resultBuffer.data(),currentFrame->getAudioFrame(),sizeof(float)*currentFrameSize);

    for(int i=0;i<prevFrames.size();i++){
        int frameSize=prevFrames[i]->getBufferSize()*prevFrames[i]->getChannels();
        int offset=CLAMP((int)((float)frameSize*(density-(float)i)),0,frameSize);
        for(int j=frameSize-offset;j<frameSize;j++){
            resultBuffer[frameSize-j]=resultBuffer[frameSize-j]+prevFrames[i]->getAudioFrame()[j];
        }
        prevFrames[i]->release();
    }

    for(int i=0;i<nextFrames.size();i++){
        int frameSize=nextFrames[i]->getBufferSize()*nextFrames[i]->getChannels();
        int offset=CLAMP((int)((float)frameSize*(density-(float)i)),0,frameSize);
        for(int j=0;j<offset;j++){
            resultBuffer[frameSize-j]=resultBuffer[frameSize-j]+nextFrames[i]->getAudioFrame()[j];
        }
        nextFrames[i]->release();
    }

    AudioFrame * resultFrame=
        new AudioFrame( resultBuffer.data(),
                        currentFrame->getBufferSize(),
                        currentFrame->getChannels());
    currentFrame->release();
    return resultFrame;
}
Example #10
MediaDecoder::Status
MediaDecoder::decode(const AudioFrame& decodedFrame)
{
    const auto frame = decodedFrame.pointer();

    AVPacket inpacket;
    av_init_packet(&inpacket);

    int ret = av_read_frame(inputCtx_, &inpacket);
    if (ret == AVERROR(EAGAIN)) {
        return Status::Success;
    } else if (ret == AVERROR_EOF) {
        return Status::EOFError;
    } else if (ret < 0) {
        char errbuf[64];
        av_strerror(ret, errbuf, sizeof(errbuf));
        RING_ERR("Couldn't read frame: %s\n", errbuf);
        return Status::ReadError;
    }

    // is this a packet from the audio stream?
    if (inpacket.stream_index != streamIndex_) {
        av_packet_unref(&inpacket);
        return Status::Success;
    }

    int frameFinished = 0;
    int len = avcodec_decode_audio4(decoderCtx_, frame,
                                    &frameFinished, &inpacket);
    av_packet_unref(&inpacket);

    if (len <= 0) {
        return Status::DecodeError;
    }

    if (frameFinished) {
        if (emulateRate_ and frame->pkt_pts != AV_NOPTS_VALUE) {
            auto frame_time = getTimeBase()*(frame->pkt_pts - avStream_->start_time);
            auto target = startTime_ + static_cast<std::int64_t>(frame_time.real() * 1e6);
            auto now = av_gettime();
            if (target > now) {
                std::this_thread::sleep_for(std::chrono::microseconds(target - now));
            }
        }
        return Status::FrameFinished;
    }

    return Status::Success;
}
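Like avcodec_encode_audio2() in Example #5, avcodec_decode_audio4() is deprecated. The same step with FFmpeg's send/receive API, as a sketch:

// Sketch of the newer FFmpeg decode API replacing avcodec_decode_audio4().
static int decodeWithSendReceive(AVCodecContext *ctx, AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0)
        return ret;
    // AVERROR(EAGAIN) means more packets are needed before a frame is ready.
    return avcodec_receive_frame(ctx, frame);
}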
Example #11
IFrame* AudioFrameBuffer::getFrame(const size_t size)
{
    LOG_DEBUG("Get a " << size << " bytes frame from a " << _totalDataSize << " bytes frame buffer");
    IFrame* next = _frameQueue.front();
    const size_t nextFrameSize = next->getDataSize();

    // If no expected size, or if the expected size equals the front frame of the queue (with no offset)
    if(size == 0 || (size == nextFrameSize && _positionInFrontFrame == 0))
    {
        // Directly return the front frame of the queue
        _totalDataSize -= nextFrameSize;
        popFrame();
        return next;
    }

    // Create a new frame
    AudioFrame* newAudioFrame = new AudioFrame(_audioFrameDesc, false);
    const size_t expectedNbSamples = size / (newAudioFrame->getNbChannels() * newAudioFrame->getBytesPerSample());
    newAudioFrame->setNbSamplesPerChannel(expectedNbSamples);
    newAudioFrame->allocateData();

    // Concatenate frames data
    size_t extractedDataSize = 0;
    unsigned char* outputData = new unsigned char[size];
    while(extractedDataSize != size && _frameQueue.size() != 0)
    {
        // Get the front frame from queue
        next = _frameQueue.front();
        size_t remainingDataInFrontFrame = next->getDataSize() - _positionInFrontFrame;

        // Compute the data size to get from the frame
        size_t dataToGet = size - extractedDataSize;
        if(dataToGet > remainingDataInFrontFrame)
            dataToGet = remainingDataInFrontFrame;

        // Copy the data from the front frame into the temporary buffer
        memcpy(outputData + extractedDataSize, next->getData()[0] + _positionInFrontFrame, dataToGet);
        extractedDataSize += dataToGet;

        if(dataToGet < remainingDataInFrontFrame)
        {
            // Set new position into front frame
            _positionInFrontFrame += dataToGet;
        }
        else
        {
            // The whole front frame has been read, so pop it from queue
            popFrame();
            _positionInFrontFrame = 0;
        }
    }

    _totalDataSize -= extractedDataSize;
    newAudioFrame->assignBuffer(outputData);
    return newAudioFrame;
}
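A hypothetical call site for the chunked read above; the variable names and the assumption that the caller owns the returned frame are not from the original:

// Hypothetical usage: pull a fixed-size slice regardless of how the
// decoded frames were originally sized.
IFrame* chunk = frameBuffer.getFrame(4096);          // 4096-byte slice
consume(chunk->getData()[0], chunk->getDataSize());  // hypothetical consumer
delete chunk;                                        // assumed caller ownership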
Example #12
//-------------------------------------------------------------------------
bool R::readFrame(AudioFrame& f)
{
  if (_seekWanted)
  {
    _seekWanted = false;
    _frameIndex = _seekWantedIdx;
    if (_frameIndex >= getFrameCount())
      return false;
    unsigned long n = getSampleBytes()*getChannelCount();
    _pReader->seek(getHeaderLength()+n*_frameIndex);
  }
  // if we have read past the end of the file
  unsigned long i, frameCount = getFrameCount();
  // the call to getFrameCount() defines _sampleBytes & other stuff
  if (_frameIndex >= frameCount)
    return false;
  if (_selectedChannel >= _channelCount) // channel index is 0-based, so >=
    throw Exception("Unavailable selected channel #"
          + String::valueOf(_selectedChannel), __FILE__, __LINE__);
  if (_sampleBytes == 2) // 16 bits
  {
    if (_channelCount == 1)
      f.setData(_pReader->readInt2());
    else if (_channelCount == 2)
    {
      if (_selectedChannel == 0)
      {
        f.setData(_pReader->readInt2());
        _pReader->readInt2();
      }
      else
      {
        _pReader->readInt2();
        f.setData(_pReader->readInt2());
      }
    }
    else // multi-channels
    {
      for (i=0; i<_selectedChannel; i++)
        _pReader->readInt2();
      f.setData(_pReader->readInt2());
      for (i++; i<_channelCount; i++)
        _pReader->readInt2();
    }
  }
  else if (_sampleBytes == 1) // 8 bits
  {
    if (_channelCount == 1)
      f.setData(_pReader->readChar());
    else if (_channelCount == 2)
    {
      if (_selectedChannel == 0)
      {
        f.setData(_pReader->readChar());
        _pReader->readChar();
      }
      else
      {
        _pReader->readChar();
        f.setData(_pReader->readChar());
      }
    }
    else // multi-channels
    {
      for (i=0; i<_selectedChannel; i++)
        _pReader->readChar();
      f.setData(_pReader->readChar());
      for (i++; i<_channelCount; i++)
        _pReader->readChar();
    }
  }
  else
    throw Exception("Unimplemented code (TODO)", __FILE__, __LINE__);
  f.setValidity(true);
  _frameIndex++;
  return true; // a valid frame was read
}
Example #13
	void AudioEffect::ProcessCaptureStream( int16_t* audio_samples, size_t frame_byte_size, int16_t* outSample, size_t& len_of_byte )
	{
		if(!m_bInit)
		{
			return;
		}
		if(!m_bEnable)
		{
			return;
		}

		if(rec_resample.infreq == 44100)
		{
			// 44.1 kHz yields 441 samples per 10 ms; work on 440 (880 bytes mono)
			// and patch the missing sample at the end of this function.
			frame_byte_size = 880 * rec_resample.inchannel;
		}

		if(rec_resample.inchannel == 2 && rec_resample.channel == 1)
		{
			AudioResample::ToMono( audio_samples, frame_byte_size / 2 );
			frame_byte_size /= 2;
		}

		AudioFrame af;
		size_t outLen = 0;
		int err = 0;
		if(0 != ( err = m_recResample.Push( audio_samples,
			frame_byte_size / sizeof( int16_t ),
			af.data_,
			sizeof( af.data_ ),
			outLen ) ))
		{
			return;
		}

		af.UpdateFrame( 0,
			GetTimeStamp(),
			af.data_,
			kTargetRecSampleRate / 100,
			kTargetRecSampleRate,
			AudioFrame::kNormalSpeech,
			AudioFrame::kVadUnknown,
			rec_resample.channel );
		m_apm->set_stream_delay_ms( m_stream_delay );
		if(0 != ( err = m_apm->ProcessStream( &af ) ))
		{
			return;
		}


		size_t inLen = outLen;
		if(0 != ( err = m_recReverseResample.Push( af.data_,
			inLen,
			outSample,
			len_of_byte / 2,
			outLen ) ))
		{
			return;
		}

		if(rec_resample.outchannel == 2 && rec_resample.channel == 1)
		{
			AudioResample::Tostereo( outSample, outLen );
			outLen *= 2;
		}

		if(rec_resample.outfreq == 44100)
		{
			// Duplicate the last sample(s) to reach 441 samples per 10 ms at 44.1 kHz.
			if(rec_resample.outchannel == 1)
			{
				outSample[440] = outSample[439];
			}
			else
			{
				outSample[880] = outSample[878];
				outSample[880 + 1] = outSample[879];
			}
		}

		len_of_byte = rec_resample.outfreq / 100 * rec_resample.outchannel * 2;
	}
Example #14
	void AudioEffect::ProcessRenderStream( int16_t*  inSamples, size_t frame_byte_size, int16_t*outSample, size_t& len_of_byte )
	{
		if(!m_bInit)
		{
			return;
		}
		if(!m_bEnable)
		{
			return;
		}
		if(ply_resample.infreq == 44100)
		{
			// Same 44.1 kHz adjustment as in the capture path (Example #13).
			frame_byte_size = 880 * ply_resample.inchannel;
		}
		if(ply_resample.inchannel == 2 && ply_resample.channel == 1)
		{
			AudioResample::ToMono( inSamples, frame_byte_size / 2 );
			frame_byte_size /= 2;
		}

		size_t outLen;
		int err = 0;
		AudioFrame af;
		if(0 != ( err = m_plyResample.Push( inSamples,
			frame_byte_size / sizeof( int16_t ),
			af.data_,
			sizeof( af.data_ ),
			outLen ) ))
		{
			return;
		}
		af.UpdateFrame( 0,
			GetTimeStamp(),
			af.data_,
			kTargetPlySampleRate / 100,
			kTargetPlySampleRate,
			AudioFrame::kNormalSpeech,
			AudioFrame::kVadUnknown,
			ply_resample.channel );

		if(0 != ( err = m_apm->AnalyzeReverseStream( &af ) ))
		{
			return;
		}
		if(ply_resample.infreq == ply_resample.outfreq)
		{
			if(ply_resample.inchannel == 1 && ply_resample.outchannel == 2)
			{
				AudioResample::Tostereo( inSamples, frame_byte_size / 2, outSample );
			}
			else if(inSamples != outSample)
			{
				memcpy( outSample, inSamples, len_of_byte );
			}
			len_of_byte = ply_resample.outfreq / 100 * 2 * ply_resample.outchannel;
			return;
		}

		size_t inLen = outLen;
		if(ply_resample.infreq != ply_resample.outfreq || inSamples != outSample)
		{
			if(0 != ( err = m_plyReverseResample.Push( inSamples,
				frame_byte_size / 2,
				outSample,
				1920,
				outLen ) ))
			{
				return;
			}
		}

		if(ply_resample.inchannel == 1 && ply_resample.outchannel == 2)
		{
			AudioResample::Tostereo( outSample, outLen );
		}

		if(ply_resample.outfreq == 44100)
		{
			// Duplicate the last sample(s) to reach 441 samples per 10 ms at 44.1 kHz.
			if(ply_resample.outchannel == 1)
			{
				outSample[440] = outSample[439];
			}
			else
			{
				outSample[880] = outSample[878];
				outSample[880 + 1] = outSample[879];
			}
		}

		len_of_byte = ply_resample.outfreq / 100 * 2 * ply_resample.outchannel;
	}
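With WebRTC-style audio processing, the far-end (render) stream is analyzed before the near-end (capture) stream is processed, and the measured playout delay is passed via set_stream_delay_ms(), as the capture path above does. A sketch of the per-10 ms tick that ties the two methods together; the buffer names are assumptions:

// Hypothetical 10 ms tick: feed the reference signal first, then process capture.
effect.ProcessRenderStream( playoutSamples, playoutBytes, playoutOut, playoutOutBytes );
effect.ProcessCaptureStream( micSamples, micBytes, micOut, micOutBytes );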
Example #15
int RTPMultiplexerSmoother::SmoothFrame(const MediaFrame* frame,DWORD duration)
{
	//Check
	if (!frame || !frame->HasRtpPacketizationInfo())
		//Error
		return Error("Frame do not has packetization info");

	//Get info
	const MediaFrame::RtpPacketizationInfo& info = frame->GetRtpPacketizationInfo();

	DWORD codec = 0;
	BYTE *frameData = NULL;
	DWORD frameSize = 0;

	//Depending on the type
	switch(frame->GetType())
	{
		case MediaFrame::Audio:
		{
			//get audio frame
			AudioFrame * audio = (AudioFrame*)frame;
			//Get codec
			codec = audio->GetCodec();
			//Get data
			frameData = audio->GetData();
			//Get size
			frameSize = audio->GetLength();
		}
			break;
		case MediaFrame::Video:
		{
			//get Video frame
			VideoFrame * video = (VideoFrame*)frame;
			//Get codec
			codec = video->GetCodec();
			//Get data
			frameData = video->GetData();
			//Get size
			frameSize = video->GetLength();
		}
			break;
		default:
			return Error("No smoother for frame");
	}

	DWORD frameLength = 0;
	//Calculate total length
	for (int i=0;i<info.size();i++)
		//Get total length
		frameLength += info[i]->GetTotalLength();

	//Calculate bitrate for frame
	DWORD current = 0;
	
	//For each one
	for (int i=0;i<info.size();i++)
	{
		//Get packet
		MediaFrame::RtpPacketization* rtp = info[i];

		//Create rtp packet
		RTPPacketSched *packet = new RTPPacketSched(frame->GetType(),codec);

		//Make sure the payload fits in the packet
		if (rtp->GetPrefixLen()+rtp->GetSize()>packet->GetMaxMediaLength())
			//Error
			continue;
		
		//Get pointer to media data
		BYTE* out = packet->GetMediaData();
		//Copy prefix
		memcpy(out,rtp->GetPrefixData(),rtp->GetPrefixLen());
		//Copy data
		memcpy(out+rtp->GetPrefixLen(),frameData+rtp->GetPos(),rtp->GetSize());
		//Compute total length
		DWORD len = rtp->GetPrefixLen()+rtp->GetSize();
		//Set length
		packet->SetMediaLength(len);
		switch(packet->GetMedia())
		{
			case MediaFrame::Video:
				//Set timestamp
				packet->SetTimestamp(frame->GetTimeStamp()*90);
				break;
			case MediaFrame::Audio:
				//Set timestamp
				packet->SetTimestamp(frame->GetTimeStamp()*8);
				break;
			default:
				//Set timestamp
				packet->SetTimestamp(frame->GetTimeStamp());
		}
		//Check if this is the last packet of the frame
		if (i+1==info.size())
			//Last
			packet->SetMark(true);
		else
			//Not last
			packet->SetMark(false);
		//Accumulate partial length
		current += len;
		//Calculate sending time offset from first frame
		packet->SetSendingTime(current*duration/frameLength);
		//Append it
		queue.Add(packet);
	}

	return 1;
}
Example #16
QWORD MP4RtpTrack::Read(Listener *listener)
{
	int last = 0;
	uint8_t* data;
	bool isSyncSample;

	// If it's first packet of a frame
	if (!numHintSamples)
	{
		// Get number of rtp packets for this sample
		if (!MP4ReadRtpHint(mp4, hint, sampleId, &numHintSamples))
		{
			//Print error
			Error("Error reading hintt");
			//Exit
			return MP4_INVALID_TIMESTAMP;
		}

		// Get number of samples for this sample
		frameSamples = MP4GetSampleDuration(mp4, hint, sampleId);

		// Get size of sample
		frameSize = MP4GetSampleSize(mp4, hint, sampleId);

		// Get sample timestamp
		frameTime = MP4GetSampleTime(mp4, hint, sampleId);
		//Convert to milliseconds
		frameTime = MP4ConvertFromTrackTimestamp(mp4, hint, frameTime, 1000);

		// Check if it is H264 and it is a Sync frame
		if (codec==VideoCodec::H264 && MP4GetSampleSync(mp4,track,sampleId))
			// Send SEI info
			SendH263SEI(listener);

		//Get max data length
		BYTE *data = NULL;
		DWORD dataLen = 0;
		MP4Timestamp	startTime;
		MP4Duration	duration;
		MP4Duration	renderingOffset;

		//Get values
		data	= frame->GetData();
		dataLen = frame->GetMaxMediaLength();
		
		// Read next rtp packet
		if (!MP4ReadSample(
			mp4,				// MP4FileHandle hFile
			track,				// MP4TrackId hintTrackId
			sampleId,			// MP4SampleId sampleId,
			(u_int8_t **) &data,		// u_int8_t** ppBytes
			(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
			&startTime,			// MP4Timestamp* pStartTime
			&duration,			// MP4Duration* pDuration
			&renderingOffset,		// MP4Duration* pRenderingOffset
			&isSyncSample			// bool* pIsSyncSample
			))
		{
			Error("Error reading sample");
			//Last
			return MP4_INVALID_TIMESTAMP;
		}

		//Check type
		if (media == MediaFrame::Video)
		{
			//Get video frame
			VideoFrame *video = (VideoFrame*)frame;
			//Set length
			video->SetLength(dataLen);
			//Timestamp
			video->SetTimestamp(startTime*90000/timeScale);
			//Set intra
			video->SetIntra(isSyncSample);
		} else {
			//Get Audio frame
			AudioFrame *audio = (AudioFrame*)frame;
			//Set length
			audio->SetLength(dataLen);
			//Timestamp
			audio->SetTimestamp(startTime*8000/timeScale);
		}

		//Check listener
		if (listener)
			//Frame callback
			listener->onMediaFrame(*frame);
	}

	// if it's the last
	if (packetIndex + 1 == numHintSamples)
		//Set last mark
		last = 1;
	
	// Set mark bit
	rtp.SetMark(last);

	// Get data pointer
	data = rtp.GetMediaData();
	//Get max data length
	DWORD dataLen = rtp.GetMaxMediaLength();

	// Read next rtp packet
	if (!MP4ReadRtpPacket(
				mp4,				// MP4FileHandle hFile
				hint,				// MP4TrackId hintTrackId
				packetIndex++,			// u_int16_t packetIndex
				(u_int8_t **) &data,		// u_int8_t** ppBytes
				(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
				0,				// u_int32_t ssrc DEFAULT(0)
				0,				// bool includeHeader DEFAULT(true)
				1				// bool includePayload DEFAULT(true)
	))
	{
		//Error
		Error("Error reading packet [%d,%d,%d]\n", hint, track,packetIndex);
		//Exit
		return MP4_INVALID_TIMESTAMP;
	}
		

	//Check
	if (dataLen>rtp.GetMaxMediaLength())
	{
		//Error
		Error("RTP packet too big [%u,%u]\n",dataLen,rtp.GetMaxMediaLength());
		//Exit
		return MP4_INVALID_TIMESTAMP;
	}
	
	//Set length
	rtp.SetMediaLength(dataLen);
	// Write frame
	listener->onRTPPacket(rtp);

	// Are we the last packet in a hint?
	if (last)
	{
		// Reset to the first packet of the next hint
		packetIndex = 0;
		// Go for next sample
		sampleId++;
		numHintSamples = 0;
		//Return next frame time
		return GetNextFrameTime();
	}

	// Not the last packet of the hint: return the current frame time
	return frameTime;
}