Exemplo n.º 1
0
//TODO: seek by byte
// Seek to a relative position q in [0, 1) of the stream duration.
// Rate-limited via seek_timer (kSeekInterval) and serialized with the
// demux loop through `mutex`. On success, resets playback bookkeeping when
// seeking to 0 and pushes the new position into the master clock.
void AVDemuxer::seek(qreal q)
{
    // Need an opened format context and at least one codec context.
    if ((!a_codec_context && !v_codec_context) || !format_context) {
        qWarning("can not seek. context not ready: %p %p %p", a_codec_context, v_codec_context, format_context);
        return;
    }
    // Throttle: drop seek requests arriving faster than kSeekInterval.
    if (seek_timer.isValid()) {
        if (seek_timer.elapsed() < kSeekInterval)
            return;
        seek_timer.restart();
    } else {
        seek_timer.start();
    }
    QMutexLocker lock(&mutex);
    Q_UNUSED(lock);
    q = qMax<qreal>(0.0, q);  // clamp negative positions to the start
    if (q >= 1.0) {
        qWarning("Invalid seek position %f/1.0", q);
        return;
    }
#if 0
    //t: unit is s
    qreal t = q;// * (double)format_context->duration; //
    int ret = av_seek_frame(format_context, -1, (int64_t)(t*AV_TIME_BASE), t > pkt->pts ? 0 : AVSEEK_FLAG_BACKWARD);
    qDebug("[AVDemuxer] seek to %f %f %lld / %lld", q, pkt->pts, (int64_t)(t*AV_TIME_BASE), duration());
#else
    //t: unit is us (10^-6 s, AV_TIME_BASE)
    int64_t t = int64_t(q*duration());///AV_TIME_BASE;
    //TODO: pkt->pts may be 0, compute manually. Check wether exceed the length
    if (t >= duration()) {
        qWarning("Invailid seek position: %lld/%lld", t, duration());
        return;
    }
    // backward == target is at or before the current packet pts
    // (pkt->pts presumably in seconds here given the AV_TIME_BASE scaling — TODO confirm)
    bool backward = t <= (int64_t)(pkt->pts*AV_TIME_BASE);
    qDebug("[AVDemuxer] seek to %f %f %lld / %lld backward=%d", q, pkt->pts, t, duration(), backward);
	//AVSEEK_FLAG_BACKWARD has no effect? because we know the timestamp
	// NOTE(review): the flag choice looks inverted (forward seeks get
	// AVSEEK_FLAG_BACKWARD, backward seeks get 0) — confirm intent.
	int seek_flag =  (backward ? 0 : AVSEEK_FLAG_BACKWARD); //AVSEEK_FLAG_ANY
	int ret = av_seek_frame(format_context, -1, t, seek_flag);
#endif
    if (ret < 0) {
        qWarning("[AVDemuxer] seek error: %s", av_err2str(ret));
        return;
    }
    //replay
    if (q == 0) {
        qDebug("************seek to 0. started = false");
        started_ = false;
        v_codec_context->frame_number = 0; //TODO: why frame_number not changed after seek?
    }
    if (master_clock) {
        master_clock->updateValue(qreal(t)/qreal(AV_TIME_BASE));
        master_clock->updateExternalClock(t/1000LL); //in msec. ignore usec part using t/1000
    }
    //calc pts
    //use AVThread::flush() when reaching end
    //if (videoCodecContext())
    //    avcodec_flush_buffers(videoCodecContext());
    //if (audioCodecContext())
    //    avcodec_flush_buffers(audioCodecContext());
}
Exemplo n.º 2
0
// Stop playback: halt the audio/video decoders, rewind both streams to
// timestamp 0 and flush codec buffers so the next play starts clean.
void FeMedia::stop()
{
	if ( m_audio )
	{
		sf::SoundStream::stop();
		m_audio->stop();

		// rewind to the nearest keyframe at or before timestamp 0
		av_seek_frame( m_imp->m_format_ctx, m_audio->stream_id, 0,
							AVSEEK_FLAG_BACKWARD );

		avcodec_flush_buffers( m_audio->codec_ctx );
	}

	if ( m_video )
	{
		m_video->stop();

		av_seek_frame( m_imp->m_format_ctx, m_video->stream_id, 0,
							AVSEEK_FLAG_BACKWARD );

		avcodec_flush_buffers( m_video->codec_ctx );
	}

	// clear EOF so the demux loop can read again after the rewind
	m_imp->m_read_eof = false;
}
Exemplo n.º 3
0
/* Seek to `second` and decode until the first complete video frame, then
 * rewind the stream and flush the decoder so later reads start clean.
 * NOTE(review): `second` is passed straight to av_seek_frame with stream -1,
 * which expects AV_TIME_BASE (microsecond) units — confirm callers pre-scale.
 * Returns an allocated AVFrame (possibly never filled if the seek failed or
 * no frame decoded); the caller owns it and must free it. */
AVFrame* get_frame_by_second(AVCodecContext* codec_ctx, AVFormatContext *format_ctx,
                                  int video_stream, int64_t second) 
{
  AVFrame* frame = avcodec_alloc_frame();
  AVPacket packet;
  int frame_end = 0;   /* set by the decoder once a full frame is ready */
  int rc = 0;

  if ((rc = av_seek_frame(format_ctx, -1, second , 0)) < 0) 
  {
    LOG_ERROR("Seek on invalid time");
    return frame;
  }
  /* Feed packets of the video stream until one complete frame comes out. */
  while (!frame_end && (av_read_frame(format_ctx, &packet) >= 0)) 
  {
    if (packet.stream_index == video_stream) 
    {
      avcodec_decode_video2(codec_ctx, frame, &frame_end, &packet);
    }
    av_free_packet(&packet);
  }
  /* Rewind to byte 0 and flush buffered frames from the probe. */
  av_seek_frame(format_ctx, video_stream, 0, AVSEEK_FLAG_BYTE);
  avcodec_flush_buffers(codec_ctx);  

  return frame;
}
Exemplo n.º 4
0
/* MPlayer seek hook for the libavformat demuxer.
 * rel_seek_secs: offset in seconds, or a 0..1 fraction with SEEK_FACTOR.
 * Tracks the target in priv->last_pts (AV_TIME_BASE units) and seeks with
 * AVSEEK_FLAG_BACKWARD for non-positive offsets, retrying once with the
 * direction flag flipped if the first attempt fails. */
static void demux_seek_lavf(demuxer_t *demuxer, float rel_seek_secs, float audio_delay, int flags){
    lavf_priv_t *priv = demuxer->priv;
    int avsflags = 0;
    mp_msg(MSGT_DEMUX,MSGL_DBG2,"demux_seek_lavf(%p, %f, %f, %d)\n", demuxer, rel_seek_secs, audio_delay, flags);

    if (flags & SEEK_ABSOLUTE) {
      /* restart from the container start time before applying the offset */
      priv->last_pts = priv->avfc->start_time != AV_NOPTS_VALUE ?
                       priv->avfc->start_time : 0;
    }
    // This is important also for SEEK_ABSOLUTE because seeking
    // is done by dts, while start_time is relative to pts and thus
    // usually too large.
    if (rel_seek_secs <= 0) avsflags = AVSEEK_FLAG_BACKWARD;
    if (flags & SEEK_FACTOR) {
      /* fractional seek requires a known, non-zero duration */
      if (priv->avfc->duration == 0 || priv->avfc->duration == AV_NOPTS_VALUE)
        return;
      priv->last_pts += rel_seek_secs * priv->avfc->duration;
    } else {
      priv->last_pts += rel_seek_secs * AV_TIME_BASE;
    }
    if (av_seek_frame(priv->avfc, -1, priv->last_pts, avsflags) < 0) {
        /* retry once with the direction flag flipped */
        avsflags ^= AVSEEK_FLAG_BACKWARD;
        av_seek_frame(priv->avfc, -1, priv->last_pts, avsflags);
    }
}
Exemplo n.º 5
0
// Position the demuxer so decoding can resume at (or before) frame n.
// SeekOffset shifts the seek target relative to n. SeekMode semantics as
// used below: <0 forbids any backwards access; 0 rewinds to the first frame
// when going backwards; >0 seeks to the closest keyframe, with mode 3 also
// reseeking on large forward jumps. Returns true only when a keyframe seek
// (the non-zero-mode path) was actually issued.
bool FFLAVFVideo::SeekTo(int n, int SeekOffset) {
	if (SeekMode >= 0) {
		int TargetFrame = n + SeekOffset;
		if (TargetFrame < 0)
			throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_UNKNOWN,
			"Frame accurate seeking is not possible in this file");

		// Modes 0-2 snap the target to the nearest earlier keyframe.
		if (SeekMode < 3)
			TargetFrame = Frames.FindClosestVideoKeyFrame(TargetFrame);

		if (SeekMode == 0) {
			// Mode 0: only seek when moving backwards, and then restart
			// decoding from the very first frame.
			if (n < CurrentFrame) {
				av_seek_frame(FormatContext, VideoTrack, Frames[0].PTS, AVSEEK_FLAG_BACKWARD);
				FlushBuffers(CodecContext);
				CurrentFrame = 0;
				DelayCounter = 0;
				InitialDecode = 1;
			}
		} else {
			// 10 frames is used as a margin to prevent excessive seeking since the predicted best keyframe isn't always selected by avformat
			if (n < CurrentFrame || TargetFrame > CurrentFrame + 10 || (SeekMode == 3 && n > CurrentFrame + 10)) {
				av_seek_frame(FormatContext, VideoTrack, Frames[TargetFrame].PTS, AVSEEK_FLAG_BACKWARD);
				FlushBuffers(CodecContext);
				DelayCounter = 0;
				InitialDecode = 1;
				return true;
			}
		}
	} else if (n < CurrentFrame) {
		throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_INVALID_ARGUMENT,
			"Non-linear access attempted");
	}
	return false;
}
Exemplo n.º 6
0
// Seek to DestFrame (frame index) in the media.
// StartTimestamp is in milliseconds; StreamIndex selects the stream whose
// frame rate drives the frame->timestamp conversion. After seeking, the
// packet cache is cleared and every cached stream's decoder is flushed.
void FFMpegDemuxer::seek(int DestFrame, int StartTimestamp, int StreamIndex)
{
    AVStream * pVStream = m_pFormatContext->streams[StreamIndex];
#if LIBAVFORMAT_BUILD <= 4616
    av_seek_frame(m_pFormatContext, StreamIndex, 
            int((double(DestFrame)*1000000*1000)/pVStream->r_frame_rate+StartTimestamp));
#else
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
    av_seek_frame(m_pFormatContext, StreamIndex, 
            int((double(DestFrame)*1000000*1000)/pVStream->r_frame_rate+StartTimestamp), 0);
#else
    // BUGFIX: r_frame_rate.num/.den are ints, so the previous num/den was an
    // integer division that truncated the rate (e.g. 30000/1001 -> 29) before
    // the double conversion. Promote the numerator first.
    double framerate = double(pVStream->r_frame_rate.num)/pVStream->r_frame_rate.den;
    // StartTimestamp (ms) expressed as a frame count at this rate.
    double FrameStartOffset = framerate*StartTimestamp/1000.0;
    av_seek_frame(m_pFormatContext, -1, 
            int((double(DestFrame+FrameStartOffset)*AV_TIME_BASE)/framerate),
            AVSEEK_FLAG_BACKWARD);
#endif
#endif
    clearPacketCache();
    // Flush every cached stream's decoder so no stale frames survive the seek.
    map<int, PacketList>::iterator it;
    for (it=m_PacketLists.begin(); it != m_PacketLists.end(); ++it) {
        int CurStreamIndex = it->first;
        AVStream * pStream = m_pFormatContext->streams[CurStreamIndex];
        avcodec_flush_buffers(pStream->codec);
    }
}
Exemplo n.º 7
0
/* Compute storage->header_offset: the byte delta between where packets are
 * actually read in the file and the positions recorded in the stream index.
 * As a side effect, caches the first packet of each stream in
 * storage->firstFrames. Returns noErr, or the av_* error that aborted it. */
int determine_header_offset(ff_global_ptr storage) {
    AVFormatContext *formatContext;
    AVPacket packet;
    int result, i;

    formatContext = storage->format_context;
    result = noErr;
    storage->header_offset = 0;

    /* Seek backwards to get a manually read packet for file offset */
    if(formatContext->streams[0]->index_entries == NULL || storage->componentType == 'FLV ')
    {
        /* no index available (or FLV): offset stays 0 */
        storage->header_offset = 0;
    }
    else
    {
        int streamsRead = 0;
        AVStream *st;

        result = av_seek_frame(formatContext, -1, 0, AVSEEK_FLAG_ANY);
        if(result < 0) goto bail;

        result = av_read_frame(formatContext, &packet);
        if(result < 0) goto bail;
        st = formatContext->streams[packet.stream_index];

        /* read_packet will give the first decodable packet. However, that isn't necessarily
        	the first entry in the index, so look for an entry with a matching size. */
        for (i = 0; i < st->nb_index_entries; i++) {
            if (packet.dts == st->index_entries[i].timestamp) {
                storage->header_offset = packet.pos - st->index_entries[i].pos;
                break;
            }
        }
        /* Capture the first packet of every stream. Packets kept in
           firstFrames are deliberately NOT freed here — ownership transfers
           to storage; only superseded packets are released. */
        while(streamsRead < formatContext->nb_streams)
        {
            int streamIndex = packet.stream_index;
            if(storage->firstFrames[streamIndex].size == 0)
            {
                memcpy(storage->firstFrames + streamIndex, &packet, sizeof(AVPacket));
                streamsRead++;
                if(streamsRead == formatContext->nb_streams)
                    break;
            }
            else
                av_free_packet(&packet);
            int status = formatContext->iformat->read_packet(formatContext, &packet);
            if(status < 0)
                break;
        }

        // seek back to the beginning, otherwise av_read_frame-based decoding will skip a few packets.
        av_seek_frame(formatContext, -1, 0, AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
    }

bail:
    return result;
}
Exemplo n.º 8
0
// Seek to a relative position (0.0-1.0) in the media.
// Prefers a timestamp seek scaled by the audio stream's duration; falls back
// to a byte seek scaled by file size when the stream reports no usable
// duration.
void FFMpegDecoder::seek(float percent) 
{
  // BUGFIX: the original byte-seek branch dereferenced m_audio_stream even
  // though it was taken precisely when m_audio_stream was NULL.
  if (!m_audio_stream) {
    return;
  }
  if (m_audio_stream->duration > 1) {
    av_seek_frame(m_format, m_audio_stream->index, (int64_t)(percent * m_audio_stream->duration), 0);
  }
  else {
    // No duration info: approximate the position by file byte offset.
    av_seek_frame(m_format, m_audio_stream->index, (int64_t)(percent * m_file_size), AVSEEK_FLAG_BYTE);
  }
}
Exemplo n.º 9
0
/* Decode frames starting at the random access point rap_pos until the codec
 * context has established its configuration (non-zero width/height and a
 * known pixel format), or the frame list / extradata generation ends.
 * On failure writes a message into error_string and returns -1; else 0. */
int try_decode_video_frame
(
    lwlibav_decode_handler_t *dhp,
    uint32_t                  frame_number,
    int64_t                   rap_pos,
    char                     *error_string
)
{
    AVFrame *picture = av_frame_alloc();
    if( !picture )
    {
        strcpy( error_string, "Failed to alloc AVFrame to set up a decoder configuration.\n" );
        return -1;
    }
    lwlibav_video_decode_handler_t *vdhp = (lwlibav_video_decode_handler_t *)dhp;
    AVFormatContext *format_ctx   = vdhp->format;
    int              stream_index = vdhp->stream_index;
    AVCodecContext  *ctx          = format_ctx->streams[stream_index]->codec;
    ctx->refcounted_frames = 1;
    /* Seek to the RAP; retry with AVSEEK_FLAG_ANY if the strict seek fails. */
    if( av_seek_frame( format_ctx, stream_index, rap_pos, vdhp->av_seek_flags ) < 0 )
        av_seek_frame( format_ctx, stream_index, rap_pos, vdhp->av_seek_flags | AVSEEK_FLAG_ANY );
    do
    {
        if( frame_number > vdhp->frame_count )
            break;
        /* Get a frame. */
        AVPacket pkt = { 0 };
        /* Stop at a frame that belongs to a different extradata generation. */
        int extradata_index = vdhp->frame_list[frame_number].extradata_index;
        if( extradata_index != vdhp->exh.current_index )
            break;
        int ret = lwlibav_get_av_frame( format_ctx, stream_index, frame_number, &pkt );
        if( ret > 0 )
            break;
        else if( ret < 0 )
        {
            if( ctx->pix_fmt == AV_PIX_FMT_NONE )
                strcpy( error_string, "Failed to set up pixel format.\n" );
            else
                strcpy( error_string, "Failed to set up resolution.\n" );
            av_frame_free( &picture );
            return -1;
        }
        /* Try decode a frame. */
        av_frame_unref( picture );
        int dummy;
        avcodec_decode_video2( ctx, picture, &dummy, &pkt );
        ++frame_number;
    } while( ctx->width == 0 || ctx->height == 0 || ctx->pix_fmt == AV_PIX_FMT_NONE );
    av_frame_free( &picture );
    return 0;
}
Exemplo n.º 10
0
// Seek to the given Msf position (75 frames per second, CD convention).
// Converts the position to AV_TIME_BASE units, offsets by the container's
// start time, and returns true when av_seek_frame succeeds.
bool K3bFFMpegFile::seek( const K3b::Msf& msf )
{
  d->outputBufferSize = 0;
  d->packetSize = 0;

  double seconds = (double)msf.totalFrames()/75.0;
  Q_UINT64 timestamp = (Q_UINT64)(seconds * (double)AV_TIME_BASE);

  // BUGFIX: start_time is AV_NOPTS_VALUE when the container reports no start
  // time; adding that sentinel produced a garbage seek target. Treat it as 0.
  int64_t startTime = d->formatContext->start_time;
  if( startTime == (int64_t)AV_NOPTS_VALUE )
    startTime = 0;

  // FIXME: do we really need the start_time and why?
#if LIBAVFORMAT_BUILD >= 4619
  return ( av_seek_frame( d->formatContext, -1, timestamp + startTime, 0 ) >= 0 );
#else
  return ( av_seek_frame( d->formatContext, -1, timestamp + startTime ) >= 0 );
#endif
}
Exemplo n.º 11
0
// Seek the audio demuxer to a seekable packet at/before PacketNumber.
// Retries with AVSEEK_FLAG_ANY if the strict seek fails; when the seek lands
// on an earlier packet, decodes forward until the timestamp changes so the
// current position is known again.
void FFLAVFAudio::Seek() {
    size_t TargetPacket = GetSeekablePacketNumber(Frames, PacketNumber);
    LastValidTS = AV_NOPTS_VALUE;

    // Byte seeking is the only option when the container has no timestamps.
    int Flags = Frames.HasTS ? AVSEEK_FLAG_BACKWARD : AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_BYTE;

    if (av_seek_frame(FormatContext, TrackNumber, FrameTS(TargetPacket), Flags) < 0)
        av_seek_frame(FormatContext, TrackNumber, FrameTS(TargetPacket), Flags | AVSEEK_FLAG_ANY);

    if (TargetPacket != PacketNumber) {
        // Decode until the PTS changes so we know where we are
        int64_t LastPTS = FrameTS(PacketNumber);
        while (LastPTS == FrameTS(PacketNumber)) DecodeNextBlock();
    }
}
Exemplo n.º 12
0
// TODO: seeking back may go beyond edge, same with forward (it might never find a keyframe)
// Seek to iFrameNumber and decode it into pOutput (RGB).
// If the seek lands on a non-keyframe, scans backwards for the nearest
// keyframe and decodes forward to the requested frame.
// Returns true on success, false on invalid frame number or decode failure.
bool SimpleVideo::seekToFrame( int iFrameNumber, AVFrame* pOutput )
{
	if( ( iFrameNumber < 0 ) || ( iFrameNumber >= getNumFrames() ) )
	{
		return false;
	}

	// AVSEEK_FLAG_ANY may land on any frame, not necessarily a keyframe.
	bool bSucceeded = ( av_seek_frame( m_pFormatContext, m_iVideoStreamIndex, iFrameNumber, AVSEEK_FLAG_ANY ) >= 0 );
	if( bSucceeded )
	{
		bSucceeded = getNextFrameRaw();
		if( bSucceeded )
		{
			if( m_pFrame->key_frame == 1 )
			{
				convertFrameToRGB( pOutput );
				return true;
			}
			else
			{
				// seek backwards until we see a keyframe
				int currentFrameNumber = iFrameNumber - 1;
				av_seek_frame( m_pFormatContext, m_iVideoStreamIndex, currentFrameNumber, AVSEEK_FLAG_ANY );
				getNextFrameRaw();
				// BUGFIX: stop at frame 0 instead of decrementing past the
				// start of the stream when no keyframe is ever found.
				while( m_pFrame->key_frame == 0 && currentFrameNumber > 0 )
				{
					--currentFrameNumber;
					av_seek_frame( m_pFormatContext, m_iVideoStreamIndex, currentFrameNumber, AVSEEK_FLAG_ANY );
					getNextFrameRaw();
				}

				// then read forward until I get back to my frame number
				++currentFrameNumber;
				getNextFrameRaw();
				while( currentFrameNumber < iFrameNumber )
				{
					++currentFrameNumber;
					getNextFrameRaw();
				}

				convertFrameToRGB( pOutput );
				return true;
			}
		}
	}

	// BUGFIX: was `return NULL;` — converts to false, but is the wrong type
	// for a bool-returning function.
	return false;
}
Exemplo n.º 13
0
//Seek function
// Seek pDecoder to target_pos (milliseconds). dir < 0 requests a backward
// (keyframe-before-target) seek. Returns 1 on success, 0 on failure.
// On success, audio decoders get their codec buffers flushed and their
// temporary sample buffer released.
int CALL_CONVT ac_seek(lp_ac_decoder pDecoder, int dir, int64_t target_pos) {
  AVRational timebase = 
    ((lp_ac_data)pDecoder->pacInstance)->pFormatCtx->streams[pDecoder->stream_index]->time_base;
  
  int flags = dir < 0 ? AVSEEK_FLAG_BACKWARD : 0;    
  
  /* milliseconds -> AV_TIME_BASE (microsecond) units */
  int64_t pos = av_rescale(target_pos, AV_TIME_BASE, 1000);
  
  /* NOTE(review): sought=100 presumably flags a post-seek resync window —
     confirm against the decoder loop. */
  ((lp_ac_decoder_data)pDecoder)->sought = 100;
  pDecoder->timecode = target_pos / 1000;   /* timecode kept in seconds */
  
  if (av_seek_frame(((lp_ac_data)pDecoder->pacInstance)->pFormatCtx, pDecoder->stream_index, 
      av_rescale_q(pos, AV_TIME_BASE_Q, timebase), flags) >= 0) {
	
	if (pDecoder->type == AC_DECODER_TYPE_AUDIO)
	{
		if (((lp_ac_audio_decoder)pDecoder)->pCodecCtx->codec->flush != NULL)
			avcodec_flush_buffers(((lp_ac_audio_decoder)pDecoder)->pCodecCtx);
	
		av_free(((lp_ac_audio_decoder)pDecoder)->tmp_data);
		((lp_ac_audio_decoder)pDecoder)->tmp_data_length = 0;
	}
    return 1;
  }
  
  return 0;  
}
Exemplo n.º 14
0
/* Seek within the media file.
 * pos: target position in seconds — a relative offset when rel != 0,
 * otherwise absolute from the stream start.
 * Returns 0 on success, a negative av_* error on failure, -1 on bad args. */
int ffmpeg_seek(struct ffmpeg_file *file, int pos, int rel)
{
    if (!rel && pos < 0) {
        return -1;
    }

    AVStream *s = file->format->streams[file->stream];
    /* seconds -> stream time_base units */
    int64_t pos_pts = pos / av_q2d(s->time_base);
    /* BUGFIX: was `int pts;` — truncated 64-bit timestamps on long files
       or fine-grained time bases. */
    int64_t pts;
    if (rel) {
        pts = file->time + pos_pts;
    } else {
        /* NOTE(review): s->start_time may be AV_NOPTS_VALUE — confirm
           callers only seek streams with a known start time. */
        pts = s->start_time + pos_pts;
    }
    int e = av_seek_frame(file->format, file->stream, pts,
            AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
    if (e < 0) {
        return e;
    }

    /* Discard decoder state and buffered data from the old position. */
    avcodec_flush_buffers(file->codec);
    file->time = pts;
    file->buf_len = 0;
    file->buf_offset = 0;
    ffmpeg_reset_pkt(file);

    return 0;
}
Exemplo n.º 15
0
/* Reader thread: pulls packets from the input context, forwards packets of
 * the selected stream to handle_packet(), and rewinds to the start on
 * EOF/read error, looping the source until st->run is cleared. */
static void *read_thread(void *data)
{
	struct vidsrc_st *st = data;

	while (st->run) {
		AVPacket pkt;

		av_init_packet(&pkt);

		if (av_read_frame(st->ic, &pkt) < 0) {
			/* EOF or read error: pause, rewind, retry */
			sys_msleep(1000);
			av_seek_frame(st->ic, -1, 0, 0);
			continue;
		}

		/* skip packets that belong to other streams */
		if (pkt.stream_index != st->sindex)
			goto out;

		handle_packet(st, &pkt);

		/* simulate framerate */
		sys_msleep(1000/st->fps);

	out:
#if LIBAVCODEC_VERSION_INT >= ((57<<16)+(12<<8)+100)
		av_packet_unref(&pkt);
#else
		av_free_packet(&pkt);
#endif
	}

	return NULL;
}
Exemplo n.º 16
0
// Seek the Blu-ray demuxer to rTime (DirectShow 100ns units).
// Maps the time to a BD seek point (a byte position) and byte-seeks the lavf
// demuxer there. When MVC (3D) playback is active, also re-opens the MVC
// extension demuxer for the (possibly new) clip and seeks it ~1s early.
STDMETHODIMP CBDDemuxer::Seek(REFERENCE_TIME rTime)
{
  int64_t prev = bd_tell(m_pBD);

  // Convert to 90kHz and find the closest BD seek point (byte offset).
  int64_t target = bd_find_seek_point(m_pBD, ConvertDSTimeTo90Khz(rTime));
  m_EndOfStreamPacketFlushProtection = FALSE;

  DbgLog((LOG_TRACE, 1, "Seek Request: %I64u (time); %I64u (byte), %I64u (prev byte)", rTime, target, prev));
  // NOTE(review): the +4 byte offset presumably skips a sync marker — confirm.
  HRESULT hr = m_lavfDemuxer->SeekByte(target + 4, AVSEEK_FLAG_BACKWARD);

  if (m_MVCPlayback && m_MVCFormatContext) {
    // Re-open to switch clip if needed
    CloseMVCExtensionDemuxer();
    if (FAILED(OpenMVCExtensionDemuxer(m_NewClip)))
      return E_FAIL;

    // Adjust for clip offset
    int64_t seek_pts = 0;
    if (rTime > 0) {
      AVStream *stream = m_MVCFormatContext->streams[m_MVCStreamIndex];

      rTime -= m_rtNewOffset;
      rTime -= 10000000; // seek one second before the target to ensure the MVC queue isn't out of sync for too long
      seek_pts = m_lavfDemuxer->ConvertRTToTimestamp(rTime, stream->time_base.num, stream->time_base.den);
    }

    if (seek_pts < 0)
      seek_pts = 0;

    av_seek_frame(m_MVCFormatContext, m_MVCStreamIndex, seek_pts, AVSEEK_FLAG_BACKWARD);
  }
  return hr;
}
Exemplo n.º 17
0
/* Seek the global playback context to `pos` seconds.
 * backward != 0 requests a backward (keyframe-before-target) seek.
 * Returns 0 on success, -1 on failure. */
int
xrdpvr_seek_media(int64_t pos, int backward)
{
    printf("xrdpvr_seek_media() entered\n");

    /* invalidate any pending stream timeouts before jumping */
    g_psi.audioTimeout = -1;
    g_psi.videoTimeout = -1;

    const int seek_flag = backward ? AVSEEK_FLAG_BACKWARD : 0;

    /* seconds -> AV_TIME_BASE units -> video stream time_base units */
    const int64_t seek_target =
        av_rescale_q(pos * AV_TIME_BASE,
                     AV_TIME_BASE_Q,
                     g_psi.p_format_ctx->streams[g_video_index]->time_base);

    if (av_seek_frame(g_psi.p_format_ctx, g_video_index,
                      seek_target, seek_flag) < 0)
    {
        printf("media seek error\n");
        return -1;
    }

    printf("xrdpvr_seek_media: success\n");
    return 0;
}
Exemplo n.º 18
0
Arquivo: avbin.c Projeto: TimSC/AVbin
/* Seek to `timestamp` and flush every open decoder so stale frames from the
 * old position are not emitted after the jump.
 * Returns AVBIN_RESULT_OK on success, AVBIN_RESULT_ERROR if the seek fails. */
AVbinResult avbin_seek_file(AVbinFile *file, AVbinTimestamp timestamp)
{
    int i;
    AVCodecContext *codec_context;
    int flags = 0;

    /*if (!timestamp)
    {
        flags = AVSEEK_FLAG_ANY | AVSEEK_FLAG_BYTE | AVSEEK_FLAG_BACKWARD;
        if (av_seek_frame(file->context, -1, 0, flags) < 0)
            return AVBIN_RESULT_ERROR;
    }
    else
    {*/
        flags = AVSEEK_FLAG_BACKWARD;
        if (av_seek_frame(file->context, -1, timestamp, flags) < 0)
            return AVBIN_RESULT_ERROR;
    //}

    /* flush every stream whose codec is open */
    for (i = 0; i < file->context->nb_streams; i++)
    {
        codec_context = file->context->streams[i]->codec;
        if (codec_context && codec_context->codec)
            avcodec_flush_buffers(codec_context);
    }
    return AVBIN_RESULT_OK;
}
Exemplo n.º 19
0
// Position the thumbnail source at `frame` and decode up to its PTS.
// Seeks PRE_SEEK_AMOUNT frames early so the decoder has run-up before the
// required frame, then decodes forward via getFrameImage().
// Returns false when av_seek_frame fails.
bool ThumbFinder::seekToFrame(int frame, bool checkPos)
{
    // make sure the frame is not in a cut point
    if (checkPos)
        frame = checkFramePosition(frame);

    // seek to a position PRE_SEEK_AMOUNT frames before the required frame
    int64_t timestamp = m_startTime + (frame * m_frameTime) -
                        (PRE_SEEK_AMOUNT * m_frameTime);
    int64_t requiredPTS = m_startPTS + (frame * m_frameTime);

    if (timestamp < m_startTime)
        timestamp = m_startTime;

    // AVSEEK_FLAG_ANY may land on a non-keyframe; the pre-seek margin plus
    // the forward decode below compensates.
    if (av_seek_frame(m_inputFC, m_videostream, timestamp, AVSEEK_FLAG_ANY) < 0)
    {
        LOG(VB_GENERAL, LOG_ERR, "ThumbFinder::SeekToFrame: seek failed") ;
        return false;
    }

    avcodec_flush_buffers(m_codecCtx);
    // decode forward until requiredPTS is reached
    getFrameImage(true, requiredPTS);

    return true;
}
Exemplo n.º 20
0
// Demux thread: reads packets, sends video packets to the decoder and audio
// packets to the audio queue, backing off while the queues are saturated and
// rewinding to the start of the file on EOF/read error.
// Returns 0 when `stop` is raised, -1 when required contexts are missing.
int video_reader_thread( void *data ) {
    static AVPacket packet;

    if( !format_context || !video_codec_context || !video_buffer )
        return -1;

    reader_running = 1;

    while( !stop ) {
        // Consumers saturated: wait before reading more.
        if( video_queue.frames >= MAX_QUEUE_FRAMES || audio_queue.packets >= MAX_QUEUE_PACKETS ) {
            SDL_Delay( QUEUE_FULL_DELAY );
        }
        else {
            if( av_read_frame( format_context, &packet ) >= 0 ) {
                if( packet.stream_index == video_stream ) {
                    video_decode_video_frame( &packet );
                }
                else if( packet.stream_index == audio_stream && audio_codec_context ) {
                    packet_queue_put( &audio_queue, &packet );
                }
                else {
                    // packet from an unused stream: release immediately
                    av_free_packet( &packet );
                }
            }
            else {
                // EOF/read error: loop back to the beginning of the file.
                av_seek_frame( format_context, -1, 0, AVSEEK_FLAG_BACKWARD|AVSEEK_FLAG_BYTE );
            }
        }
    }

    reader_running = 0;
    return 0;
}
Exemplo n.º 21
0
// Seek to the given time (seconds) in the media.
// Returns 0 on success, -1 when av_seek_frame fails.
MEDIACODEC_API int SeekToTime(MediaFormatContext_t* ctx,int timesec){
//avcodec_flush_buffers(pFormatCtx->streams[video_stream]->codec);
	
	/*
	int64_t        timestamp = timesec*AV_TIME_BASE;
	//AVStream *st;
	AVRational tbase =  {1, AV_TIME_BASE};
	AVRational tbase2 = { ctx->video.tb_num,ctx->video.tb_den};
	if(1){
		//st= ic->streams[videoStream];
		//timestamp= av_rescale_q(timestamp, tbase, st->time_base);
		//timestamp= av_rescale_q(timestamp, st->time_base,tbase);
		timestamp= av_rescale_q(timestamp, tbase, tbase2);
	}
	int     rt = av_seek_frame(ctx->fc, ctx->video.videostream, timestamp, AVSEEK_FLAG_BACKWARD);
*/


	// BUGFIX: timesec*AV_TIME_BASE was a 32-bit int multiplication and
	// overflowed for targets beyond ~35 minutes; widen before multiplying.
 	if( av_seek_frame(ctx->fc,-1,(int64_t)timesec*AV_TIME_BASE,AVSEEK_FLAG_BACKWARD)<0){
 		return -1;
 	}

	//avcodec_flush_buffers(ctx->streams[video_stream]->codec);
	return 0;
}
Exemplo n.º 22
0
// Seek to `time` (AV_TIME_BASE units), clamped to [start_time, totaltime].
// Clears the packet queues, seeks the preferred stream (video, else audio),
// and refines `currenttime` from the first packet read after the seek.
// Always emits updateTime() with the resulting position.
void VideoPlayer::seek(qint64 time)
{
    SDL_LockMutex(mutex);
    if (pFormatCtx != NULL)
    {
        clearQuene();

        // BUGFIX: stream_index was read uninitialized when neither a video
        // nor an audio stream exists; skip the seek entirely in that case.
        int stream_index = -1;
        if  (videoStream >= 0) stream_index = videoStream;
        else if(audioStream >= 0) stream_index = audioStream;

        if (stream_index >= 0)
        {
            if (time < pFormatCtx->start_time) time=pFormatCtx->start_time;
            if (time > totaltime) time = totaltime;
            // BUGFIX: av_rescale_q returns int64_t; the old `int target`
            // truncated large timestamps.
            int64_t target;
            target = av_rescale_q(time, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base);

            av_seek_frame(pFormatCtx,stream_index,target,AVSEEK_FLAG_FRAME); //AV_TIME_BASE
            currenttime = target;
            // Read one packet to learn where the seek actually landed.
            if(av_read_frame(pFormatCtx, &nextPacket)>=0)
            {
                if (nextPacket.dts>0)
                    currenttime = nextPacket.dts;
                else currenttime = nextPacket.pts - 30;
            }
        }
    }

    SDL_UnlockMutex(mutex);
    emit updateTime(currenttime);
}
Exemplo n.º 23
0
/* Decode packets until one complete video frame is produced; convert it to
 * RGB and hand back a pointer to the pixel data via *pImage. On EOF/read
 * error the input is rewound so the next call restarts from the beginning.
 * Operates on the file-scope pFormatCtx/pCodecCtx/pFrame/pFrameRGB/sws_ctx
 * state; *pImage points into pFrameRGB (valid until the next grab). */
void decode_grab(unsigned char **pImage)
{
	int rtn;
	/* Read frames */
	while ((rtn = av_read_frame(pFormatCtx, &packet)) == 0) {

			/* Is this a packet from the video stream? */
			if (packet.stream_index == videoStream) {
				// Decode video frame
				avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

				// Did we get a video frame?
				if (frameFinished) {
					// Convert the image from its native format to RGB
					sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
						  pFrame->linesize, 0, pCodecCtx->height,
						  pFrameRGB->data, pFrameRGB->linesize);

					*pImage = (uint8_t *)(pFrameRGB->data[0]);
					av_free_packet(&packet);
					return;
				}
			}

			// Free the packet that was allocated by av_read_frame
			av_free_packet(&packet);
	}
	
	/* EOF or read error: rewind so the stream loops on the next call. */
	if(rtn < 0) {
		av_seek_frame(pFormatCtx, -1, 0, AVSEEK_FLAG_ANY);
	}

}
Exemplo n.º 24
0
bool FFmpegTestWindow::setFrame(int frame)
{
	if(!_videoStream)
		return false;
	av_seek_frame(_pFormatContext, _videoStream->index, frame, 0);

	AVPacket packet;
	while(av_read_frame(_pFormatContext, &packet) >= 0) {
		if(packet.stream_index == _videoStream->index) {
			int ok;
			avcodec_decode_video2(_pCodecContext, _pFrame, &ok, &packet);
			if(!ok)
				return false;

			int linesize = _image->width() * 3;
			// Convert the image into YUV format
			if (sws_scale(_pSwsCtx, (const uint8_t * const *) _pFrame->data,
			              _pFrame->linesize, 0, _pCodecContext->height, &_rgb,
			              &linesize) < 0)
				return false;
			return true;
		}
	}
	return false;
}
Exemplo n.º 25
0
/* Reader thread: pull packets from the input context, feed packets that
 * belong to the selected stream to handle_packet(), and rewind the file
 * from the start on EOF/read errors. Runs until st->run is cleared. */
static void *read_thread(void *data)
{
	struct vidsrc_st *st = data;

	while (st->run) {
		AVPacket pkt;

		av_init_packet(&pkt);

		if (av_read_frame(st->ic, &pkt) < 0) {
			/* EOF or read error: wait a bit, rewind, retry */
			sys_msleep(1000);
			av_seek_frame(st->ic, -1, 0, 0);
			continue;
		}

		if (pkt.stream_index == st->sindex) {
			handle_packet(st, &pkt);

			/* simulate framerate */
			sys_msleep(1000/st->fps);
		}

		av_free_packet(&pkt);
	}

	return NULL;
}
Exemplo n.º 26
0
// Read up to buflen items of decoded audio into buf.
// Handles a pending seek request first (converting a byte position into an
// AV_TIME_BASE timestamp), refills the internal buffer via
// ffmpegDecodeAudio() when drained, then copies out what is available.
// Returns the count copied, 0 at end of stream, -1 on bad state.
// NOTE(review): `avail` is used both as the memcpy byte count and added to
// bufferPos/curPos — confirm buffer positions are tracked in bytes, since
// buf is short* and buflen reads like a sample count.
int EXPORT ffmpegRead (FFMPEG* ff, short* buf, int buflen) {
//fprintf(stderr, "Read\r\n");
if (!ff || !ff->cctx || !ff->fmtctx) return -1;
EnterCriticalSection(ff->cs);
if (ff->seekRequest) {
// seek backward when the target lies before the current position
int flags = (ff->seekPos<ff->curPos? AVSEEK_FLAG_BACKWARD : 0);
// byte position -> microseconds: bytes / (rate * channels * bytes-per-sample)
av_seek_frame(ff->fmtctx, -1, ff->seekPos * 1000000LL / (ff->cctx->sample_rate * ff->cctx->channels * sizeof(short)), flags);
avcodec_flush_buffers(ff->codec);
ff->seekRequest = false;
// NOTE(review): curPos = seekPos*2 presumably converts shorts -> bytes — confirm.
ff->curPos = ff->seekPos*2;
ff->bufferPos = -1;
}
// refill the decode buffer when empty or fully consumed
if (ff->bufferPos<0 || ff->bufferPos>=ff->bufferLength) {
if (!ffmpegDecodeAudio(ff)) {
// decode failed: treat as end of stream and record the final length
ff->length = ff->curPos;
LeaveCriticalSection(ff->cs);
return 0;
}}
int avail = ff->bufferLength - ff->bufferPos;
if (buflen<avail) avail=buflen;
memcpy(buf, ff->buffer+ff->bufferPos, avail);
ff->bufferPos+=avail;
ff->curPos += avail;
LeaveCriticalSection(ff->cs);
return avail;
}
Exemplo n.º 27
0
// Seek the video thread to `ms` milliseconds; `flags` is passed through to
// av_seek_frame. When no video stream is open yet, the position is stored
// and applied at start. Resets the pacing bookkeeping, seeks the format
// context, and flushes the decoder.
void VideoThread::seek(int ms, int flags)
{
	//qDebug() << "VideoThread::seek()";
// 	QMutexLocker locker(&mutex);
	if(m_video_stream < 0)
	{
		// stream not opened yet: defer the seek until playback starts
		m_seekOnStart = ms;
		return;
	}
	
	// restart the frame-pacing clocks at the new position
	m_total_runtime = ms;
	m_run_time.start();
	m_frameTimer = ms;
	

	double seconds = (double)ms / 1000.0f;

	// seconds -> AV_TIME_BASE ticks -> the stream's time_base units
	int64_t seek_target = (int64_t)(seconds * AV_TIME_BASE);

	seek_target = av_rescale_q(seek_target, m_time_base_rational,
		m_av_format_context->streams[m_video_stream]->time_base);

	av_seek_frame(m_av_format_context, m_video_stream, seek_target, flags);

	avcodec_flush_buffers(m_video_codec_context);
}
Exemplo n.º 28
0
bool FFMpegStream::seek(double target)
{
	AVRational &base = formatContext->streams[targetStream]->time_base;
	int64_t ts = target*base.den/double(base.num);
	avcodec_flush_buffers(codecContext);
	return av_seek_frame(formatContext, targetStream, ts, AVSEEK_FLAG_BACKWARD) >= 0;
}
Exemplo n.º 29
0
// Seek to `filepos` (Mixxx sample position). Converts the position into the
// stream's time_base, seeks backward to the nearest keyframe, then primes
// the input buffer. Returns filepos on success, 0 on seek failure.
long SoundSourceFFmpeg::seek(long filepos)
{
    int ret = 0;
    int hours, mins, secs;   // NOTE(review): unused locals — presumably leftovers
    long fspos, diff;
    AVRational time_base = pFormatCtx->streams[audioStream]->time_base;

    lock();

    // Mixxx sample position -> ffmpeg stream timestamp
    fspos = mixxx2ffmpeg(filepos, time_base);
    //  qDebug() << "ffmpeg: seek0.5 " << packet.pos << "ld -- " << packet.duration << " -- " << pFormatCtx->streams[audioStream]->cur_dts << "ld";
    qDebug() << "ffmpeg: seek (ffpos " << fspos << "d) (mixxxpos " << filepos << "d)";

    ret = av_seek_frame(pFormatCtx, audioStream, fspos, AVSEEK_FLAG_BACKWARD /*AVSEEK_FLAG_ANY*/);

    // av_seek_frame returns a negative value on error (non-zero here).
    if (ret){
        qDebug() << "ffmpeg: Seek ERROR ret(" << ret << ") filepos(" << filepos << "d) at file"
                 << m_qFilename;
        unlock();
        return 0;
    }

    // Pull data so cur_dts reflects where the seek actually landed.
    readInput();
    diff = ffmpeg2mixxx(fspos - pFormatCtx->streams[audioStream]->cur_dts, time_base);
    qDebug() << "ffmpeg: seeked (dts " << pFormatCtx->streams[audioStream]->cur_dts << ") (diff " << diff << ") (diff " << fspos - pFormatCtx->streams[audioStream]->cur_dts << ")";

    // Offset intentionally reset to 0 rather than `diff` (see comment).
    bufferOffset = 0; //diff;
    if (bufferOffset > bufferSize) {
        qDebug() << "ffmpeg: ERROR BAD OFFFFFFSET, buffsize: " << bufferSize << " offset: " << bufferOffset;
        bufferOffset = 0;
    }
    unlock();
    return filepos;
}
Exemplo n.º 30
0
// Seek the player to `sec` seconds: pause demuxing, drain both packet
// queues, seek the format context, flush both codecs, then resume and wait
// briefly for video packets to reappear.
void playerseek(void *hplayer, DWORD sec)
{
    if (!hplayer) return;
    PLAYER *player = (PLAYER*)hplayer;

    // start render if paused
    if (player->nPlayerStatus & PS_R_PAUSE) renderstart(player->hCoreRender);

    // render seek start (PS_D_PAUSE suspends the demux loop)
    player->nPlayerStatus |= PS_D_PAUSE;
    renderseek(player->hCoreRender, sec);

    // wait for packet queue empty
    while (!pktqueue_isempty_a(&(player->PacketQueue))) Sleep(20);
    while (!pktqueue_isempty_v(&(player->PacketQueue))) Sleep(20);

    // seek frame (seconds -> AV_TIME_BASE units, widened before multiply)
    av_seek_frame(player->pAVFormatContext, -1, (int64_t)sec * AV_TIME_BASE, 0);
    if (player->iAudioStreamIndex != -1) avcodec_flush_buffers(player->pAudioCodecContext);
    if (player->iVideoStreamIndex != -1) avcodec_flush_buffers(player->pVideoCodecContext);

    // render seek done, -1 means done
    renderseek(player->hCoreRender, -1);
    player->nPlayerStatus &= ~PS_D_PAUSE;

    // wait for video packet queue not empty with timeout 200ms
    int i = 10; while (i-- && pktqueue_isempty_v(&(player->PacketQueue))) Sleep(20);

    // pause render if needed
    if (player->nPlayerStatus & PS_R_PAUSE) renderpause(player->hCoreRender);
}