bool CvCapture_FFMPEG::grabFrame()
{
    bool valid = false;
    static bool bFirstTime = true;
    int got_picture;

    // First time we're called, set packet.data to NULL to indicate it
    // doesn't have to be freed
    if (bFirstTime) {
        bFirstTime = false;
        packet.data = NULL;
    }

    if( !ic || !video_st )
        return false;

    // free last packet if exist
    if (packet.data != NULL) {
        av_free_packet (&packet);
    }

    // get the next frame
    while (!valid && (av_read_frame(ic, &packet) >= 0)) {
        if( packet.stream_index != video_stream ) {
            av_free_packet (&packet);
            continue;
        }

#if LIBAVFORMAT_BUILD > 4628
        avcodec_decode_video(video_st->codec,
                             picture, &got_picture,
                             packet.data, packet.size);
#else
        avcodec_decode_video(&video_st->codec,
                             picture, &got_picture,
                             packet.data, packet.size);
#endif

        if (got_picture) {
            // we have a new picture, so memorize it
            picture_pts = packet.pts;
            valid = true;
        }
    }

    // return if we have a new picture or not
    return valid;
}
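/*
 * Note: every snippet in this collection uses the legacy
 * avcodec_decode_video() entry point, which later libavcodec releases
 * replaced with avcodec_decode_video2() taking an AVPacket. A minimal,
 * hedged compat wrapper is sketched below; the 52.23.0 cutoff mirrors the
 * guard used in the ffdecode() example further down, and
 * decode_video_compat() is an illustrative name, not part of any of the
 * projects quoted here.
 */
static int decode_video_compat(AVCodecContext *ctx, AVFrame *frame,
                               int *got_picture, uint8_t *data, int size)
{
#if LIBAVCODEC_VERSION_INT <= ((52<<16)+(23<<8)+0)
    /* old API: raw buffer + size */
    return avcodec_decode_video(ctx, frame, got_picture, data, size);
#else
    /* new API: wrap the buffer in an AVPacket */
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = data;
    pkt.size = size;
    return avcodec_decode_video2(ctx, frame, got_picture, &pkt);
#endif
}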
static int pass_frame_to_decoder(AVCodecContext *avctx, AVFrame *picture,
                                 int inlen, unsigned char *in,
                                 int *outlen, char *out)
{
    int bytes_decoded;
    int got_picture;

    bytes_decoded = avcodec_decode_video(avctx, picture, &got_picture, in, inlen);
    if (bytes_decoded != inlen) {
        fprintf(stderr, "codec_ffmpeg: decode: failed to decode whole frame %d / %d\n",
                bytes_decoded, inlen);
        return -1;
    }
    if (!got_picture) {
        fprintf(stderr, "codec_ffmpeg: decode: failed to get picture\n");
        return -1;
    }

    frame_to_frame_xlate(avctx, picture, outlen, out);
    return 0;
}
int avhelper_decode_video(void *ptr, uint8_t *data, int len, uint8_t *dst[3])
{
    int got_picture = 0;
    el_decoder_t *e = (el_decoder_t *) ptr;
    int result = avcodec_decode_video(e->codec_ctx, e->frame, &got_picture, data, len);

    if (!got_picture || result <= 0) {
        avhelper_frame_unref(e->frame);
        return 0;
    }

    e->input->data[0] = e->frame->data[0];
    e->input->data[1] = e->frame->data[1];
    e->input->data[2] = e->frame->data[2];
    e->input->data[3] = e->frame->data[3];

    e->output->data[0] = dst[0];
    e->output->data[1] = dst[1];
    e->output->data[2] = dst[2];

    yuv_convert_any3(e->scaler, e->input, e->frame->linesize,
                     e->output, e->input->format, e->pixfmt);

    avhelper_frame_unref(e->frame);
    return 1;
}
int video_thread(void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;
    AVFrame *pFrame;

    pFrame = avcodec_alloc_frame();

    for (;;) {
        if (packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        // Decode video frame
        len1 = avcodec_decode_video(is->video_st->codec, pFrame, &frameFinished,
                                    packet->data, packet->size);
        // Did we get a video frame?
        if (frameFinished) {
            if (queue_picture(is, pFrame) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    av_free(pFrame);
    return 0;
}
AVFrame *DataSource::decodeVideo(AVCodecContext *codecCtx, AVPacket *packet)
{
    int frameFinished = 0;

    avcodec_decode_video(codecCtx, m_rawFrame, &frameFinished,
                         packet->data, packet->size);
    if (frameFinished) {
        AVFrame *f = avcodec_alloc_frame();
        if (f == NULL) {
            DPRINT("can not alloc frame.");
            return NULL;
        }
        avpicture_alloc((AVPicture *) f, PIX_FMT_RGB24,
                        codecCtx->width, codecCtx->height);
        sws_scale(this->m_swsCtx, m_rawFrame->data, m_rawFrame->linesize,
                  0, codecCtx->height, f->data, f->linesize);
        return f;
    }
    return NULL;
}
//////////////////
// Get next frame
bool LAVCVideoProvider::GetNextFrame() {
    // Read packet
    AVPacket packet;
    while (av_read_frame(lavcfile->fctx, &packet) >= 0) {
        // Check if packet is part of video stream
        if (packet.stream_index == vidStream) {
            // Decode frame
            int frameFinished;
            avcodec_decode_video(codecContext, frame, &frameFinished,
                                 packet.data, packet.size);

            // Success?
            if (frameFinished) {
                // Set time
                lastDecodeTime = packet.dts;

                // Free packet
                av_free_packet(&packet);
                return true;
            }
        }

        // Free packets that did not produce a frame as well;
        // the next av_read_frame would otherwise leak them
        av_free_packet(&packet);
    }

    // No more packets
    return false;
}
// gets the next frame in the default color space
bool SimpleVideo::getNextFrameRaw()
{
    // frameFinished != 0 ==> frame is finished
    int frameFinished = 0;
    AVPacket packet;

    // keep reading packets until the frame is done or no packets are left
    while ((frameFinished == 0) &&
           (av_read_frame(m_pFormatContext, &packet) >= 0)) {
        // is this a packet from the video stream?
        if (packet.stream_index == m_iVideoStreamIndex) {
            // Decode video frame
            avcodec_decode_video(m_pCodecContext, m_pFrame, &frameFinished,
                                 packet.data, packet.size);
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    return (frameFinished != 0);
}
int CFFMPEGLoader::ReadVideoData(unsigned char **buf, UINT *width, UINT *height)
{
    if (iPKTVideoLength <= 0)
        return 0;

    int frameFinished;
    int ret = avcodec_decode_video(pVCodecCon, pFrame, &frameFinished,
                                   pktLastVideo[0].data, pktLastVideo[0].size);

    *width = *height = 0;
    if (ret > 0) {
        *width = pVCodecCon->width;
        *height = pVCodecCon->height;
        if (buf)
            *buf = (unsigned char *) pFrame->data[0];
    }

    if (pktLastVideo[0].data)
        av_free_packet(&pktLastVideo[0]);

    iPKTVideoLength--;
    for (UINT i = 0; i < iPKTVideoLength; i++)
        pktLastVideo[i] = pktLastVideo[i + 1];
    pktLastVideo[iPKTVideoLength].data = NULL;

    return ret;
}
static BOOL tsmf_ffmpeg_decode_video(ITSMFDecoder* decoder, const BYTE *data,
                                     UINT32 data_size, UINT32 extensions)
{
    TSMFFFmpegDecoder* mdecoder = (TSMFFFmpegDecoder*) decoder;
    int decoded;
    int len;
    AVFrame *frame;
    BOOL ret = TRUE;

#if LIBAVCODEC_VERSION_MAJOR < 52 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR <= 20)
    len = avcodec_decode_video(mdecoder->codec_context, mdecoder->frame,
                               &decoded, data, data_size);
#else
    {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = (BYTE *) data;
        pkt.size = data_size;

        if (extensions & TSMM_SAMPLE_EXT_CLEANPOINT)
            pkt.flags |= AV_PKT_FLAG_KEY;

        len = avcodec_decode_video2(mdecoder->codec_context, mdecoder->frame,
                                    &decoded, &pkt);
    }
#endif

    if (len < 0) {
        WLog_ERR(TAG, "data_size %d, avcodec_decode_video failed (%d)", data_size, len);
        ret = FALSE;
    }
    else if (!decoded) {
        WLog_ERR(TAG, "data_size %d, no frame is decoded.", data_size);
        ret = FALSE;
    }
    else {
        DEBUG_TSMF("linesize[0] %d linesize[1] %d linesize[2] %d linesize[3] %d "
                   "pix_fmt %d width %d height %d",
                   mdecoder->frame->linesize[0], mdecoder->frame->linesize[1],
                   mdecoder->frame->linesize[2], mdecoder->frame->linesize[3],
                   mdecoder->codec_context->pix_fmt,
                   mdecoder->codec_context->width, mdecoder->codec_context->height);

        mdecoder->decoded_size = avpicture_get_size(mdecoder->codec_context->pix_fmt,
                                                    mdecoder->codec_context->width,
                                                    mdecoder->codec_context->height);
        mdecoder->decoded_data = calloc(1, mdecoder->decoded_size);
        if (!mdecoder->decoded_data)
            return FALSE;

#if LIBAVCODEC_VERSION_MAJOR < 55
        frame = avcodec_alloc_frame();
#else
        frame = av_frame_alloc();
#endif
        avpicture_fill((AVPicture*) frame, mdecoder->decoded_data,
                       mdecoder->codec_context->pix_fmt,
                       mdecoder->codec_context->width,
                       mdecoder->codec_context->height);
        av_picture_copy((AVPicture*) frame, (AVPicture*) mdecoder->frame,
                        mdecoder->codec_context->pix_fmt,
                        mdecoder->codec_context->width,
                        mdecoder->codec_context->height);
        av_free(frame);
    }

    return ret;
}
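/*
 * The LIBAVCODEC_VERSION_MAJOR < 55 guard above is the usual way to paper
 * over the avcodec_alloc_frame() -> av_frame_alloc() rename. A hedged
 * one-line helper, assuming the same cutoff as the snippet above
 * (alloc_frame_compat is an illustrative name, not project code):
 */
static AVFrame *alloc_frame_compat(void)
{
#if LIBAVCODEC_VERSION_MAJOR < 55
    return avcodec_alloc_frame();   /* old allocator, deprecated in lavc 55 */
#else
    return av_frame_alloc();        /* current allocator */
#endif
}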
static mblk_t *jpeg2yuv(uint8_t *jpgbuf, int bufsize, MSVideoSize *reqsize){
    AVCodecContext av_context;
    int got_picture = 0;
    AVFrame orig;
    AVPicture dest;
    mblk_t *ret;
    struct SwsContext *sws_ctx;

    avcodec_get_context_defaults(&av_context);
    if (avcodec_open(&av_context, avcodec_find_decoder(CODEC_ID_MJPEG)) < 0){
        ms_error("jpeg2yuv: avcodec_open failed");
        return NULL;
    }
    if (avcodec_decode_video(&av_context, &orig, &got_picture, jpgbuf, bufsize) < 0){
        ms_error("jpeg2yuv: avcodec_decode_video failed");
        avcodec_close(&av_context);
        return NULL;
    }

    ret = allocb(avpicture_get_size(PIX_FMT_YUV420P, reqsize->width, reqsize->height), 0);
    ret->b_wptr = ret->b_datap->db_lim;
    avpicture_fill(&dest, ret->b_rptr, PIX_FMT_YUV420P, reqsize->width, reqsize->height);

    sws_ctx = sws_getContext(av_context.width, av_context.height, PIX_FMT_YUV420P,
                             reqsize->width, reqsize->height, PIX_FMT_YUV420P,
                             SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (sws_scale(sws_ctx, orig.data, orig.linesize, 0, av_context.height,
                  dest.data, dest.linesize) < 0){
        ms_error("jpeg2yuv: sws_scale() failed.");
    }
    sws_freeContext(sws_ctx);
    avcodec_close(&av_context);
    return ret;
}
int FfmpegCamera::Capture( Image &image )
{
    static int frameCount = 0;
    AVPacket packet;
    int frameComplete = false;

    while ( !frameComplete && (av_read_frame( mFormatContext, &packet ) >= 0) )
    {
        Debug( 5, "Got packet from stream %d", packet.stream_index );
        if ( packet.stream_index == mVideoStreamId )
        {
            if ( avcodec_decode_video( mCodecContext, mRawFrame, &frameComplete,
                                       packet.data, packet.size ) < 0 )
                Fatal( "Unable to decode frame at frame %d", frameCount );
            Debug( 3, "Decoded video packet at frame %d", frameCount );

            if ( frameComplete )
            {
                Debug( 1, "Got frame %d", frameCount );
#if HAVE_LIBSWSCALE
                if ( sws_scale( mConvertContext, mRawFrame->data, mRawFrame->linesize,
                                0, mCodecContext->height,
                                mFrame->data, mFrame->linesize ) < 0 )
                    Fatal( "Unable to convert raw format %d to RGB at frame %d",
                           mCodecContext->pix_fmt, frameCount );
#else // HAVE_LIBSWSCALE
                Fatal( "You must compile ffmpeg with the --enable-swscale option to use ffmpeg cameras" );
#endif // HAVE_LIBSWSCALE
                image.Assign( mCodecContext->width, mCodecContext->height,
                              colours, (unsigned char *)mFrame->data[0] );
                frameCount++;
            }
        }
        av_free_packet( &packet );
    }
    return (0);
}
int FFMPEG::decode(uint8_t *data, long data_size, VFrame *frame_out)
{
    // NOTE: frame must already have data space allocated
    got_picture = 0;
    int length = avcodec_decode_video(context, picture, &got_picture,
                                      data, data_size);
    if (length < 0) {
        printf("FFMPEG::decode error decoding frame\n");
        return 1;
    }

    if (! got_picture) {
        // signal the caller there is no picture yet
        return FFMPEG_LATENCY;
    }

    int result = convert_cmodel((AVPicture *)picture, context->pix_fmt,
                                asset->width, asset->height, frame_out);
    return result;
}
// Returns the number of bytes of szNal that were consumed
int H264DecWrapper::Decode(unsigned char* szNal, int iSize,
                           unsigned char* szOutImage, int& iOutSize,
                           bool& bGetFrame)
{
    int got_picture_ptr = 0;
    int len = avcodec_decode_video(c, picture, &got_picture_ptr, szNal, iSize);
    if (len < 0) {
        fprintf(stderr, "Error while decoding frame %d\n", frame);
        return -1;
    }

    bGetFrame = (got_picture_ptr != 0);
    if (bGetFrame) {
        iOutSize = 0;
        iOutSize += output(picture->data[0], picture->linesize[0],
                           c->width, c->height, szOutImage + iOutSize);
        iOutSize += output(picture->data[1], picture->linesize[1],
                           c->width/2, c->height/2, szOutImage + iOutSize);
        iOutSize += output(picture->data[2], picture->linesize[2],
                           c->width/2, c->height/2, szOutImage + iOutSize);
    }
    return len;
}
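/*
 * Hedged usage sketch for H264DecWrapper::Decode() above: feed a buffer of
 * H.264 data and advance by the consumed-byte count the decoder returns,
 * the same pattern IoAVCodec_decodeVideoPacket() uses further down. The
 * names dec, buf, bufLen and yuv are illustrative only.
 */
void drainBuffer(H264DecWrapper &dec, unsigned char *buf, int bufLen,
                 unsigned char *yuv)
{
    while (bufLen > 0) {
        int outSize = 0;
        bool gotFrame = false;
        int used = dec.Decode(buf, bufLen, yuv, outSize, gotFrame);
        if (used < 0)
            break;              // decode error, stop
        if (gotFrame) {
            // outSize bytes of planar YUV are now available in yuv
        }
        buf += used;            // skip past the consumed bytes
        bufLen -= used;
    }
}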
/*
 * Class:     com_zhutieju_testservice_H264Android
 * Method:    dalDecoder
 * Signature: ([BI[B)I
 */
JNIEXPORT jint JNICALL Java_com_zhutieju_testservice_H264Android_dalDecoder
  (JNIEnv* env, jobject thiz, jbyteArray in, jint nalLen, jbyteArray out)
{
    int i;
    int got_picture = 0;
    jbyte * Buf = (jbyte*)(*env)->GetByteArrayElements(env, in, 0);
    jbyte * Pixel = (jbyte*)(*env)->GetByteArrayElements(env, out, 0);

    int consumed_bytes = avcodec_decode_video(c, picture, &got_picture, Buf, nalLen);
    if (got_picture > 0)
    {
        DisplayYUV_16((int*)Pixel, picture->data[0], picture->data[1],
                      picture->data[2], c->width, c->height,
                      picture->linesize[0], picture->linesize[1], iWidth);
        /*
        for(i=0; i<c->height; i++)
            fwrite(picture->data[0] + i * picture->linesize[0], 1, c->width, outf);
        for(i=0; i<c->height/2; i++)
            fwrite(picture->data[1] + i * picture->linesize[1], 1, c->width/2, outf);
        for(i=0; i<c->height/2; i++)
            fwrite(picture->data[2] + i * picture->linesize[2], 1, c->width/2, outf);
        // */
    }

    (*env)->ReleaseByteArrayElements(env, in, Buf, 0);
    (*env)->ReleaseByteArrayElements(env, out, Pixel, 0);
    return consumed_bytes;
}
bool Encoder::getNextFrame(void)
{
    int isFrameFinished;

    if (avFrame)
        av_free(avFrame);

    avFrame = avcodec_alloc_frame();
    if (avFrame == NULL) {
        systemLog->sysLog(ERROR, "cannot allocate a YUV frame. cannot decode frame");
        return false;
    }

    while (av_read_frame(avFormatContext, &avPacket) >= 0) {
        frameRead++;

        // Is this packet a packet from this video stream ?
        if (avPacket.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video(avCodecContext, avFrame, &isFrameFinished,
                                 avPacket.data, avPacket.size);
            if (isFrameFinished)
                return true;
        }
    }
    return false;
}
void VPlayer::decode()
{
    if (av_read_frame(d->pFormatCtx, &d->packet) >= 0) {
        if (d->packet.stream_index == d->videoStream) {
            avcodec_decode_video(d->pCodecCtx, d->pFrame, &d->frameFinished,
                                 d->packet.data, d->packet.size);
            if (d->frameFinished) {
                img_convert((AVPicture *)d->pFrameRGB, PIX_FMT_RGBA32,
                            (AVPicture *)d->pFrame, PIX_FMT_YUV420P,
                            d->pCodecCtx->width, d->pCodecCtx->height);
                d->currentFrame = new QImage(d->pFrameRGB->data[0],
                                             d->pCodecCtx->width,
                                             d->pCodecCtx->height,
                                             QImage::Format_ARGB32);
                // d->video->setPixmap(QPixmap::fromImage(*d->currentFrame));
                emit frameReady(*d->currentFrame);
                // delete d->currentFrame;
            } else {
                qDebug("Video not ready");
            }
        }
    } else {
        emit videoDone();
        d->vidtimer->stop();
    }
    av_free_packet(&d->packet);
}
JNIEXPORT void JNICALL Java_gestalt_candidates_NativeMovieTextureProducer_tryToReadNewFrame
  (JNIEnv *env, jobject obj)
{
    if (!_myNewFrameIsReady) {
        if (av_read_frame(pFormatCtx, &packet) >= 0) {
            /* Is this a packet from the video stream? */
            if (packet.stream_index == videoStream) {
                /* Decode video frame */
                avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                     packet.data, packet.size);
                /* Is the frame ready? */
                if (frameFinished) {
                    /* Convert the image from its native format to RGB */
                    img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24,
                                (AVPicture*)pFrame, pCodecCtx->pix_fmt,
                                pCodecCtx->width, pCodecCtx->height);
                    _myNewFrameIsReady = true;
                }
            }
        } else {
            // Do nothing
        }
    }
}
void VideoPlayer::execloop()
{
    while (!time2die) {
        while (av_read_frame(pFormatCtx, &packet) >= 0 && !time2die) {
            // Is this a packet from the video stream?
            if (packet.stream_index == videoStream) {
                // Decode video frame
                avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                     packet.data, packet.size);
                // Did we get a video frame?
                if (frameFinished) {
                    // note: building a scaler context per frame works but is
                    // wasteful; it could be created once and reused
                    SwsContext *img_convert_ctx =
                        sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                       pCodecCtx->pix_fmt,
                                       pCodecCtx->width, pCodecCtx->height,
                                       PIX_FMT_RGB32, SWS_BICUBIC,
                                       NULL, NULL, NULL);
                    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
                              0, pCodecCtx->height,
                              pFrameRGB->data, pFrameRGB->linesize);
                    sws_freeContext(img_convert_ctx);
                    build();
                    //SDL_Delay(42);
                }
            }
            // Free the packet allocated by av_read_frame
            av_free_packet(&packet);
        }
        // stream finished, seek to beginning
    }
}
static int eiio_video_grab(eiio_video_t *video)
{
    AVPacket packet;
    int remain = 0;

    memset(&packet, 0, sizeof(packet));
    while (av_read_frame(video->format, &packet) >= 0) {
        int finish;

        if (packet.stream_index != video->stream_index) {
            goto next;
        }
        avcodec_decode_video(video->codec, video->frame, &finish,
                             packet.data, packet.size);
        if (!finish) {
            goto next;
        }
        remain = 1;
        break;
    next:
        // release packets that were skipped or did not finish a frame
        if (packet.data != NULL) {
            av_free_packet(&packet);
            packet.data = NULL;
        }
    }

    if (packet.data) {
        av_free_packet(&packet);
    }
    return remain;
}
/*
 * TODO: check input/output size
 */
static int ffdecode(struct viddec_state *st, struct vidframe *frame,
                    bool eof, struct mbuf *src)
{
    int i, got_picture, ret, err;

    /* assemble packets in "mbuf" */
    err = mbuf_write_mem(st->mb, mbuf_buf(src), mbuf_get_left(src));
    if (err)
        return err;

    if (!eof)
        return 0;

    st->mb->pos = 0;

    if (!st->got_keyframe) {
        err = EPROTO;
        goto out;
    }

#if LIBAVCODEC_VERSION_INT <= ((52<<16)+(23<<8)+0)
    ret = avcodec_decode_video(st->ctx, st->pict, &got_picture,
                               st->mb->buf, (int)mbuf_get_left(st->mb));
#else
    do {
        AVPacket avpkt;

        av_init_packet(&avpkt);
        avpkt.data = st->mb->buf;
        avpkt.size = (int)mbuf_get_left(st->mb);

        ret = avcodec_decode_video2(st->ctx, st->pict, &got_picture, &avpkt);
    } while (0);
#endif

    if (ret < 0) {
        err = EBADMSG;
        goto out;
    }

    mbuf_skip_to_end(src);

    if (got_picture) {
        for (i=0; i<4; i++) {
            frame->data[i]     = st->pict->data[i];
            frame->linesize[i] = st->pict->linesize[i];
        }
        frame->size.w = st->ctx->width;
        frame->size.h = st->ctx->height;
        frame->fmt    = VID_FMT_YUV420P;
    }

 out:
    if (eof)
        mbuf_rewind(st->mb);

    return err;
}
static int decode_and_send_const(DNxHDDecodeStreamConnect* connect,
                                 const unsigned char* buffer,
                                 unsigned int bufferSize)
{
    int finished;

    /* We know that avcodec_decode_video will not modify the input data,
       so we can cast buffer to non-const */
    avcodec_decode_video(connect->decoder->dec, connect->decoder->decFrame,
                         &finished, (unsigned char*)buffer, bufferSize);
    if (!finished) {
        ml_log_error("Failed to decode DNxHD video\n");
        return 0;
    }

    /* reformat decoded frame */
    yuv422_to_yuv422(connect->streamInfo.width, connect->streamInfo.height,
                     0, connect->decoder->decFrame, connect->sinkBuffer);

    /* send decoded frame to sink */
    if (!msk_receive_stream_frame(connect->sink, connect->sinkStreamId,
                                  connect->sinkBuffer, connect->sinkBufferSize)) {
        ml_log_error("failed to write frame to media sink\n");
        return 0;
    }

    return 1;
}
int IoAVCodec_decodeVideoPacket(IoAVCodec *self, AVCodecContext *c,
                                uint8_t *inbuf, size_t size)
{
    AVFrame *decodeFrame = DATA(self)->decodedFrame;

    while (size > 0) {
        int got_picture;
        // len must be signed: avcodec_decode_video returns a negative value
        // on error, which the original size_t could never test below zero
        int len = avcodec_decode_video(c, DATA(self)->decodedFrame,
                                       &got_picture, inbuf, size);
        if (len < 0) {
            printf("Error while decoding video packet\n");
            return -1;
        }

        if (got_picture) {
            IoList_rawAppend_(DATA(self)->frames,
                              IoAVCode_frameSeqForAVFrame_(self, decodeFrame,
                                                           c->pix_fmt,
                                                           c->width, c->height));
        }

        size  -= len;
        inbuf += len;
    }
    return 0;
}
bool CAVInfo::GetNextFrame()
{
    AVPacket packet;
    int frameFinished;

    while (av_read_frame(m_fctx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == m_videoStream) {
            // Decode video frame
            avcodec_decode_video(m_acctx, m_pFrame, &frameFinished,
                                 packet.data, packet.size);
            // Did we get a video frame?
            if (frameFinished) {
                // Convert the image from its native format to RGB
                img_convert((AVPicture *)m_pFrameRGB, PIX_FMT_RGBA32,
                            (AVPicture*)m_pFrame, m_acctx->pix_fmt,
                            m_acctx->width, m_acctx->height);
                // Free the packet before handing the frame to the caller,
                // who can then process it (save to disk etc.), e.g.
                // DoSomethingWithTheImage(pFrameRGB);
                av_free_packet(&packet);
                return true;
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }
    return false;
}
int FFMpegVideoDecoder::decode(unsigned char* inBuf, int inBufSize,
                               unsigned char* outBuf, int outBufSize)
{
    int gotPic = 0;
    int ret = avcodec_decode_video(ctx, decodedFrame, &gotPic, inBuf, inBufSize);
    return ret;
}
int Decode(HDEC hDec, const void *inbuf, unsigned int size, VDECOUTPUT *pVFrame)
{
    AVFrame *picture;
    AVCodecContext *c;
    struct decoder_handle *pd;
    int len;

    pd = (struct decoder_handle*)hDec;
    if (pd->tag != 'HDEC')
        return -1;

    len = avcodec_decode_video(pd->c, pd->picture, &pVFrame->bDisplay, inbuf, size);
    if (len < 0)
        return -1;

    picture = pd->picture;
    c = pd->c;

    pVFrame->pY = picture->data[0];
    pVFrame->pU = picture->data[1];
    pVFrame->pV = picture->data[2];
    pVFrame->uYStride  = picture->linesize[0];
    pVFrame->uUVStride = picture->linesize[1];
    pVFrame->width  = c->width;
    pVFrame->height = c->height;
    pVFrame->interlaced_frame = picture->interlaced_frame;

    return len;
}
static void *video_thread(ALLEGRO_THREAD * t, void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;
    AVFrame *pFrame;
    double pts;

    (void)t;

    pFrame = avcodec_alloc_frame();

    for (;;) {
        if (packet_queue_get(is, &is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        if (packet->data == flush_pkt.data) {
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }
        pts = 0;

        // Save global pts to be stored in pFrame
        FIXME_global_video_pkt_pts = packet->pts;

        // Decode video frame
#ifdef FFMPEG_0_8
        len1 = avcodec_decode_video2(is->video_st->codec, pFrame,
                                     &frameFinished, packet);
#else
        len1 = avcodec_decode_video(is->video_st->codec, pFrame,
                                    &frameFinished, packet->data, packet->size);
#endif
        if (packet->dts == NOPTS_VALUE && pFrame->opaque
            && *(int64_t *) pFrame->opaque != NOPTS_VALUE) {
            pts = 0; //*(uint64_t *) pFrame->opaque;
        }
        else if (packet->dts != NOPTS_VALUE) {
            pts = packet->dts;
        }
        else {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);

        // Did we get a video frame?
        if (frameFinished) {
            //pts = synchronize_video(is, pFrame, pts);
            if (queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }
        av_free_packet(packet);
    }
    av_free(pFrame);
    return NULL;
}
static int tsmf_ffmpeg_decode_video(ITSMFDecoder * decoder, const uint8 * data,
                                    uint32 data_size, uint32 extensions)
{
    TSMFFFmpegDecoder * mdecoder = (TSMFFFmpegDecoder *) decoder;
    int decoded;
    int len;
    int ret = 0;
    AVFrame * frame;

#if LIBAVCODEC_VERSION_MAJOR < 52 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR <= 20)
    len = avcodec_decode_video(mdecoder->codec_context, mdecoder->frame,
                               &decoded, data, data_size);
#else
    {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = (uint8 *) data;
        pkt.size = data_size;
        if (extensions & TSMM_SAMPLE_EXT_CLEANPOINT)
            pkt.flags |= AV_PKT_FLAG_KEY;
        len = avcodec_decode_video2(mdecoder->codec_context, mdecoder->frame,
                                    &decoded, &pkt);
    }
#endif

    if (len < 0) {
        LLOGLN(0, ("tsmf_ffmpeg_decode_video: data_size %d, avcodec_decode_video failed (%d)",
                   data_size, len));
        ret = 1;
    }
    else if (!decoded) {
        LLOGLN(0, ("tsmf_ffmpeg_decode_video: data_size %d, no frame is decoded.",
                   data_size));
        ret = 1;
    }
    else {
        LLOGLN(10, ("tsmf_ffmpeg_decode_video: linesize[0] %d linesize[1] %d "
                    "linesize[2] %d linesize[3] %d pix_fmt %d width %d height %d",
                    mdecoder->frame->linesize[0], mdecoder->frame->linesize[1],
                    mdecoder->frame->linesize[2], mdecoder->frame->linesize[3],
                    mdecoder->codec_context->pix_fmt,
                    mdecoder->codec_context->width, mdecoder->codec_context->height));

        mdecoder->decoded_size = avpicture_get_size(mdecoder->codec_context->pix_fmt,
                                                    mdecoder->codec_context->width,
                                                    mdecoder->codec_context->height);
        mdecoder->decoded_data = malloc(mdecoder->decoded_size);
        frame = avcodec_alloc_frame();
        avpicture_fill((AVPicture *) frame, mdecoder->decoded_data,
                       mdecoder->codec_context->pix_fmt,
                       mdecoder->codec_context->width,
                       mdecoder->codec_context->height);
        av_picture_copy((AVPicture *) frame, (AVPicture *) mdecoder->frame,
                        mdecoder->codec_context->pix_fmt,
                        mdecoder->codec_context->width,
                        mdecoder->codec_context->height);
        av_free(frame);
    }
    return ret;
}
static int icvGrabFrameAVI_FFMPEG( CvCaptureAVI_FFMPEG* capture )
{
    int valid = 0;
    static bool bFirstTime = true;
    static AVPacket pkt;
    int got_picture;

    // First time we're called, set packet.data to NULL to indicate it
    // doesn't have to be freed
    if (bFirstTime) {
        bFirstTime = false;
        pkt.data = NULL;
    }

    if( !capture || !capture->ic || !capture->video_st )
        return 0;

    // free last packet if exist
    if (pkt.data != NULL) {
        av_free_packet (&pkt);
    }

    // get the next frame
    while ((0 == valid) && (av_read_frame(capture->ic, &pkt) >= 0)) {
        if( pkt.stream_index != capture->video_stream ) {
            // release non-video packets before reading the next one,
            // otherwise each of them is leaked
            av_free_packet (&pkt);
            continue;
        }

#if LIBAVFORMAT_BUILD > 4628
        avcodec_decode_video(capture->video_st->codec,
                             capture->picture, &got_picture,
                             pkt.data, pkt.size);
#else
        avcodec_decode_video(&capture->video_st->codec,
                             capture->picture, &got_picture,
                             pkt.data, pkt.size);
#endif

        if (got_picture) {
            // we have a new picture, so memorize it
            capture->picture_pts = pkt.pts;
            valid = 1;
        }
    }

    // return if we have a new picture or not
    return valid;
}
static void vdpau_decode(struct media_codec *mc, void *decoder,
                         struct media_queue *mq, struct media_buf *mb,
                         int reqsize)
{
    video_decoder_t *vd = decoder;
    media_codec_t *cw = mb->mb_cw;
    AVCodecContext *ctx = cw->codec_ctx;
    vdpau_codec_t *vc = mc->opaque;
    media_pipe_t *mp = vd->vd_mp;
    vdpau_video_surface_t *vvs;
    int got_pic = 0;
    AVFrame *frame = vd->vd_frame;

    if(vd->vd_do_flush) {
        do {
            avcodec_decode_video(ctx, frame, &got_pic, NULL, 0);
        } while(got_pic);

        vd->vd_do_flush = 0;
        vd->vd_prevpts = AV_NOPTS_VALUE;
        vd->vd_nextpts = AV_NOPTS_VALUE;
        vd->vd_estimated_duration = 0;
        avcodec_flush_buffers(ctx);
        vd->vd_compensate_thres = 5;
    }

    ctx->skip_frame = mb->mb_skip == 1 ? AVDISCARD_NONREF : AVDISCARD_NONE;
    if(mb->mb_skip == 2)
        vd->vd_skip = 1;

    vc->vc_mb = mb;

    avcodec_decode_video(ctx, frame, &got_pic, mb->mb_data, mb->mb_size);

    if(mp->mp_stats)
        mp_set_mq_meta(mq, cw->codec, cw->codec_ctx);

    if(!got_pic || mb->mb_skip == 1)
        return;

    vd->vd_skip = 0;
    vvs = frame->opaque;

    video_deliver_frame(vd, vd->vd_mp, mb, ctx, frame,
                        vvs->vvs_time, vvs->vvs_pts, vvs->vvs_dts,
                        vvs->vvs_duration, vvs->vvs_epoch);
}
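/*
 * The flush loop at the top of vdpau_decode() is the old-API idiom for
 * draining delayed frames: passing NULL/0 input makes a decoder with
 * buffered output emit whatever it still holds. A hedged, stand-alone
 * sketch of the same idiom (drain_decoder is an illustrative name, not
 * part of the code above):
 */
static void drain_decoder(AVCodecContext *ctx, AVFrame *frame)
{
    int got_picture;

    do {
        // empty input: each call may release one buffered frame
        avcodec_decode_video(ctx, frame, &got_picture, NULL, 0);
        // a real caller would hand the frame on whenever got_picture is set
    } while (got_picture);

    avcodec_flush_buffers(ctx);   // then reset the decoder's internal state
}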
int Libav::decodeNextFrame(void) throw (AVException*)
{
    int bytes;
    int frameFinished;

    if (this->compareRange(frameCounter) > 0)
        return -1;

    // loop until we are past the frame-in marker
    do {
        // loop until "frameFinished"
        do {
            // find our specified stream
            do {
                bytes = av_read_frame(pFormatCtx, &packet);
                if (bytes < 0)
                    throw new AVException("unable to read frame", IO_ERROR);
            } while (packet.stream_index != avStream);

            int len;
            // I'm not sure when avcodec_decode_video2 became available; if your
            // version of libav doesn't recognise avcodec_decode_video2, change
            // the VERSION MAJOR here and email [email protected]
#if LIBAVCODEC_VERSION_MAJOR < 52
            len = avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                       packet.data, packet.size);
#else
            len = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
#endif
            if (len < 0)
                throw new AVException("unable to decode frame", FILE_ERROR);
        } while (!frameFinished);

        frameCounter++;
    } while (this->compareRange(frameCounter) < 0);

    this->setInterlaced(pFrame->interlaced_frame);
    this->setInterlaceTopFieldFirst(pFrame->top_field_first);

    av_free_packet(&packet);

    return bytes;
}