Example #1
static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
                   const char *filename)
{
    char buf[1024];
    int ret;

    ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }

        printf("saving frame %3d\n", dec_ctx->frame_number);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(buf, sizeof(buf), filename, dec_ctx->frame_number);
        pgm_save(frame->data[0], frame->linesize[0],
                 frame->width, frame->height, buf);
    }
}
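Most of these examples rely on a pgm_save() helper that is not shown. Examples #1, #2, #4 and #6 pass it a filename; #5 and #7 use modified variants that take an open FILE*. A minimal sketch of the filename-based version, modeled on FFmpeg's decoding example (writes one grayscale plane as a binary PGM file):

static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     const char *filename)
{
    FILE *f = fopen(filename, "wb");
    int i;

    if (!f)
        return;
    /* P5 = binary greyscale PGM; 255 is the maximum sample value */
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    /* each row starts 'wrap' (linesize) bytes after the previous one */
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize, f);
    fclose(f);
}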
Example #2
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
                              AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
    int len, got_frame;
    char buf[1024];

    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (len < 0) {
        fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
        return len;
    }
    if (got_frame) {
        printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
        fflush(stdout);

        /* the picture is allocated by the decoder, no need to free it */
        snprintf(buf, sizeof(buf), outfilename, *frame_count);
        pgm_save(frame->data[0], frame->linesize[0],
                 avctx->width, avctx->height, buf);
        (*frame_count)++;
    }
    if (pkt->data) {
        pkt->size -= len;
        pkt->data += len;
    }
    return 0;
}
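decode_write_frame() is written for the old avcodec_decode_video2() API and expects its caller to keep feeding the same packet until it is consumed, then flush the decoder with an empty packet. A hedged sketch of such a caller, following the old decoding_encoding example (the names inbuf, INBUF_SIZE, f, c and frame are assumptions, not part of the snippet above):

    AVPacket avpkt;
    int frame_count = 0;

    av_init_packet(&avpkt);
    for (;;) {
        avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        if (avpkt.size == 0)
            break;
        avpkt.data = inbuf;
        /* keep calling until the whole packet has been consumed */
        while (avpkt.size > 0)
            if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
                exit(1);
    }

    /* flush: some codecs (e.g. MPEG) hold the last frame back */
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);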
Example #3
int main(int argc, char ** argv)
{
    av_register_all();
    MediaSource ms;
    AVPacket pkt;
    AVFrame * frame = avcodec_alloc_frame();
   // AVFrame * dst_frame = avcodec_alloc_frame();
    AVPicture picture;
    int index = 1;
    char str[100] = {0};

    ms.open(argv[1], 1);
    VideoScale sc;
    sc.init(ms.getWidth(), ms.getHeight(), ms.getPixFormat(), 352, 288,
            ms.getPixFormat(), 0, NULL, NULL, NULL);

    avpicture_alloc(&picture, ms.getPixFormat(), 352, 288);

    while (ms.read(&pkt) == 0) {
        int ret = ms.decode(frame, &pkt);
        if (ret > 0) {
            if (ms.isVideoPacket(&pkt)) {
                if (index > 30)
                    continue;
                snprintf(str, 100, "%02d.pgm", index++);
                sc.scale(frame->data, frame->linesize, 0, ms.getHeight(),
                         picture.data, picture.linesize);
                pgm_save(picture.data[0], picture.linesize[0], 352, 288, str);
            }

            if (ms.isAudioPacket(&pkt)) {
                /*
                static AudioResample rsp;
                rsp.init(ms.getChannelLayout(), ms.getSampleFormat(), ms.getSampleRate(),
                         ms.getChannelLayout(), ms.getSampleFormat(), ms.getSampleRate());
                         */

            }
        }
    }

    ms.close();

    return 0;
}
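MediaSource and VideoScale are wrapper classes that are not shown here; judging by the call signatures, VideoScale::scale() is a thin wrapper around libswscale. A rough sketch of the equivalent direct calls (an assumption about the wrapper, not its actual code; SWS_BILINEAR stands in for the 0 flag the wrapper receives):

    struct SwsContext *sws = sws_getContext(ms.getWidth(), ms.getHeight(), ms.getPixFormat(),
                                            352, 288, ms.getPixFormat(),
                                            SWS_BILINEAR, NULL, NULL, NULL);
    /* convert/scale one decoded frame into the pre-allocated picture */
    sws_scale(sws, frame->data, frame->linesize, 0, ms.getHeight(),
              picture.data, picture.linesize);
    sws_freeContext(sws);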
Example #4
void video_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int frame, size, got_picture, len;
    FILE *f;
    AVFrame *picture;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE], *inbuf_ptr;
    char buf[1024];

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Video decoding\n");

    /* find the mpeg1 video decoder */
    codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c= avcodec_alloc_context();
    picture= avcodec_alloc_frame();

    if(codec->capabilities&CODEC_CAP_TRUNCATED)
        c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* for some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream */

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }
    
    /* the codec gives us the frame size, in samples */

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }
    
    frame = 0;
    for(;;) {
        size = fread(inbuf, 1, INBUF_SIZE, f);
        if (size == 0)
            break;

        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it. 

           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (mpeg1video), so we
           feed decoder and see if it could decode a frame */
        inbuf_ptr = inbuf;
        while (size > 0) {
            len = avcodec_decode_video(c, picture, &got_picture, 
                                       inbuf_ptr, size);
            if (len < 0) {
                fprintf(stderr, "Error while decoding frame %d\n", frame);
                exit(1);
            }
            if (got_picture) {
                printf("saving frame %3d\n", frame);
                fflush(stdout);

                /* the picture is allocated by the decoder. no need to
                   free it */
                snprintf(buf, sizeof(buf), outfilename, frame);
                pgm_save(picture->data[0], picture->linesize[0], 
                         c->width, c->height, buf);
                frame++;
            }
            size -= len;
            inbuf_ptr += len;
        }
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    len = avcodec_decode_video(c, picture, &got_picture, 
                               NULL, 0);
    if (got_picture) {
        printf("saving last frame %3d\n", frame);
        fflush(stdout);
        
        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(buf, sizeof(buf), outfilename, frame);
        pgm_save(picture->data[0], picture->linesize[0], 
                 c->width, c->height, buf);
        frame++;
    }
        
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");
}
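avcodec_decode_video() and CODEC_ID_MPEG1VIDEO come from a very old libavcodec; in current FFmpeg the same stream-based feeding is done with a parser plus the send/receive API from Example #1. A sketch of the parser step under that assumption (inbuf, data_size, c, frame and outfilename are taken from the surrounding example):

    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_MPEG1VIDEO);
    AVPacket *pkt = av_packet_alloc();
    uint8_t *data = inbuf;            /* inbuf/data_size filled by fread(), as above */

    while (data_size > 0) {
        /* the parser splits the raw byte stream into whole packets */
        int ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
                                   data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (ret < 0)
            exit(1);
        data      += ret;
        data_size -= ret;
        if (pkt->size)
            decode(c, frame, pkt, outfilename);   /* decode() as in Example #1 */
    }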
Example #5
static void video_decode_example(const char *outfilename, const char *filename)   
{   
    AVCodec *codec;  
    AVCodecContext *c= NULL;  
    int frame, got_picture, len;   
    FILE *f, *fout;
    AVFrame *picture;  
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];  
    char buf[1024];   
    AVPacket avpkt;  
    AVDictionary *opts;  
    
    av_init_packet(&avpkt);  
    
    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */  
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);  
    
    printf("Video decoding/n");  
    opts = NULL;  
    //av_dict_set(&opts, "b", "2.5M", 0);  
    /* find the H.264 video decoder */  
    codec = avcodec_find_decoder(CODEC_ID_H264);  
    if(!codec) {   
        fprintf(stderr,"codec not found/n");  
        exit(1);  
    }  
    
    c = avcodec_alloc_context3(codec);  
    picture= avcodec_alloc_frame();  
    
    if(codec->capabilities&CODEC_CAP_TRUNCATED)  
        c->flags|= CODEC_FLAG_TRUNCATED;/* we do not send complete frames */  
    
    /* For some codecs, such as msmpeg4 and mpeg4, width and height 
       MUST be initialized there because this information is not 
       available in the bitstream. */  
    
    /* open it */  
    if(avcodec_open2(c, codec, NULL) < 0) {   
        fprintf(stderr,"could not open codec/n");  
        exit(1);  
    }  
    
     
    fout=fopen(outfilename,"wb");  
    /* the codec gives us the frame size, in samples */  
    
    f =fopen(filename,"rb");   
    if(!f) {   
        fprintf(stderr,"could not open %s/n", filename);  
        exit(1);  
    }  
    
    frame = 0;  
    for(;;) {  
        //avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);  
        build_avpkt(&avpkt, f);  
        if(avpkt.size == 0)   
            break;  
    
        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio) 
           and this is the only method to use them because you cannot 
           know the compressed data size before analysing it. 
   
           BUT some other codecs (msmpeg4, mpeg4) are inherently frame 
           based, so you must call them with all the data for one 
           frame exactly. You must also initialize 'width' and 
           'height' before initializing them. */  
    
        /* NOTE2: some codecs allow the raw parameters (frame size, 
           sample rate) to be changed at any frame. We handle this, so 
           you should also take care of it */  
    
        /* here, we use a stream based decoder (h264), so we
           feed the decoder and see if it could decode a frame */  
        //avpkt.data = inbuf;  
        while(avpkt.size > 0) {   
            len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);  
            if(len < 0) {   
                fprintf(stderr,"Error while decoding frame %d/n", frame);  
                break;  
                //   exit(1);  
            }  
            if(got_picture) {   
                printf("saving frame %3d/n", frame);  
                fflush(stdout);  
    
                /* the picture is allocated by the decoder. no need to 
                   free it */  
                snprintf(buf, sizeof(buf), outfilename, frame);
                pgm_save(picture->data[0], picture->linesize[0],  
                         c->width, c->height, fout);  
                pgm_save(picture->data[1], picture->linesize[1],  
                         c->width/2, c->height/2, fout);  
                pgm_save(picture->data[2], picture->linesize[2],  
                         c->width/2, c->height/2, fout);  
                frame++;  
            }  
            avpkt.size -= len;  
            avpkt.data += len;  
        }  
    }  
    
    /* some codecs, such as MPEG, transmit the I and P frame with a 
       latency of one frame. You must do the following to have a 
       chance to get the last frame of the video */  
    avpkt.data = NULL;  
    avpkt.size = 0;  
    len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);  
    if(got_picture) {   
        printf("saving last frame %3d/n", frame);  
        fflush(stdout);  
    
        /* the picture is allocated by the decoder. no need to 
           free it */  
        sprintf(buf, outfilename, frame);  
        //pgm_save(picture->data[0], picture->linesize[0],  
		//       c->width, c->height, fout);  
		pgm_save(picture->data[0], picture->linesize[0],c->width, c->height, fout);  
		pgm_save(picture->data[1], picture->linesize[1],c->width/2, c->height/2, fout);  
		pgm_save(picture->data[2], picture->linesize[2],c->width/2, c->height/2, fout);  
    
		frame++;  
    }  
    
    fclose(f);  
    fclose(fout);
    
    avcodec_close(c);  
    av_free(c);  
    av_free(picture);  
    printf("/n");  
}   
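build_avpkt() is not shown; judging by the commented-out fread()/avpkt.data lines, it simply refills the packet from the input file. A hypothetical minimal version (it could equally reuse the example's own inbuf buffer):

static void build_avpkt(AVPacket *avpkt, FILE *f)
{
    /* static so the buffer outlives the call; padded as the decoder expects */
    static uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];

    avpkt->size = fread(inbuf, 1, INBUF_SIZE, f);
    avpkt->data = inbuf;
}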
Example #6
static int decode_example(const char *filename)
{
    AVFormatContext *fctx = NULL;
    AVCodec *codec;
    AVCodecContext *avctx;
    int video_st = -1;
    int i, got_pic;
    AVFrame *picture, *tmp_picture;
    int size;
    uint8_t *tmp_buf;
    int ret = 0;

    avformat_open_input(&fctx, filename, NULL, NULL);
    if (fctx == NULL)
        return AVERROR(1);

    av_find_stream_info(fctx);

    av_dump_format(fctx, 0, filename, 0);

    for (i = 0; i < fctx->nb_streams; i++) {
        if (fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_st = i;
            break;
        }
    }

    if (video_st < 0) {
        av_log(NULL, AV_LOG_ERROR, "no video stream found\n");
        return AVERROR(1);
    }

    avctx = fctx->streams[video_st]->codec;

    codec = avcodec_find_decoder_by_name("libdm365_h264");
    if (codec == NULL) {
        av_log(avctx, AV_LOG_ERROR, "unsupported codec\n");
        return AVERROR(1);
    }

    if (avcodec_open(avctx, codec) < 0) {
        av_log(avctx, AV_LOG_ERROR, "cannot open codec\n");
        return AVERROR(1);
    }

    picture = avcodec_alloc_frame();
    tmp_picture = avcodec_alloc_frame();

    size = avpicture_get_size(PIX_FMT_YUV420P, avctx->width, avctx->height);
    tmp_buf = av_malloc(size);
    if (tmp_buf == NULL) {
        ret = AVERROR(ENOMEM);
        goto decode_cleanup;
    }
    avpicture_fill((AVPicture *)tmp_picture, tmp_buf,
            PIX_FMT_NV12, avctx->width, avctx->height);

    for (i = 0; i < 10; i++) {
        AVPacket pkt;
        int nb;
        char fname[32];
        int factor = 2;

        if (av_read_frame(fctx, &pkt) < 0)
            break;

        nb = avcodec_decode_video2(avctx, picture, &got_pic, &pkt);
        if (nb < 0) {
            av_log(avctx, AV_LOG_ERROR, "error in decoding\n");
            goto decode_cleanup;
        }
        printf("Decoded frame: %d\n", i);

        my_scale((AVPicture *) picture, avctx->width, avctx->height,
                (AVPicture *) tmp_picture, factor);

        sprintf(fname, "frame%02d.pgm", i+1);
        pgm_save(picture->data[0], picture->linesize[0],
                avctx->width, avctx->height, fname);

        sprintf(fname, "frame%02d.bmp", i+1);
        save_image((AVPicture *)tmp_picture, avctx->pix_fmt,
                avctx->width/factor, avctx->height/factor, fname);
        av_free_packet(&pkt); /* release the packet filled by av_read_frame() */
    }

decode_cleanup:
    av_free(picture);
    av_free(tmp_picture->data[0]);
    av_free(tmp_picture);
    av_close_input_file(fctx);
    avcodec_close(avctx);
    return ret;
}
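avpicture_get_size()/avpicture_fill() have since been deprecated; the same tmp_picture setup could be done with libavutil's image utilities, as in this sketch (not part of the original example). Note the original passes PIX_FMT_YUV420P to the size call but PIX_FMT_NV12 to the fill; both are 12 bits per pixel, so the sizes happen to match.

#include <libavutil/imgutils.h>

    size = av_image_get_buffer_size(AV_PIX_FMT_NV12, avctx->width, avctx->height, 1);
    tmp_buf = av_malloc(size);
    if (!tmp_buf)
        return AVERROR(ENOMEM);
    /* point tmp_picture's data/linesize at the freshly allocated buffer */
    av_image_fill_arrays(tmp_picture->data, tmp_picture->linesize, tmp_buf,
                         AV_PIX_FMT_NV12, avctx->width, avctx->height, 1);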
Example #7
int main(int argc, char **argv)
{
	const char *outfilename = "outrec.txt";
	const char *outrecfilename = "outrec.yuv";
	const char *filename = "test.264";
	extern AVCodec h264_decoder;
	AVCodec *codec = &h264_decoder;
	AVCodecContext *c= NULL;
	int frame, size, got_picture, len;
	FILE *fin, *fout;
	AVFrame *picture;
	uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE], *inbuf_ptr;
	char buf[1024]; 
	DSPContext dsp;
	unsigned int tbegin;



	/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
	memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);


	/* initialize libavcodec; the H.264 decoder is linked in directly (h264_decoder above) */
	avcodec_init();
	c= avcodec_alloc_context();
	picture= avcodec_alloc_frame();
	//	 dsputil_init(&dsp, c);

	if(codec->capabilities&CODEC_CAP_TRUNCATED)
		c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

	/* For some codecs, such as msmpeg4 and mpeg4, width and height
	   MUST be initialized there because this information is not
	   available in the bitstream. */

	/* open it */


	if (avcodec_open(c, codec) < 0) {
		fprintf(stderr, "could not open codec\n");
		exit(1);
	}
	{

		H264Context *h = c->priv_data;
		MpegEncContext *s = &h->s;
		s->dsp.idct_permutation_type =1;
		dsputil_init(&s->dsp, c);
	}
	/* the codec gives us the frame size, in samples */

	if(argc == 2) filename = argv[1];
	fin = fopen(filename, "rb");
	if (!fin) {
		fprintf(stderr, "could not open %s\n", filename);
		exit(1);
	}
	fout = fopen(outfilename, "wb");
	if (!fout) {
		fprintf(stderr, "could not open %s\n", outfilename);
		exit(1);
	}
	fclose(fout);

	fout = fopen(outrecfilename, "wb");
	if (!fout) {
		fprintf(stderr, "could not open %s\n", outrecfilename);
		exit(1);
	}
	fclose(fout);

	printf("Video decoding...\n");

	frame = 0;
	for(;;) {
		size = fread(inbuf, 1, INBUF_SIZE, fin);
		if (size == 0)
			break;

		/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
		   and this is the only method to use them because you cannot
		   know the compressed data size before analysing it.

		   BUT some other codecs (msmpeg4, mpeg4) are inherently frame
		   based, so you must call them with all the data for one
		   frame exactly. You must also initialize 'width' and
		   'height' before initializing them. */

		/* NOTE2: some codecs allow the raw parameters (frame size,
		   sample rate) to be changed at any frame. We handle this, so
		   you should also take care of it */

		/* here, we use a stream based decoder (h264), so we
		   feed the decoder and see if it could decode a frame */
		inbuf_ptr = inbuf;
		while (size > 0) {
			len = avcodec_decode_video(c, picture, &got_picture,
					inbuf_ptr, size);
			if (len < 0) {
				fprintf(stderr, "Error while decoding frame %d\n", frame);
				/* a negative len would otherwise make size grow and loop forever */
				break;
			}
			if (got_picture) {
				//printf("saving frame %3d\n", frame);
				fflush(stdout);

				/* the picture is allocated by the decoder. no need to
				   free it */
				//  snprintf(buf, sizeof(buf), outfilename, frame);
#if 0	//save file
				pgm_save(picture->data[0], picture->linesize[0],
						c->width, c->height, outfilename, outrecfilename);
				pgm_save(picture->data[1], picture->linesize[1],
						c->width/2, c->height/2, outfilename, outrecfilename);
				pgm_save(picture->data[2], picture->linesize[2],
						c->width/2, c->height/2, outfilename, outrecfilename);
#endif
				frame++;
			}
			size -= len;
			inbuf_ptr += len;
		}
	}

	printf("%0.3f elapsed\n", (double)clock()/1000);
	/* some codecs, such as MPEG, transmit the I and P frame with a
	   latency of one frame. You must do the following to have a
	   chance to get the last frame of the video */
#define NOTFOR264
#ifdef NOTFOR264

	//    len = avcodec_decode_video(c, picture, &got_picture,
	//                               NULL, 0);
	len = avcodec_decode_video(c, picture, &got_picture,
			inbuf_ptr, 0);
	if (got_picture) {
		printf("saving last frame %3d\n", frame);
		fflush(stdout);

		/* the picture is allocated by the decoder. no need to
		   free it */
		//    snprintf(buf, sizeof(buf), outfilename, frame);
		pgm_save(picture->data[0], picture->linesize[0],
				c->width, c->height, outfilename, outrecfilename);
		pgm_save(picture->data[1], picture->linesize[1],
				c->width/2, c->height/2, outfilename, outrecfilename);
		pgm_save(picture->data[2], picture->linesize[2],
				c->width/2, c->height/2, outfilename, outrecfilename);
		frame++;
	}
#endif

	fclose(fin);
	//	 fclose(fout);

	avcodec_close(c);
	av_free(c);
	av_free(picture);
	printf("\n");
}