Example #1
JNIEXPORT void JNICALL Java_gestalt_candidates_NativeMovieTextureProducer_tryToReadNewFrame
	(JNIEnv *env, jobject obj)
{
	if(!_myNewFrameIsReady){

		if(av_read_frame(pFormatCtx, &packet)>=0) {
			/* Is this a packet from the video stream? */
			if(packet.stream_index==videoStream) {
				/* Decode video frame */
				avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, 
					packet.data, packet.size);

				/* Is the frame ready? */
				if(frameFinished) {

					/* Convert the image from its native format to RGB */
					img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24, 
						(AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, 
						pCodecCtx->height);

					_myNewFrameIsReady = true;
				}
			}
			/* Free the packet that was allocated by av_read_frame */
			av_free_packet(&packet);
		}
	}
}
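Note: img_convert() was deprecated and eventually removed from FFmpeg; sws_getContext()/sws_scale() from libswscale replace it and also handle rescaling (several examples below show both paths). As a minimal sketch of the same native-format-to-RGB conversion against the newer API, reusing the pCodecCtx, pFrame and pFrameRGB variables assumed by the example above:

#include <libswscale/swscale.h>

/* Sketch only, not part of the original example: convert pFrame into
 * pFrameRGB as RGB24 using libswscale. Assumes the pCodecCtx, pFrame
 * and pFrameRGB globals from the example above; the context is cached
 * because creating one per frame is expensive. */
static void convertFrameWithSwscale(void)
{
	static struct SwsContext *sws_ctx = NULL;

	if (!sws_ctx) {
		sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
		                         pCodecCtx->pix_fmt,
		                         pCodecCtx->width, pCodecCtx->height,
		                         PIX_FMT_RGB24, SWS_BICUBIC,
		                         NULL, NULL, NULL);
	}
	if (sws_ctx) {
		sws_scale(sws_ctx, pFrame->data, pFrame->linesize,
		          0, pCodecCtx->height,
		          pFrameRGB->data, pFrameRGB->linesize);
	}
}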
Example #2
void VPlayer::decode() {
  if (av_read_frame(d->pFormatCtx, &d->packet) >= 0) {
    if (d->packet.stream_index == d->videoStream) {

      avcodec_decode_video(d->pCodecCtx, d->pFrame, &d->frameFinished,
                           d->packet.data, d->packet.size);

      if (d->frameFinished) {

        // convert from the codec's native pixel format rather than a
        // hardcoded source format
        img_convert((AVPicture *)d->pFrameRGB, PIX_FMT_RGBA32,
                    (AVPicture *)d->pFrame, d->pCodecCtx->pix_fmt,
                    d->pCodecCtx->width, d->pCodecCtx->height);
        // QImage wraps pFrameRGB's buffer without copying, so the image
        // must not outlive that buffer
        d->currentFrame =
            new QImage(d->pFrameRGB->data[0], d->pCodecCtx->width,
                       d->pCodecCtx->height, QImage::Format_ARGB32);
        emit frameReady(*d->currentFrame);
      } else {
        qDebug("Video not ready");
      }
    }
  } else {
    emit videoDone();
    d->vidtimer->stop();
  }

  av_free_packet(&d->packet);
}
Example #3
static int write(struct img_pixmap *img, struct img_io *io)
{
	struct img_pixmap fimg;

	img_init(&fimg);
	if(img_copy(&fimg, img) == -1) {
		img_destroy(&fimg);
		return -1;
	}
	if(img_convert(&fimg, IMG_FMT_RGBF) == -1) {
		img_destroy(&fimg);
		return -1;
	}

	if(rgbe_write_header(io, fimg.width, fimg.height, 0) == -1) {
		img_destroy(&fimg);
		return -1;
	}
	if(rgbe_write_pixels_rle(io, fimg.pixels, fimg.width, fimg.height) == -1) {
		img_destroy(&fimg);
		return -1;
	}
	img_destroy(&fimg);
	return 0;
}
Example #4
int
pgm_fill(AVPicture *dst, const VideoFrame *frame)
{
    enum PixelFormat        srcfmt;
    AVPicture               src;

    if ((srcfmt = pixelTypeOfVideoFrameType(frame->codec)) == PIX_FMT_NONE)
    {
        VERBOSE(VB_COMMFLAG, QString("pgm_fill unknown codec: %1")
                .arg(frame->codec));
        return -1;
    }

    if (avpicture_fill(&src, frame->buf, srcfmt, frame->width,
                frame->height) < 0)
    {
        VERBOSE(VB_COMMFLAG, "pgm_fill avpicture_fill failed");
        return -1;
    }

    if (img_convert(dst, PIX_FMT_GRAY8, &src, srcfmt, frame->width,
                frame->height))
    {
        VERBOSE(VB_COMMFLAG, "pgm_fill img_convert failed");
        return -1;
    }

    return 0;
}
Example #5
int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
            int src_pix_fmt, int src_width, int src_height)
{
    osg::Timer_t startTick = osg::Timer::instance()->tick();
#ifdef USE_SWSCALE
    if (m_swscale_ctx==0)
    {
        m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) src_pix_fmt,
                                      src_width, src_height, (PixelFormat) dst_pix_fmt,
                                      /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);
    }


    OSG_DEBUG<<"Using sws_scale ";

    int result =  sws_scale(m_swscale_ctx,
                            (src->data), (src->linesize), 0, src_height,
                            (dst->data), (dst->linesize));
#else

    OSG_DEBUG<<"Using img_convert ";

    int result = img_convert(dst, dst_pix_fmt, src,
                             src_pix_fmt, src_width, src_height);

#endif
    osg::Timer_t endTick = osg::Timer::instance()->tick();
    OSG_DEBUG<<" time = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl;

    return result;
}
Example #6
bool CAVInfo::GetNextFrame()
{
    AVPacket packet;
    int frameFinished;
    
	while( av_read_frame(m_fctx, &packet) >=0 ) {
	    // Is this a packet from the video stream?
	    if(packet.stream_index == m_videoStream) {
	        // Decode video frame
	        avcodec_decode_video(m_acctx, m_pFrame, &frameFinished, 
	            packet.data, packet.size);
	
	        // Did we get a video frame?
	        if(frameFinished) {
	            // Convert the image from its native format to RGB
	            img_convert((AVPicture *)m_pFrameRGB, PIX_FMT_RGBA32, 
	                (AVPicture*)m_pFrame, m_acctx->pix_fmt, m_acctx->width, 
	                m_acctx->height);
	            // Free the packet before handing the frame back
	            av_free_packet(&packet);
	            return true;
	        }
	    }
	
	    // Free the packet that was allocated by av_read_frame
	    av_free_packet(&packet);
	}
	return false;
}
Example #7
static int write(struct img_pixmap *img, struct img_io *io)
{
	int i, nlines = 0;
	struct jpeg_compress_struct cinfo;
	struct jpeg_error_mgr jerr;
	struct dst_mgr dest;
	struct img_pixmap tmpimg;
	unsigned char **scanlines;

	img_init(&tmpimg);

	if(img->fmt != IMG_FMT_RGB24) {
		if(img_copy(&tmpimg, img) == -1) {
			return -1;
		}
		if(img_convert(&tmpimg, IMG_FMT_RGB24) == -1) {
			img_destroy(&tmpimg);
			return -1;
		}
		img = &tmpimg;
	}

	if(!(scanlines = malloc(img->height * sizeof *scanlines))) {
		img_destroy(&tmpimg);
		return -1;
	}
	scanlines[0] = img->pixels;
	for(i=1; i<img->height; i++) {
		scanlines[i] = scanlines[i - 1] + img->width * img->pixelsz;
	}

	cinfo.err = jpeg_std_error(&jerr);	/* XXX */
	jpeg_create_compress(&cinfo);

	dest.pub.init_destination = init_destination;
	dest.pub.empty_output_buffer = empty_output_buffer;
	dest.pub.term_destination = term_destination;
	dest.io = io;
	cinfo.dest = (struct jpeg_destination_mgr*)&dest;

	cinfo.image_width = img->width;
	cinfo.image_height = img->height;
	cinfo.input_components = 3;
	cinfo.in_color_space = JCS_RGB;

	jpeg_set_defaults(&cinfo);

	jpeg_start_compress(&cinfo, 1);
	while(nlines < img->height) {
		int res = jpeg_write_scanlines(&cinfo, scanlines + nlines, img->height - nlines);
		nlines += res;
	}
	jpeg_finish_compress(&cinfo);
	jpeg_destroy_compress(&cinfo);

	free(scanlines);
	img_destroy(&tmpimg);
	return 0;
}
Example #8
void SimpleVideo::convertFrameToRGB( AVFrame* pOutput )
{
	// frame is finished
	// Convert the image from its native format to RGB
	img_convert( ( AVPicture* )pOutput, PIX_FMT_RGB24, ( AVPicture* )m_pFrame,
		m_pCodecContext->pix_fmt,
		m_pCodecContext->width, m_pCodecContext->height );
}
Example #9
void Encoder::YUVtoRGB(void) {
  // Allocate a RGB Frame
  allocateFrameRGB();

  // convert a YUV Frame into RGB Frame
  img_convert((AVPicture *)avFrameRGB, PIX_FMT_RGB24, (AVPicture *)avFrame, avCodecContext->pix_fmt, avCodecContext->width, avCodecContext->height);

  return;
}
Example #10
static const IplImage* icvRetrieveFrameAVI_FFMPEG( CvCaptureAVI_FFMPEG* capture )
{
    if( !capture || !capture->video_st || !capture->picture->data[0] )
        return 0;
#if LIBAVFORMAT_BUILD > 4628
    img_convert( (AVPicture*)&capture->rgb_picture, PIX_FMT_BGR24,
                 (AVPicture*)capture->picture,
                 capture->video_st->codec->pix_fmt,
                 capture->video_st->codec->width,
                 capture->video_st->codec->height );
#else
    img_convert( (AVPicture*)&capture->rgb_picture, PIX_FMT_BGR24,
                 (AVPicture*)capture->picture,
                 capture->video_st->codec.pix_fmt,
                 capture->video_st->codec.width,
                 capture->video_st->codec.height );
#endif
    return &capture->frame;
}
Example #11
IplImage* CvCapture_FFMPEG::retrieveFrame()
{
    if( !video_st || !picture->data[0] )
        return 0;


#if LIBAVFORMAT_BUILD > 4628
    img_convert( (AVPicture*)&rgb_picture, PIX_FMT_BGR24,
                 (AVPicture*)picture,
                 video_st->codec->pix_fmt,
                 video_st->codec->width,
                 video_st->codec->height );
#else
    img_convert( (AVPicture*)&rgb_picture, PIX_FMT_BGR24,
                 (AVPicture*)picture,
                 video_st->codec.pix_fmt,
                 video_st->codec.width,
                 video_st->codec.height );
#endif
    return &frame;
}
Example #12
// ---------------------------------------------------------------------------------
// Preprocess frame.
// Only does deinterlacing for now.
static void Preprocess_Frame(PyCodecObject* cObj, AVPicture *picture, void **bufp)
{
	AVCodecContext *dec;
	AVPicture *picture2;
	AVPicture picture_tmp;
	uint8_t *buf = 0;

	dec = cObj->cCodec;
	/* deinterlace : must be done before any resize */
	if ((cObj->iVcodecFlags & VCODEC_DEINTERLACE_FL) ||
			(cObj->iVcodecFlags & VCODEC_POSTPROC_FL)) {
		int size;
		/* create temporary picture */
		size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
		buf = (uint8_t*)av_malloc(size);
		if (!buf)
			return;

		picture2 = &picture_tmp;
		avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);

		if (cObj->iVcodecFlags & VCODEC_DEINTERLACE_FL) {
			if(avpicture_deinterlace(picture2,
						picture,
						dec->pix_fmt,
						dec->width,
						dec->height) < 0) {
				/* if error, do not deinterlace */
				av_free(buf);
				buf = NULL;
				picture2 = picture;
			}
		} else {
			if (img_convert(picture2, dec->pix_fmt,
					picture, dec->pix_fmt,
					dec->width,
					dec->height) < 0) {
				/* if error, do not copy */
				av_free(buf);
				buf = NULL;
				picture2 = picture;
			}
		}
	} else {
		picture2 = picture;
	}

	//frame_hook_process(picture2, dec->pix_fmt, dec->width, dec->height);

	if (picture != picture2)
		*picture = *picture2;
	*bufp = buf;
}
Example #13
int FFMPEG::convert_cmodel(VFrame *frame_in,  VFrame *frame_out) {

    PixelFormat pix_fmt_in =
        color_model_to_pix_fmt(frame_in->get_color_model());
    PixelFormat pix_fmt_out =
        color_model_to_pix_fmt(frame_out->get_color_model());
#ifdef HAVE_SWSCALER
    // We need a context for swscale
    struct SwsContext *convert_ctx;
#endif
    // do conversion within libavcodec if possible
    if (pix_fmt_in != PIX_FMT_NB && pix_fmt_out != PIX_FMT_NB) {
        // set up a temporary pictures from frame_in and frame_out
        AVPicture picture_in, picture_out;
        init_picture_from_frame(&picture_in, frame_in);
        init_picture_from_frame(&picture_out, frame_out);
        int result;
#ifndef HAVE_SWSCALER
        // img_convert() cannot rescale, so both dimensions come from the input
        result = img_convert(&picture_out,
                             pix_fmt_out,
                             &picture_in,
                             pix_fmt_in,
                             frame_in->get_w(),
                             frame_in->get_h());
        if (result) {
            printf("FFMPEG::convert_cmodel img_convert() failed\n");
        }
#else
        convert_ctx = sws_getContext(frame_in->get_w(), frame_in->get_h(),pix_fmt_in,
                                     frame_out->get_w(),frame_out->get_h(),pix_fmt_out,
                                     SWS_BICUBIC, NULL, NULL, NULL);

        if(convert_ctx == NULL) {
            printf("FFMPEG::convert_cmodel : swscale context initialization failed\n");
            return 1;
        }

        // sws_scale() returns the output slice height; map it to the
        // 0-on-success convention used by the img_convert() path above
        result = sws_scale(convert_ctx,
                           picture_in.data, picture_in.linesize,
                           0, frame_in->get_h(),
                           picture_out.data, picture_out.linesize) < 0;

        sws_freeContext(convert_ctx);
#endif
        return result;
    }


    // failing the fast method, use the failsafe cmodel_transfer()
    return convert_cmodel_transfer(frame_in, frame_out);
}
Example #14
IplImage* CvCapture_FFMPEG::retrieveFrame(int)
{
    if( !video_st || !picture->data[0] )
        return 0;

#if !defined(HAVE_FFMPEG_SWSCALE)
#if LIBAVFORMAT_BUILD > 4628
    img_convert( (AVPicture*)&rgb_picture, PIX_FMT_BGR24,
                 (AVPicture*)picture,
                 video_st->codec->pix_fmt,
                 video_st->codec->width,
                 video_st->codec->height );
#else
    img_convert( (AVPicture*)&rgb_picture, PIX_FMT_BGR24,
                 (AVPicture*)picture,
                 video_st->codec.pix_fmt,
                 video_st->codec.width,
                 video_st->codec.height );
#endif
#else
    img_convert_ctx = sws_getContext(video_st->codec->width,
                  video_st->codec->height,
                  video_st->codec->pix_fmt,
                  video_st->codec->width,
                  video_st->codec->height,
                  PIX_FMT_BGR24,
                  SWS_BICUBIC,
                  NULL, NULL, NULL);

    sws_scale(img_convert_ctx, picture->data,
              picture->linesize, 0,
              video_st->codec->height,
              rgb_picture.data, rgb_picture.linesize);
    sws_freeContext(img_convert_ctx);
#endif
    return &frame;
}
Example #15
/// write a frame with FFMPEG
CV_IMPL int cvWriteFrame( CvVideoWriter * writer, const IplImage * image )
{
	int ret = 0;

	CV_FUNCNAME("cvWriteFrame");

	__BEGIN__;

	// typecast from opaque data type to implemented struct
	CvAVI_FFMPEG_Writer * mywriter = (CvAVI_FFMPEG_Writer*) writer;
#if LIBAVFORMAT_BUILD > 4628
	AVCodecContext *c = mywriter->video_st->codec;
#else
	AVCodecContext *c = &(mywriter->video_st->codec);
#endif
	// check parameters
	assert ( image );
	assert ( image->nChannels == 3 );
	assert ( image->depth == IPL_DEPTH_8U );


	// check if buffer sizes match, i.e. image has expected format (size, channels, bitdepth, alignment)
	assert (image->imageSize == avpicture_get_size (PIX_FMT_BGR24, image->width, image->height));

	if (c->pix_fmt != PIX_FMT_BGR24 ) {
		assert( mywriter->rgb_picture );
		// let rgb_picture point to the raw data buffer of 'image'
		avpicture_fill((AVPicture *)mywriter->rgb_picture, (uint8_t *) image->imageData, 
				PIX_FMT_BGR24, image->width, image->height);

		// convert to the color format needed by the codec
		if( img_convert((AVPicture *)mywriter->picture, c->pix_fmt,
					(AVPicture *)mywriter->rgb_picture, PIX_FMT_BGR24, 
					image->width, image->height) < 0){
			CV_ERROR(CV_StsUnsupportedFormat, "FFMPEG::img_convert pixel format conversion from BGR24 not handled");
		}
	}
	else{
		avpicture_fill((AVPicture *)mywriter->picture, (uint8_t *) image->imageData,
				PIX_FMT_BGR24, image->width, image->height);
	}

	ret = icv_av_write_frame_FFMPEG( mywriter->oc, mywriter->video_st, mywriter->outbuf, mywriter->outbuf_size, mywriter->picture);

	__END__;
	return ret;
}
Example #16
/*! convert/scale between an input and an output format.
 * Old version of ffmpeg only have img_convert, which does not rescale.
 * New versions use sws_scale which does both.
 */
static void my_scale(struct fbuf_t *in, AVPicture *p_in,
	struct fbuf_t *out, AVPicture *p_out)
{
	AVPicture my_p_in, my_p_out;
	int eff_w=out->w, eff_h=out->h;

	if (p_in == NULL)
		p_in = fill_pict(in, &my_p_in);
	if (p_out == NULL)
		p_out = fill_pict(out, &my_p_out);

	/*if win_w is different from zero then we must change
	the size of the scaled buffer (the position is already
	encoded into the out parameter)*/
	if (out->win_w) { /* picture in picture enabled */
		eff_w=out->win_w;
		eff_h=out->win_h;
	}
#ifdef OLD_FFMPEG
	/* XXX img_convert is deprecated, and does not do rescaling, PiP not supported */
	img_convert(p_out, out->pix_fmt,
		p_in, in->pix_fmt, in->w, in->h);
#else /* XXX replacement */
	{
		struct SwsContext *convert_ctx;

		convert_ctx = sws_getContext(in->w, in->h, in->pix_fmt,
			eff_w, eff_h, out->pix_fmt,
			SWS_BICUBIC, NULL, NULL, NULL);
		if (convert_ctx == NULL) {
			ast_log(LOG_ERROR, "FFMPEG::convert_cmodel : swscale context initialization failed");
			return;
		}
		if (0)
			ast_log(LOG_WARNING, "in %d %dx%d out %d %dx%d\n",
				in->pix_fmt, in->w, in->h, out->pix_fmt, eff_w, eff_h);
		sws_scale(convert_ctx,
			p_in->data, p_in->linesize,
			0, in->h, /* src slice: whole frame */
			p_out->data, p_out->linesize);

		sws_freeContext(convert_ctx);
	}
#endif /* XXX replacement */
}
Example #17
pixerrorcode pix_convert_avpicture(int flags, piximage * img_dst, AVPicture * img_src, pixosi src_fmt) {
	pixosi desiredPalette = pix_ffmpeg_from_pix_osi(img_dst->palette);

	if (!pictureBuffer) {
		pictureBuffer = (AVPicture *) malloc(sizeof(AVPicture));
		atexit(pix_ffmpeg_cleanup);
	}

	avpicture_fill(pictureBuffer, img_dst->data, desiredPalette, img_dst->width, img_dst->height);

	img_convert(pictureBuffer, desiredPalette,
		img_src, pix_ffmpeg_from_pix_osi(src_fmt),
		img_dst->width, img_dst->height);

	// pictureBuffer->data[0] now holds the converted pixel data

	return PIX_OK;
}
Example #18
const unsigned char *TFfmpeg::getCurrentFrame(int frameNumber, int *width, int *height) 
{
	frameNumber = frameNumber * AV_TIME_BASE / 1000;
	
	*width = pCodecCtx->width;
	*height = pCodecCtx->height;
	
	int frameFinished;
	AVPacket packet;

	
	//seeking
	av_seek_frame(pFormatCtx, -1, frameNumber, AVSEEK_FLAG_BACKWARD);
	avcodec_flush_buffers(pCodecCtx);
	
	while(av_read_frame(pFormatCtx, &packet) >= 0)
	{
		// Is this a packet from the video stream?
		if(packet.stream_index == videoStream)
		{
			// Decode video frame
			avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
	
			// Did we get a video frame?
			if(frameFinished)
			{	
				// Convert the image from its native format to RGB
				img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGBA32, (AVPicture*)pFrame, pCodecCtx->pix_fmt,  pCodecCtx->width, pCodecCtx->height);
			
				// Save the frame to disk
				//SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, minute);
				break;	// this final packet is freed after the loop
			}
		}

		// Free packets that did not yield a finished frame
		av_free_packet(&packet);
	}
	
	av_free_packet(&packet);

	return pFrameRGB->data[0];
}
Example #19
IoSeq *IoAVCode_frameSeqForAVFrame_(IoAVCodec *self, AVFrame *avframe, int srcPixelFormat, int width, int height)
{
	AVPicture *rgbPicture = IoAVCode_allocDstPictureIfNeeded(self, srcPixelFormat, width, height);
	AVPicture srcPicture;
	int result;

	memcpy(srcPicture.data,     avframe->data,     sizeof(uint8_t *) * 4);
	memcpy(srcPicture.linesize, avframe->linesize, sizeof(int)       * 4);

	result = img_convert(rgbPicture, PIX_FMT_RGB24, &srcPicture, srcPixelFormat, width, height);

	if (result)
	{
		printf("AVCodec: img_convert error?\n");
	}

	UArray *data = UArray_newWithData_type_encoding_size_copy_(rgbPicture->data[0], CTYPE_uint8_t, CENCODING_NUMBER, width * height * 3, 1);

	return IoSeq_newWithUArray_copy_(IOSTATE, data, 0);
}
Example #20
void capture_snap(capture* c) {
  u_int j;
  uint8_t* row1;
  uint8_t* row2;
  u_int row_size;

  row_size = c->c->width * 3;

  for (j = 0; j < c->c->height / 2; j++) {
    row1 = c->picture_buf_rgb + j * row_size;
    row2 = c->picture_buf_rgb + (c->c->height - 1 - j) * row_size;

    memcpy(c->temp_buf, row2, row_size);
    memcpy(row2, row1, row_size);
    memcpy(row1, c->temp_buf, row_size);
  }

  img_convert((AVPicture*) c->picture, PIX_FMT_YUV420P, (AVPicture*) c->picture_rgb, PIX_FMT_RGB24, c->c->width, c->c->height);

  c->out_size = avcodec_encode_video(c->c, c->outbuf, c->outbuf_size, c->picture);
  printf("encoding frame %3d (size=%5d)\n", c->i++, c->out_size);
  fwrite(c->outbuf, 1, c->out_size, c->f);
}
Example #21
/*! convert/scale between an input and an output format.
 * Old version of ffmpeg only have img_convert, which does not rescale.
 * New versions use sws_scale which does both.
 */
static void my_scale(struct fbuf_t *in, AVPicture *p_in,
	struct fbuf_t *out, AVPicture *p_out)
{
	AVPicture my_p_in, my_p_out;

	if (p_in == NULL)
		p_in = fill_pict(in, &my_p_in);
	if (p_out == NULL)
		p_out = fill_pict(out, &my_p_out);

#ifdef OLD_FFMPEG
	/* XXX img_convert is deprecated, and does not do rescaling */
	img_convert(p_out, out->pix_fmt,
		p_in, in->pix_fmt, in->w, in->h);
#else /* XXX replacement */
    {
	struct SwsContext *convert_ctx;

	convert_ctx = sws_getContext(in->w, in->h, in->pix_fmt,
		out->w, out->h, out->pix_fmt,
		SWS_BICUBIC, NULL, NULL, NULL);
	if (convert_ctx == NULL) {
		ast_log(LOG_ERROR, "FFMPEG::convert_cmodel : swscale context initialization failed");
		return;
	}
	if (0)
		ast_log(LOG_WARNING, "in %d %dx%d out %d %dx%d\n",
			in->pix_fmt, in->w, in->h, out->pix_fmt, out->w, out->h);
	sws_scale(convert_ctx,
		p_in->data, p_in->linesize,
		0, in->h, /* src slice: whole frame */
		p_out->data, p_out->linesize);

	sws_freeContext(convert_ctx);
    }
#endif /* XXX replacement */
}
Example #22
static int queue_picture(FFMovie *movie, AVFrame *src_frame)
{
/*DECODE LOOP*/
    AVPicture pict;

    SDL_LockMutex(movie->dest_mutex);

    /* if the frame is not skipped, then display it */

    if (movie->dest_overlay) {
        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay(movie->dest_overlay);

        pict.data[0] = movie->dest_overlay->pixels[0];
        pict.data[1] = movie->dest_overlay->pixels[2];
        pict.data[2] = movie->dest_overlay->pixels[1];
        pict.linesize[0] = movie->dest_overlay->pitches[0];
        pict.linesize[1] = movie->dest_overlay->pitches[2];
        pict.linesize[2] = movie->dest_overlay->pitches[1];

/*
  first fields of AVFrame match AVPicture, so it appears safe to
  cast here (as of ffmpeg-0.4.8, this is how ffplay does it)
  AVPicture is just a container for 4 pixel pointers and 4 strides
*/
        img_convert(&pict, PIX_FMT_YUV420P,
                    (AVPicture *)src_frame, movie->video_st->codec.pix_fmt,
                    movie->video_st->codec.width, movie->video_st->codec.height);

        SDL_UnlockYUVOverlay(movie->dest_overlay);

        video_refresh_timer(movie);
    }
    SDL_UnlockMutex(movie->dest_mutex);

    return 0;
}
Example #23
//--------------------------------------------------------------------
bool ofUCUtils::getFrameUC(unsigned char ** _pixels) {

	if ( !SUCCESS( unicap_queue_buffer( handle, &buffer ) )) {
		printf("Unicap : Failed to queue a buffer\n");
		return false;
	}
	/*
	 Wait until the image buffer is ready
	 */
	if ( !SUCCESS( unicap_wait_buffer( handle, &returned_buffer ) )) {
		printf("Unicap : Failed to wait for buffer\n");
		return false;
	}

	if(src_pix_fmt!=PIX_FMT_RGB24){
		avpicture_fill(src,returned_buffer->data,src_pix_fmt,format.size.width,format.size.height);
		img_convert(dst,PIX_FMT_RGB24,src,src_pix_fmt,format.size.width,format.size.height);
		avpicture_layout(dst,PIX_FMT_RGB24,format.size.width,format.size.height,*_pixels,format.size.width*format.size.height*3);
	}else{
		*_pixels = returned_buffer->data;
	}
	return true;

}
Example #24
int parse_args(int argc, char **argv)
{
	int i;
	char *endp;
	struct list_node *head = 0, *tail = 0;

	for(i=1; i<argc; i++) {
		if(argv[i][0] == '-' && argv[i][2] == 0) {
			switch(argv[i][1]) {
			case 'f':
				fullscreen = !fullscreen;
				break;

			case 's':
				stereo = !stereo;
				break;

			case 't':
				threshold = strtod(argv[++i], &endp);
				if(endp == argv[i]) {
					fprintf(stderr, "-t must be followed by a number\n");
					return -1;
				}
				break;

			case 'h':
				printf("usage: %s [opt]\n", argv[0]);
				printf("options:\n");
				printf("  -f    start in fullscreen\n");
				printf("  -s    enable stereoscopic rendering\n");
				printf("  -h    print usage and exit\n");
				exit(0);

			default:
				fprintf(stderr, "unrecognized option: %s\n", argv[i]);
				return -1;
			}
		} else {
			struct list_node *slice;

			if(!(slice = malloc(sizeof *slice))) {
				fprintf(stderr, "failed to allocate volume slice: %d\n", num_slices);
				return -1;
			}
			slice->next = 0;

			img_init(&slice->img);
			if(img_load(&slice->img, argv[i]) == -1) {
				fprintf(stderr, "failed to load volume slice %d: %s\n", num_slices, argv[i]);
				free(slice);
				return -1;
			}
			img_convert(&slice->img, IMG_FMT_GREY8);

			if(num_slices > 0 && (xres != slice->img.width || yres != slice->img.height)) {
				fprintf(stderr, "error: slice %d (%s) is %dx%d, up to now we had %dx%d images\n", num_slices, argv[i],
						slice->img.width, slice->img.height, xres, yres);
				img_destroy(&slice->img);
				free(slice);
				return -1;
			}
			xres = slice->img.width;
			yres = slice->img.height;

			if(head) {
				tail->next = slice;
				tail = slice;
			} else {
				head = tail = slice;
			}
			printf("loaded volume slice %d: %s\n", num_slices++, argv[i]);
		}
	}

	if(!head) {
		fprintf(stderr, "you must specify a list of images for the volume data slices\n");
		return -1;
	}

	if(!(volume = malloc(num_slices * sizeof *volume))) {
		fprintf(stderr, "failed to allocate volume data (%d slices)\n", num_slices);
		return -1;
	}

	for(i=0; i<num_slices; i++) {
		void *tmp;

		assert(head);
		volume[i] = head->img;

		tmp = head;
		head = head->next;
		free(tmp);
	}

	return 0;
}
Example #25
double VideoStream::EncodeFrame( uint8_t *buffer, int buffer_size, bool add_timestamp, unsigned int timestamp )
{
#ifdef HAVE_LIBSWSCALE
    static struct SwsContext *img_convert_ctx = 0;
#endif // HAVE_LIBSWSCALE
	double pts = 0.0;


	if (ost)
	{
#if ZM_FFMPEG_048
		pts = (double)ost->pts.val * ofc->pts_num / ofc->pts_den;
#else
		pts = (double)ost->pts.val * ost->time_base.num / ost->time_base.den;
#endif
	}

#if ZM_FFMPEG_SVN
	AVCodecContext *c = ost->codec;
#else
	AVCodecContext *c = &ost->codec;
#endif
	if ( c->pix_fmt != pf )
	{
		memcpy( tmp_opicture->data[0], buffer, buffer_size );
#ifdef HAVE_LIBSWSCALE
        if ( !img_convert_ctx )
        {
            img_convert_ctx = sws_getCachedContext( NULL, c->width, c->height, pf, c->width, c->height, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
            if ( !img_convert_ctx )
                Panic( "Unable to initialise image scaling context" );
        }
        sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, c->height, opicture->data, opicture->linesize );
#else // HAVE_LIBSWSCALE
		img_convert( (AVPicture *)opicture, c->pix_fmt, (AVPicture *)tmp_opicture, pf, c->width, c->height );
#endif // HAVE_LIBSWSCALE
	}
	else
	{
		memcpy( opicture->data[0], buffer, buffer_size );
	}
	AVFrame *opicture_ptr = opicture;

	int ret = 0;
	if ( ofc->oformat->flags & AVFMT_RAWPICTURE )
	{
#if ZM_FFMPEG_048
		ret = av_write_frame( ofc, ost->index, (uint8_t *)opicture_ptr, sizeof(AVPicture) );
#else
		AVPacket pkt;
		av_init_packet( &pkt );

#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
		pkt.flags |= AV_PKT_FLAG_KEY;
#else
		pkt.flags |= PKT_FLAG_KEY;
#endif
		pkt.stream_index = ost->index;
		pkt.data = (uint8_t *)opicture_ptr;
		pkt.size = sizeof(AVPicture);

		ret = av_write_frame(ofc, &pkt);
#endif
	}
	else
	{
		if ( add_timestamp )
			ost->pts.val = timestamp;
		int out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, opicture_ptr);
		if ( out_size > 0 )
		{
#if ZM_FFMPEG_048
			ret = av_write_frame(ofc, ost->index, video_outbuf, out_size);
#else
			AVPacket pkt;
			av_init_packet(&pkt);

#if ZM_FFMPEG_049
			pkt.pts = c->coded_frame->pts;
#else
			pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, ost->time_base );
#endif
			if(c->coded_frame->key_frame)
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
				pkt.flags |= AV_PKT_FLAG_KEY;
#else
				pkt.flags |= PKT_FLAG_KEY;
#endif
			pkt.stream_index = ost->index;
			pkt.data = video_outbuf;
			pkt.size = out_size;

			ret = av_write_frame( ofc, &pkt );
#endif
		}
	}
	if ( ret != 0 )
	{
		Fatal( "Error %d while writing video frame: %s", ret, strerror( errno ) );
	}
	return( pts );
}
Example #26
static int ffmpeg_decode (codec_data_t *ptr,
                          frame_timestamp_t *pts,
                          int from_rtp,
                          int *sync_frame,
                          uint8_t *buffer,
                          uint32_t buflen,
                          void *ud)
{
    ffmpeg_codec_t *ffmpeg = (ffmpeg_codec_t *)ptr;
    uint32_t bytes_used = 0;
    int got_picture = 0;
    uint64_t ts = pts->msec_timestamp;

    //ffmpeg_message(LOG_ERR, "ffmpeg", "%u timestamp "U64, buflen, ts);
    if (ffmpeg->m_codec_opened == false) {
        // look for header, like above, and open it
        bool open_codec = true;
        switch (ffmpeg->m_codecId) {
        case CODEC_ID_H264:
            open_codec = ffmpeg_find_h264_size(ffmpeg, buffer, buflen);
            break;
        default:
            break;
        }
        if (open_codec) {
            if (avcodec_open(ffmpeg->m_c, ffmpeg->m_codec) < 0) {
                ffmpeg_message(LOG_CRIT, "ffmpeg", "failed to open codec");
                return buflen;
            }
            ffmpeg->m_codec_opened = true;
            ffmpeg_message(LOG_ERR, "ffmpeg", "opened codec");
        } else {
            ffmpeg_message(LOG_ERR, "ffmpeg", "no open %u "U64, buflen, ts);
            return buflen;
        }
    }

    // look and see if we have read the I frame.
    if (ffmpeg->m_got_i == false) {
        if (ffmpeg_frame_is_sync(ptr, buffer, buflen, NULL) == 0) {
            return buflen;
        }
        ffmpeg->m_got_i = true;
    }

    int ret;
    do {
        int local_got_picture;
        ret = avcodec_decode_video(ffmpeg->m_c,
                                   ffmpeg->m_picture,
                                   &local_got_picture,
                                   buffer + bytes_used,
                                   buflen - bytes_used);
        bytes_used += ret;
        //ffmpeg_message(LOG_CRIT, "ffmpeg", "used %d %d", ret, local_got_picture);
        got_picture |= local_got_picture;
    } while (ret != -1 && bytes_used < buflen);

    if (pts->timestamp_is_pts) {
        //ffmpeg_message(LOG_ERR, "ffmpeg", "pts timestamp "U64, ts);
        if (ffmpeg->m_codecId == CODEC_ID_MPEG2VIDEO) {
            if (ffmpeg->pts_convert.frame_rate == 0.0) {
                int have_mpeg2;
                uint32_t h, w;
                double bitrate, aspect_ratio;
                uint8_t profile;
                MP4AV_Mpeg3ParseSeqHdr(buffer, buflen,
                                       &have_mpeg2,
                                       &h, &w,
                                       &ffmpeg->pts_convert.frame_rate,
                                       &bitrate, &aspect_ratio,
                                       &profile);
            }

            int ftype;
            int header = MP4AV_Mpeg3FindPictHdr(buffer, buflen, &ftype);
            if (header >= 0) {
                uint16_t temp_ref = MP4AV_Mpeg3PictHdrTempRef(buffer + header);
                uint64_t ret;
                if (got_picture == 0 ||
                        mpeg3_find_dts_from_pts(&ffmpeg->pts_convert,
                                                ts,
                                                ftype,
                                                temp_ref,
                                                &ret) < 0) {
                    ffmpeg->have_cached_ts = false;
                    return buflen;
                }
#if 0
                ffmpeg->m_vft->log_msg(LOG_DEBUG, "ffmpeg", "pts "U64" dts "U64" temp %u type %u %u",
                                       ts, ret,
                                       temp_ref, ftype, got_picture);
#endif
                ts = ret;
                //	ffmpeg_message(LOG_ERR, "ffmpeg", "type %d ref %u "U64, ftype, temp_ref, ret);
            }
        } else if (ffmpeg->m_codecId == CODEC_ID_MPEG4) {
            uint8_t *vopstart = MP4AV_Mpeg4FindVop(buffer, buflen);
            if (vopstart) {
                int ftype = MP4AV_Mpeg4GetVopType(vopstart, buflen);
                uint64_t dts;
                if (MP4AV_calculate_dts_from_pts(&ffmpeg->pts_to_dts,
                                                 ts,
                                                 ftype,
                                                 &dts) < 0) {
                    ffmpeg->have_cached_ts = false;
#ifdef DEBUG_FFMPEG_PTS
                    ffmpeg_message(LOG_DEBUG, "ffmpeg", "type %d %d pts "U64" failed to calc",
                                   ftype, got_picture, ts);
#endif
                    return buflen;
                }
#ifdef DEBUG_FFMPEG_PTS
                ffmpeg_message(LOG_DEBUG, "ffmpeg", "type %d %d pts "U64" dts "U64,
                               ftype, got_picture, ts, dts);
#endif
                ts = dts;
            }
        } else if (ffmpeg->m_codecId == CODEC_ID_H264) {
            uint8_t *nal_ptr = buffer;
            uint32_t len = buflen;
            bool have_b_nal = false;
            do {
                if (h264_nal_unit_type_is_slice(h264_nal_unit_type(nal_ptr))) {
                    uint8_t slice_type;
                    if (h264_find_slice_type(nal_ptr, len, &slice_type, false) >= 0) {
                        have_b_nal = H264_TYPE_IS_B(slice_type);
                    }
                }
                uint32_t offset = h264_find_next_start_code(nal_ptr, len);
                if (offset == 0) {
                    len = 0;
                } else {
                    nal_ptr += offset;
                    len -= offset;
                }
            } while (len > 0 && have_b_nal == false);
            uint64_t dts;
            if (MP4AV_calculate_dts_from_pts(&ffmpeg->pts_to_dts,
                                             ts,
                                             have_b_nal ? VOP_TYPE_B : VOP_TYPE_P,
                                             &dts) < 0) {
                ffmpeg->have_cached_ts = false;
#ifdef DEBUG_FFMPEG_PTS
                ffmpeg_message(LOG_DEBUG, "ffmpeg", "pts "U64" failed to calc",
                               ts);
#endif
                return buflen;
            }
            ts = dts;
        }
    }
    if (got_picture != 0) {
        if (ffmpeg->m_video_initialized == false) {
            double aspect;
            if (ffmpeg->m_c->sample_aspect_ratio.den == 0) {
                aspect = 0.0; // don't have one
            } else {
                aspect = av_q2d(ffmpeg->m_c->sample_aspect_ratio);
            }
            if (ffmpeg->m_c->width == 0) {
                return buflen;
            }
            ffmpeg->m_vft->video_configure(ffmpeg->m_ifptr,
                                           ffmpeg->m_c->width,
                                           ffmpeg->m_c->height,
                                           VIDEO_FORMAT_YUV,
                                           aspect);
            ffmpeg->m_video_initialized = true;
        }

        if (ffmpeg->m_c->pix_fmt != PIX_FMT_YUV420P) {
            // convert the image from whatever it is to YUV 4:2:0
            AVPicture from, to;
            int ret;
            // get the buffer to copy into (put it right into the ring buffer)
            ret = ffmpeg->m_vft->video_get_buffer(ffmpeg->m_ifptr,
                                                  &to.data[0],
                                                  &to.data[1],
                                                  &to.data[2]);
            if (ret == 0) {
                return buflen;
            }
            // set up the AVPicture structures
            to.linesize[0] = ffmpeg->m_c->width;
            to.linesize[1] = ffmpeg->m_c->width / 2;
            to.linesize[2] = ffmpeg->m_c->width / 2;
            for (int ix = 0; ix < 4; ix++) {
                from.data[ix] = ffmpeg->m_picture->data[ix];
                from.linesize[ix] = ffmpeg->m_picture->linesize[ix];
            }

            img_convert(&to, PIX_FMT_YUV420P,
                        &from, ffmpeg->m_c->pix_fmt,
                        ffmpeg->m_c->width, ffmpeg->m_c->height);
            ffmpeg->m_vft->video_filled_buffer(ffmpeg->m_ifptr,
                                               ffmpeg->have_cached_ts ?
                                               ffmpeg->cached_ts : ts);
        } else {
            ffmpeg->m_vft->video_have_frame(ffmpeg->m_ifptr,
                                            ffmpeg->m_picture->data[0],
                                            ffmpeg->m_picture->data[1],
                                            ffmpeg->m_picture->data[2],
                                            ffmpeg->m_picture->linesize[0],
                                            ffmpeg->m_picture->linesize[1],
                                            ffmpeg->have_cached_ts ?
                                            ffmpeg->cached_ts : ts);
        }
        ffmpeg->cached_ts = ts;
    } else {
        ffmpeg->cached_ts = ts;
        ffmpeg->have_cached_ts = true;
    }
#ifdef DEBUG_FFMPEG_FRAME
    ffmpeg_message(LOG_DEBUG, "ffmpeg", "used %u of %u", bytes_used, buflen);
#endif
    return (buflen);
}
Example #27
void frame_dump ( struct mame_bitmap * bitmap )
{
  static unsigned int *dumpbig = NULL;
  unsigned char *dumpd;
  int y;
  int xoff, yoff, xsize, ysize;
  int outsz;
  static int framecnt=0;
  static unsigned char * myoutframe;

  framecnt++;
  if ((framecnt % frame_halver) != 0)
    return; // skip this frame

#if 0
  xoff = Machine->visible_area.min_x;
  yoff = Machine->visible_area.min_y;
  xsize= Machine->visible_area.max_x-xoff+1;
  ysize = Machine->visible_area.max_y-yoff+1;
#endif

  xsize = visual_width;
  ysize = visual_height;
  xoff = 0;
  yoff = 0;

  if (!dumpbig)
  {
    int dstsize = bitmap->width * bitmap->height * sizeof (unsigned int);
    dumpbig = malloc ( dstsize );
    myoutframe = malloc( dstsize );
  }

  dumpd = (unsigned char*)dumpbig;

	/* Blit into dumpbig */
#define INDIRECT current_palette->lookup
#define DEST dumpbig
#define DEST_WIDTH (bitmap->width)
#define SRC_PIXEL unsigned short
#define DEST_PIXEL unsigned int
#define PACK_BITS
#include "blit.h"
#undef PACK_BITS
#undef SRC_PIXEL
#undef DEST_PIXEL
#undef INDIRECT

  /* Swap R and B in each row, then pack the row into myoutframe. */
  for (y=0; y < ysize; y++)
  {
    int offs = bitmap->width*(y+yoff)*4;
    int x;

    for(x=0; x < xsize; x++)
    {
      unsigned char c;
      c = dumpd[offs+x*3+2];
      dumpd[offs+x*3+2] = dumpd[offs+x*3];
      dumpd[offs+x*3] = c;
    }

    memcpy( &myoutframe[xsize*y*3], &dumpd[offs+3*xoff], xsize*3 );
  }

  /* myoutframe now contains a packed RGB24 frame */
  inpic.data[0] = myoutframe;

  img_convert(&outpic, PIX_FMT_YUV420P, &inpic, PIX_FMT_RGB24, xsize, ysize);
  
  outsz = avcodec_encode_video (avctx, 
  		(unsigned char*)output_buffer, BUFFSIZE, pic);
  fwrite(output_buffer, 1, outsz, video_outf);
}
Example #28
int FFMPEG::convert_cmodel(AVPicture *picture_in, PixelFormat pix_fmt_in,
                           int width_in, int height_in, VFrame *frame_out) {

    // set up a temporary picture_out from frame_out
    AVPicture picture_out;
    init_picture_from_frame(&picture_out, frame_out);
    int cmodel_out = frame_out->get_color_model();
    PixelFormat pix_fmt_out = color_model_to_pix_fmt(cmodel_out);

#ifdef HAVE_SWSCALER
    // We need a context for swscale
    struct SwsContext *convert_ctx;
#endif
    int result;
#ifndef HAVE_SWSCALER
    // do conversion within libavcodec if possible
    if (pix_fmt_out != PIX_FMT_NB) {
        result = img_convert(&picture_out,
                             pix_fmt_out,
                             picture_in,
                             pix_fmt_in,
                             width_in,
                             height_in);
        if (result) {
            printf("FFMPEG::convert_cmodel img_convert() failed\n");
        }
        return result;
    }
#else
    convert_ctx = sws_getContext(width_in, height_in,pix_fmt_in,
                                 frame_out->get_w(),frame_out->get_h(),pix_fmt_out,
                                 SWS_BICUBIC, NULL, NULL, NULL);

    if(convert_ctx == NULL) {
        printf("FFMPEG::convert_cmodel : swscale context initialization failed\n");
        return 1;
    }

    // sws_scale() returns the output slice height; map it to the
    // 0-on-success convention the error check below expects
    result = sws_scale(convert_ctx,
                       picture_in->data, picture_in->linesize,
                       0, height_in,
                       picture_out.data, picture_out.linesize) < 0;


    sws_freeContext(convert_ctx);

    if(result) {
        printf("FFMPEG::convert_cmodel sws_scale() failed\n");
    }
#endif

    // make an intermediate temp frame only if necessary
    int cmodel_in = pix_fmt_to_color_model(pix_fmt_in);
    if (cmodel_in == BC_TRANSPARENCY) {
        if (pix_fmt_in == PIX_FMT_RGB32) {
            // avoid infinite recursion if things are broken
            printf("FFMPEG::convert_cmodel pix_fmt_in broken!\n");
            return 1;
        }

        // NOTE: choose RGBA8888 as a hopefully non-lossy colormodel
        VFrame *temp_frame = new VFrame(0, width_in, height_in,
                                        BC_RGBA8888);
        if (convert_cmodel(picture_in, pix_fmt_in,
                           width_in, height_in, temp_frame)) {
            delete temp_frame;
            return 1;  // recursed call will print error message
        }

        int result = convert_cmodel(temp_frame, frame_out);
        delete temp_frame;
        return result;
    }


    // NOTE: no scaling possible in img_convert() so none possible here
    if (frame_out->get_w() != width_in ||
            frame_out->get_h() != height_in) {
        printf("scaling from %dx%d to %dx%d not allowed\n",
               width_in, height_in,
               frame_out->get_w(), frame_out->get_h());
        return 1;
    }


    // if we reach here we know that cmodel_transfer() will work
    uint8_t *yuv_in[3] = {0,0,0};
    uint8_t *row_pointers_in[height_in];
    if (cmodel_is_planar(cmodel_in)) {
        yuv_in[0] = picture_in->data[0];
        yuv_in[1] = picture_in->data[1];
        yuv_in[2] = picture_in->data[2];
    }
    else {
        // set row pointers for picture_in
        uint8_t *data = picture_in->data[0];
        int bytes_per_line =
            cmodel_calculate_pixelsize(cmodel_in) * width_in;
        for (int i = 0; i < height_in; i++) {
            row_pointers_in[i] = data + i * bytes_per_line;
        }
    }

    cmodel_transfer
    (// Packed data out
        frame_out->get_rows(),
        // Packed data in
        row_pointers_in,

        // Planar data out
        frame_out->get_y(), frame_out->get_u(), frame_out->get_v(),
        // Planar data in
        yuv_in[0], yuv_in[1], yuv_in[2],

        // Dimensions in
        0, 0, width_in, height_in,  // NOTE: dimensions are same
        // Dimensions out
        0, 0, width_in, height_in,

        // Color model in, color model out
        cmodel_in, cmodel_out,

        // Background color
        0,

        // Rowspans in, out (of luma for YUV)
        width_in, width_in

    );

    return 0;
}
Example #29
int main (int argc, const char * argv[])
{
    AVFormatContext *pFormatCtx;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame; 
    AVFrame         *pFrameRGB;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer;

    // Register all formats and codecs
    av_register_all();

    // Open video file
    AVInputFormat *iformat = NULL;
    AVDictionary *format_opts = NULL;
    if (avformat_open_input(&pFormatCtx, argv[1], iformat, &format_opts)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], false);

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
        {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL)
        return -1; // Codec not found

    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec

    // Hack to correct wrong frame rates that seem to be generated by some codecs
    if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1)
		pCodecCtx->time_base.den=1000;
		
    // Allocate video frame
    pFrame=avcodec_alloc_frame();

    // Allocate an AVFrame structure
    pFrameRGB=avcodec_alloc_frame();
    if(pFrameRGB==NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
        pCodecCtx->height);

    buffer=malloc(numBytes);

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
        pCodecCtx->width, pCodecCtx->height);

    // Read frames and save first five frames to disk
    i=0;
    av_init_packet(&packet);
    while(av_read_frame(pFormatCtx, &packet)>=0)
    {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream)
        {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                &packet);

            // Did we get a video frame?
            if(frameFinished)
            {
				static struct SwsContext *img_convert_ctx;

#if 0
				// Older removed code
                // Convert the image from its native format to RGB swscale
                img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24, 
                    (AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, 
                    pCodecCtx->height);
				
				// function template, for reference
				int sws_scale(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
							  int srcSliceH, uint8_t* dst[], int dstStride[]);
#endif
				// Convert the image into RGB format that PPM files expect
				if(img_convert_ctx == NULL) {
					int w = pCodecCtx->width;
					int h = pCodecCtx->height;
					
					img_convert_ctx = sws_getContext(w, h, 
									pCodecCtx->pix_fmt, 
									w, h, PIX_FMT_RGB24, SWS_BICUBIC,
									NULL, NULL, NULL);
					if(img_convert_ctx == NULL) {
						fprintf(stderr, "Cannot initialize the conversion context!\n");
						exit(1);
					}
				}
				int ret = sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, 
						  pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
#if 0 // this used to be true, as of 1/2009, but apparently it is no longer true in 3/2009
				if(ret) {
					fprintf(stderr, "SWS_Scale failed [%d]!\n", ret);
					exit(-1);
				}
#endif
                // Save the frame to disk
                if(i++<=5)
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    // Free the RGB image
    free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    return 0;
}
Example #30
int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {

  VideoPicture *vp;
  int dst_pix_fmt;
  AVPicture pict;

  /* wait until we have space for a new pic */
  SDL_LockMutex(is->pictq_mutex);
  while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
	!is->quit) {
    SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  }
  SDL_UnlockMutex(is->pictq_mutex);

  if(is->quit)
    return -1;

  // windex is set to 0 initially
  vp = &is->pictq[is->pictq_windex];

  /* allocate or resize the buffer! */
  if(!vp->bmp ||
     vp->width != is->video_st->codec->width ||
     vp->height != is->video_st->codec->height) {
    SDL_Event event;

    vp->allocated = 0;
    /* we have to do it in the main thread */
    event.type = FF_ALLOC_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);

    /* wait until we have a picture allocated */
    SDL_LockMutex(is->pictq_mutex);
    while(!vp->allocated && !is->quit) {
      SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);
    if(is->quit) {
      return -1;
    }
  }
  /* We have a place to put our picture on the queue */
  /* If we are skipping a frame, do we set this to null 
     but still return vp->allocated = 1? */


  if(vp->bmp) {

    SDL_LockYUVOverlay(vp->bmp);
    
    dst_pix_fmt = PIX_FMT_YUV420P;
    /* point pict at the queue */

    pict.data[0] = vp->bmp->pixels[0];
    pict.data[1] = vp->bmp->pixels[2];
    pict.data[2] = vp->bmp->pixels[1];
    
    pict.linesize[0] = vp->bmp->pitches[0];
    pict.linesize[1] = vp->bmp->pitches[2];
    pict.linesize[2] = vp->bmp->pitches[1];
    
    // Convert the image into YUV format that SDL uses
    img_convert(&pict, dst_pix_fmt,
		(AVPicture *)pFrame, is->video_st->codec->pix_fmt, 
		is->video_st->codec->width, is->video_st->codec->height);
    
    SDL_UnlockYUVOverlay(vp->bmp);
    vp->pts = pts;

    /* now we inform our display thread that we have a pic ready */
    if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
      is->pictq_windex = 0;
    }
    SDL_LockMutex(is->pictq_mutex);
    is->pictq_size++;
    SDL_UnlockMutex(is->pictq_mutex);
  }
  return 0;
}