Code example #1
static void open_video(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;

    c = ost->st->codec;

    /* open the codec */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_RGB24) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_RGB24, 1024, 1024);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
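This and most of the following snippets call an alloc_picture() helper that is not shown, and its exact signature varies from project to project. A minimal sketch of the variant the muxing-style examples assume, modeled on FFmpeg's muxing.c sample (the 32-byte alignment is an assumption, not taken from any snippet here):

#include <libavutil/frame.h>

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate reference-counted buffers for the frame data */
    if (av_frame_get_buffer(picture, 32) < 0) {
        av_frame_free(&picture);
        return NULL;
    }
    return picture;
}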
Code example #2
File: output.c Project: JackDanger/libav
static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;

    c = st->codec;

    /* open the codec */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    tmp_picture = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
Code example #3
File: muxing.c Project: KuMiMusic/FFmpeg
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->st->codec;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
Code example #4
File: VideoLivRecord.cpp Project: u-stone/CodeBase
BOOL CVideoLivRecord::open_video(AVStream *st, AVCodec* codec, AVDictionary* opt_arg)
{
	AVCodecContext* avcc = st->codec;
	AVDictionary* opt = 0;
	av_dict_copy(&opt, opt_arg, 0);

	int ret = avcodec_open2(avcc, codec, &opt);
	av_dict_free(&opt);
	if (ret < 0){
		log("[CVideoLivRecord::open_video] -- avcodec_open2() error");
		string str = av_make_error_string(64, ret);
		log(str);
		return FALSE;
	}

	m_pVideoFrame = alloc_picture(avcc->pix_fmt, m_Width, m_Height);
	if (!m_pVideoFrame){
		log("[CVideoLivRecord::open_video] -- alloc_picture() error");
		return FALSE;
	}
	m_pVideoBkFrame = NULL;
	if (avcc->pix_fmt != AV_PIX_FMT_RGBA){
		m_pVideoBkFrame = alloc_picture(AV_PIX_FMT_RGBA, m_Width, m_Height);
		if (!m_pVideoBkFrame){
			log("[CVideoLivRecord::open_video] -- alloc_picture(AV_PIX_FMT) error");
			return FALSE;
		}
	}
	return TRUE;
}
Code example #5
File: video_recording.cpp Project: habnabit/macemu
bool video_recording_state_t::initialize(uint16 video_nr, int width, int height, int depth)
{
	enum AVPixelFormat raw_fmt;
	if (depth == VIDEO_DEPTH_8BIT) raw_fmt = AV_PIX_FMT_PAL8;
	else if (depth == VIDEO_DEPTH_32BIT) raw_fmt = AV_PIX_FMT_ARGB;
	else return false;
	char filename[32];
	snprintf(filename, sizeof filename, "rec%hu.avi", video_nr);
	AVOutputFormat *fmt = av_guess_format(NULL, filename, NULL);
	if (!fmt) return false;
	if (fmt->flags & AVFMT_NOFILE) return false;

	output_context = avformat_alloc_context();
	if (!output_context) return false;
	output_context->oformat = fmt;
	snprintf(output_context->filename, sizeof(output_context->filename), "%s", filename);
	if (fmt->video_codec == AV_CODEC_ID_NONE) return false;

	if (!add_audio_stream(AV_CODEC_ID_PCM_S16LE)) return false;
	if (!add_video_stream(fmt->video_codec, width, height)) return false;
	if (!open_audio()) return false;
	if (!open_video()) return false;
	if (!(video_frame_raw = alloc_picture(video_stream, raw_fmt))) return false;
	if (!(video_frame = alloc_picture(video_stream, AV_PIX_FMT_YUV420P))) return false;
	if (!init_sws_context()) return false;

	if (avio_open(&output_context->pb, filename, AVIO_FLAG_WRITE) < 0) return false;
	avformat_write_header(output_context, NULL);
	return true;
}
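The initialize() above opens the output file and writes the header but shows no teardown. A hedged sketch of the matching shutdown order (finalize is a hypothetical name; freeing the frames and sws context is elided):

bool video_recording_state_t::finalize()
{
	/* flush delayed packets and write the container trailer */
	if (av_write_trailer(output_context) < 0) return false;
	/* close the file opened with avio_open(), then free the muxer */
	avio_closep(&output_context->pb);
	avformat_free_context(output_context);
	output_context = NULL;
	return true;
}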
Code example #6
File: LibAVVideoWriter.cpp Project: ethz-asl/iclcv
    void LibAVVideoWriter::Data::fill_rgb_image(const ImgBase *src, AVFrame **pict)
    {
      if(!*pict) {
        *pict = alloc_picture(INPUT_FORMAT,src->getSize().width,src->getSize().height);
      } else {
        if((*pict)->width != src->getSize().width || (*pict)->height != src->getSize().height) {
#if LIBAVCODEC_VERSION_MAJOR > 54
          av_frame_free(pict);
#else
          av_free((*pict)->data[0]);
          av_free(*pict);
#endif
          *pict = alloc_picture(INPUT_FORMAT,src->getSize().width,src->getSize().height);
        }
      }
#if LIBAVCODEC_VERSION_MAJOR > 54
      av_frame_make_writable(*pict);
#endif
      depth d = src->getDepth();
      switch(d) {
        case depth16s:
          core::planarToInterleaved(src->as16s(),(*pict)->data[0],(*pict)->linesize[0]);
          break;
        case depth32f:
          core::planarToInterleaved(src->as32f(),(*pict)->data[0],(*pict)->linesize[0]);
          break;
        case depth32s:
          core::planarToInterleaved(src->as32s(),(*pict)->data[0],(*pict)->linesize[0]);
          break;
        case depth64f:
          core::planarToInterleaved(src->as64f(),(*pict)->data[0],(*pict)->linesize[0]);
          break;
        default:
          core::planarToInterleaved(src->as8u(),(*pict)->data[0],(*pict)->linesize[0]);
      }
    }
Code example #7
File: ff_example.c Project: licshire/ff_dm365
static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    if (codec->pix_fmts && codec->pix_fmts[0] != -1) {
        c->pix_fmt = codec->pix_fmts[0];
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {

        /* allocate output buffer */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 3*1024*1024;
        video_outbuf = CMEM_alloc(video_outbuf_size, &alloc_params);
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
Code example #8
File: ff-save.c Project: OpenCL/GEGL-OpenCL-old
static void
open_video (Priv * p, AVFormatContext * oc, AVStream * st)
{
  AVCodec  *codec;
  AVCodecContext *c;

  c = st->codec;

  /* find the video encoder */
  codec = avcodec_find_encoder (c->codec_id);
  if (!codec)
    {
      fprintf (stderr, "codec not found\n");
      exit (1);
    }

  /* open the codec */
  if (avcodec_open2 (c, codec, NULL) < 0)
    {
      fprintf (stderr, "could not open codec\n");
      exit (1);
    }

  p->video_outbuf = NULL;
  if (!(oc->oformat->flags & AVFMT_RAWPICTURE))
    {
      /* allocate output buffer */
      /* XXX: API change will be done */
      p->video_outbuf_size = 200000;
      p->video_outbuf = malloc (p->video_outbuf_size);
    }

  /* allocate the encoded raw picture */
  p->picture = alloc_picture (c->pix_fmt, c->width, c->height);
  if (!p->picture)
    {
      fprintf (stderr, "Could not allocate picture\n");
      exit (1);
    }

  /* if the output format is not YUV420P, then a temporary YUV420P
     picture is needed too. It is then converted to the required
     output format */
  p->tmp_picture = NULL;
  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      p->tmp_picture = alloc_picture (AV_PIX_FMT_RGB24, c->width, c->height);
      if (!p->tmp_picture)
        {
          fprintf (stderr, "Could not allocate temporary picture\n");
          exit (1);
        }
    }
}
Code example #9
Codec_Errors H263VideoDecoder::Decode(void* inSample, long insize, void** outSample, long* outsize, long long timestamp){
	Codec_Errors retval = CODEC_SUCCEEDED;
	try{
		VideoMediaFormat* vf = (VideoMediaFormat*)CurrentFormat;
		//validate parameters, if not opened, then fail.
		if(vf == NULL){
			retval = CODEC_CODEC_NOT_OPENED;
		}
		else if(FFDecoderContext == NULL){
			retval = CODEC_CODEC_NOT_OPENED;
		}
		else{
			//initialize a packet.
			AVPacket avpkt;
			av_init_packet(&avpkt);
			avpkt.size = insize;
			avpkt.data = (unsigned char*) inSample;
			//allocate a picture to receive the decoded frame.
			AVFrame* picture= avcodec_alloc_frame();
			int got_picture, len;
			//decode the packet.
			len = avcodec_decode_video2(FFDecoderContext, picture, &got_picture, &avpkt);
			//if got_picture returned true, then we have a decoded frame!
			if(got_picture != 0){
				//get the desired output format.
				PixelFormat fmt = (PixelFormat)VideoMediaFormat::GetFFPixel(vf->PixelFormat);
				//if the desired format isn't the decoder format, then we need to scale.
				if(fmt != PIX_FMT_YUV420P){
					//allocate a frame of the desired format.
					AVFrame* tpic = alloc_picture(fmt, vf->Width, vf->Height);
					//scale the frame.
					sws_scale(ScaleContext, picture->data, picture->linesize,
						  0, vf->Height, tpic->data, tpic->linesize);
					//set the outgoing reference.
					*outSample = tpic->data[0];
					//calculate and set the outgoing frame size, in bytes.
					*outsize = picture->width * picture->height * VideoMediaFormat::GetPixelBits(vf->PixelFormat) / 8;
					//free the temporary picture.
					av_free(tpic);
					tpic = NULL;
				}
				else{//if we desire the standard format, then just set the reference and size.
					*outSample = picture->data[0];
					*outsize = picture->width * picture->height * 12 / 8;
				}
			
			}
			else{
				*outsize = 0;
			}
			av_free(picture);
		}
		

	}
	catch(...){
		retval = CODEC_UNEXPECTED;
	}
	return retval;
}
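avcodec_alloc_frame() and avcodec_decode_video2() used here were deprecated and later removed; on FFmpeg 3.1+ the same got_picture logic maps onto the send/receive API. A hedged sketch of that mapping (decode_one is a hypothetical helper, not part of this decoder class):

#include <libavcodec/avcodec.h>

/* Returns 1 when *out holds a decoded frame, 0 when the decoder
 * needs more input, or a negative AVERROR code on failure. */
static int decode_one(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *out)
{
    int ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0)
        return ret;
    ret = avcodec_receive_frame(ctx, out);
    if (ret == AVERROR(EAGAIN))
        return 0;   /* packet consumed, frame not ready yet */
    if (ret < 0)
        return ret; /* AVERROR_EOF or a real error */
    return 1;       /* the old got_picture case */
}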
Code example #10
void Java_com_richitec_imeeting_video_ECVideoEncoder_setupVideoEncoder(
		JNIEnv* env, jobject thiz) {
	qvo = (QuickVideoOutput*) malloc(sizeof(QuickVideoOutput));
	qvo->width = out_img_width;
	qvo->height = out_img_height;

	char rtmp_full_path[300];
	memset(rtmp_full_path, 0, sizeof rtmp_full_path);
	sprintf(rtmp_full_path, "%s/%s/%s live=1 conn=S:%s", rtmp_url, group_id,
			live_name, live_name);
	D("rtmp full path: %s", rtmp_full_path);

	int ret = init_quick_video_output(qvo, rtmp_full_path, "flv");
	if (ret < 0) {
		D("quick video output initial failed.");
		release_video_encoder();
		call_void_method(env, thiz, "onVideoLiveCannotEstablish");
		return;
	}

	enum PixelFormat dst_pix_fmt = qvo->video_stream->codec->pix_fmt;
	src_pix_fmt = PIX_FMT_NV21;

	raw_picture = alloc_picture(dst_pix_fmt, qvo->width, qvo->height);
	tmp_picture = avcodec_alloc_frame();
	raw_picture->pts = 0;

	is_video_encode_ready = 1;
	D("video encoder setup ok");
	call_void_method(env, thiz, "onVideoLiveEstablish");
}
Code example #11
File: load-git.c Project: AresDice/subsurface
static int parse_picture_entry(git_repository *repo, const git_tree_entry *entry, const char *name)
{
	git_blob *blob;
	struct picture *pic;
	int hh, mm, ss, offset;
	char sign;

	/*
	 * The format of the picture name files is just the offset
	 * within the dive in the form [+-]hh:mm:ss, possibly followed
	 * by a hash to make the filename unique (which we can just
	 * ignore).
	 */
	if (sscanf(name, "%c%d:%d:%d", &sign, &hh, &mm, &ss) != 4)
		return report_error("Unknown file name %s", name);
	offset = ss + 60*(mm + 60*hh);
	if (sign == '-')
		offset = -offset;

	blob = git_tree_entry_blob(repo, entry);
	if (!blob)
		return report_error("Unable to read trip file");

	pic = alloc_picture();
	pic->offset.seconds = offset;
	dive_add_picture(active_dive, pic);

	for_each_line(blob, picture_parser, pic);
	git_blob_free(blob);
	return 0;
}
Code example #12
Codec_Errors VC1VideoDecoder::Decode(void* inSample, long insize, void** outSample, long* outsize, long long timestamp){
	Codec_Errors retval = CODEC_SUCCEEDED;
	try{
		VideoMediaFormat* vf = (VideoMediaFormat*)CurrentFormat;
		//check for validity.
		if(vf == NULL){
			retval = CODEC_CODEC_NOT_OPENED;
		}
		else if(FFDecoderContext == NULL){
			retval = CODEC_CODEC_NOT_OPENED;
		}
		else{
			//Instantiate packet.
			AVPacket avpkt;
			av_init_packet(&avpkt);
			avpkt.size = insize;
			avpkt.data = (unsigned char*) inSample;
			//allocate a frame to receive the decoded frame.
			AVFrame* picture= avcodec_alloc_frame();
			int got_picture, len;
			//decode the frame.
			len = avcodec_decode_video2(FFDecoderContext, picture, &got_picture, &avpkt);
			//if true, then we have a decoded frame!
			if(got_picture != 0){
				//get the desired output format.
				PixelFormat fmt = (PixelFormat)VideoMediaFormat::GetFFPixel(vf->PixelFormat);
				int linesize = VideoMediaFormat::GetPixelBits(vf->PixelFormat) / 8 * vf->Width;
				//if desired format isn't standard, then we need to scale.
				if(fmt != PIX_FMT_YUV420P){
					//allocate temp picture as desired format.
					AVFrame* tpic = alloc_picture(fmt, vf->Width, vf->Height);
					//scale to desired format.
					sws_scale(ScaleContext, picture->data, picture->linesize,
						  0, vf->Height, tpic->data, tpic->linesize);
					//set output references.
					*outSample = tpic->data[0];
					*outsize = picture->width * picture->height * VideoMediaFormat::GetPixelBits(vf->PixelFormat) / 8;
					av_free(tpic);
					tpic = NULL;
				}
				else{
					*outSample = picture->data[0];
					*outsize = picture->width * picture->height * 12 / 8;
				}
			
			}
			else{
				*outsize = 0;
			}
			av_free(picture);
		}
		

	}
	catch(...){
		retval = CODEC_UNEXPECTED;
	}
	return retval;
}
Code example #13
File: output-example.c Project: JSinglan/libav
static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;

    c = st->codec;

    /* open the codec */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* Allocate output buffer. */
        /* XXX: API change will be done. */
        /* Buffers passed into lav* can be allocated any way you prefer,
         * as long as they're aligned enough for the architecture, and
         * they're freed appropriately (such as using av_free for buffers
         * allocated with av_malloc). */
        video_outbuf_size = 200000;
        video_outbuf      = av_malloc(video_outbuf_size);
    }

    /* Allocate the encoded raw picture. */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    tmp_picture = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
Code example #14
File: ffdecode.cpp Project: xuchuansheng/GenXSource
// size pDataOut for YUV (width*height*3)/2
int CFFDecode::DecodePic(unsigned char* pData, long lsize, long pts, AVFrame **ppFrame, bool *pInterlaced)
{
	if (ppFrame == NULL) return E_POINTER;
	if (m_c->pix_fmt != PIX_FMT_YUV420P) return E_FAIL;
	*ppFrame = NULL;

	//{
	//	int size = m_c->width * m_c->height;
	//	/*DbgOutInt("size:",size);

	//	m_picture->data[0] = pData;
	//	m_picture->data[1] = pData+size;
	//	m_picture->data[2] = pData+size+(m_c->width/2)*(m_c->height/2);*/

	//	m_picture->data[0] = pDataOut;
	//	m_picture->data[1] = m_picture->data[0] + size;
	//	m_picture->data[2] = m_picture->data[1] + size / 4;
	//	m_picture->linesize[0] = m_c->width;
	//	m_picture->linesize[1] = m_c->width / 2;
	//	m_picture->linesize[2] = m_c->width / 2;
	//}

	int got_picture = 0;
	int ret = 0;

	try {
		ret = avcodec_decode_video(m_c, &m_picture, &got_picture, pData, lsize);
		DbgOutInt("ret decode:",ret);
		DbgOutInt("lsize decode:",lsize);
	} catch (...) {
		ret = 0;
		got_picture = 0;
	}

	if (got_picture == 0) return S_FALSE;

	m_picture.quality = 1;
	*ppFrame = &m_picture;
	if (pInterlaced != NULL) 
		*pInterlaced = (bool) m_picture.interlaced_frame;

	if (!tmp_picture){ // one time jpg conversion
		img_convert_ctx = sws_getContext(m_c->width, m_c->height, PIX_FMT_YUV420P,320, 240,PIX_FMT_RGB32, SWS_FAST_BILINEAR,NULL,NULL,NULL);
		if (img_convert_ctx){
			tmp_picture = alloc_picture(PIX_FMT_RGB32, 320, 240);
			//fill_yuv_image(tmp_picture, 0, m_c->width, m_c->height);
			int ires=sws_scale(img_convert_ctx, m_picture.data,  m_picture.linesize,0, m_c->height, tmp_picture->data, tmp_picture->linesize);
			SaveFrame(tmp_picture, 320, 240);
		}
	}
	

	return S_OK;
}
Code example #15
File: output.c Project: Brainiarc7/libav
static void open_video(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    int ret;

    c = ost->enc;

    /* open the codec */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
Code example #16
File: mpegvideo.c Project: 90robin/MyGit
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    AVFrame *pic;
    s->mb_skiped = 0;

    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0])
	{
        avcodec_default_release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
    
        for(i=0; i<MAX_PICTURE_COUNT; i++)
		{
            if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference)
			{
                avcodec_default_release_buffer(avctx, (AVFrame*)&s->picture[i]);
            }
        }
    }

    for(i=0; i<MAX_PICTURE_COUNT; i++)
	{
        if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/)
		{
            avcodec_default_release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
        }
    }

    if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
        pic= (AVFrame*)s->current_picture_ptr; // we already have an unused image (maybe it was set before reading the header)
    else
	{
        i= ff_find_unused_picture(s, 0);
        pic= (AVFrame*)&s->picture[i];
    }

    pic->reference= s->pict_type != B_TYPE ? 3 : 0;

    if( alloc_picture(s, (Picture*)pic, 0) < 0)
        return -1;

    s->current_picture_ptr= (Picture*)pic;

    s->current_picture_ptr->pict_type= s->pict_type;
    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;

    copy_picture(&s->current_picture, s->current_picture_ptr);

    s->hurry_up= s->avctx->hurry_up;

    return 0;
}
Code example #17
void handle_event(int event_type, VideoState *is) {
	switch(event_type) {
	case FF_ALLOC_EVENT:
		// uncomment for video
		alloc_picture(is);
		break;
	case FF_REFRESH_EVENT:
		// uncomment for video
		video_refresh_timer(is);
		break;
	default:
		break;
	}
}
Code example #18
File: FFMPEGer.cpp Project: forbe/recorder
	bool FFMPEGer::open_video(AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg){
		int ret;
		AVCodecContext *c = ost->st->codec;
		//AVDictionary *opt = NULL;
		//av_dict_copy(&opt, opt_arg, 0);

		/* open the codec */
		ret = avcodec_open2(c, codec, NULL);
		//av_dict_free(&opt);
		if (ret < 0) {
			ALOGE("Could not open video codec: %s", av_err2str(ret));
			return false;
		}
		
		/* allocate and init a re-usable frame */
		ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
		if (!ost->frame) {
			ALOGE("Could not allocate video frame");
			return false;
		}
		
		/* If the output format is not YUV420P, then a temporary YUV420P
		 * picture is needed too. It is then converted to the required
		 * output format. */
		ost->tmp_frame = NULL;
		if (c->pix_fmt != mPixFmt) {
			ALOGE("open_video alloc_picture for te");
			ost->tmp_frame = alloc_picture(mPixFmt, c->width, c->height);
			if (!ost->tmp_frame) {
				ALOGE("Could not allocate temporary picture");
				return false;
			}
		}

		return true;
	}
Code example #19
Codec_Errors H263VideoDecoder::Open(MediaFormat* encFormat, CodecData* encData){
	Codec_Errors retval = CODEC_SUCCEEDED;
	try
	{
		avcodec_register_all(); //initialize FFMPEG codecs.
		//set global variables.
		CurrentFormat = encFormat;
		CurrentData = encData;
		VideoMediaFormat* vf = (VideoMediaFormat*)encFormat;
		//find the H.263 decoder.
		FFDecoder = avcodec_find_decoder(CODEC_ID_H263);
		if(!FFDecoder) //if it returned null, we didn't find it, exit function.
			retval = CODEC_NOT_SUPPORTED;
		else{ //found decoder, now open.
			//allocate context.
			FFDecoderContext = avcodec_alloc_context3(FFDecoder);
			//open decoder.
			int err = avcodec_open2(FFDecoderContext, FFDecoder, NULL);
			if(err < 0) //if error in open, fail.
				retval = CODEC_FAILED_TO_OPEN;
			else{
				//get the ffmpeg format from the desired output format.
				PixelFormat fmt = (PixelFormat)VideoMediaFormat::GetFFPixel(vf->PixelFormat);
				TempFrame = alloc_picture(fmt, vf->Width, vf->Height); //allocate temp based on this format.

				if(fmt != PIX_FMT_YUV420P) //if it isn't the standard format, then instantiate the scaler.
				{
				
					ScaleContext = sws_getContext(vf->Width, vf->Height,
													 PIX_FMT_YUV420P,
													 vf->Width, vf->Height,
													 fmt,
													 SWS_BICUBIC, NULL, NULL, NULL);
				}
			}
			
		}
	}
	catch(...)
	{
		retval = CODEC_UNEXPECTED;
	}

	return retval;
}
Code example #20
File: LibAVVideoWriter.cpp Project: ethz-asl/iclcv
    void LibAVVideoWriter::Data::open_video(AVFormatContext *oc, OutputStream *ost)
    {
        AVCodecContext *c;

        c = ost->st->codec;

        /* open the codec */
        if (avcodec_open2(c, 0, 0) < 0) throw ICLException("could not open codec");

        /* Allocate the encoded raw picture. */
        ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
        if (!ost->frame) throw ICLException("Could not allocate picture");

        /* If the output format is not YUV420P, then a temporary YUV420P
         * picture is needed too. It is then converted to the required
         * output format. */
        ost->tmp_frame = 0;
    }
Code example #21
File: Test7.cpp Project: soffio/FFmpegTutorial
int queue_picture(VideoState* is, AVFrame* pFrame, AVFrame* pFrameYUV,
		double pts) {
	VideoPicture* vp;

	SDL_LockMutex(is->pictq_mutex);
	while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->quit) {
		SDL_CondWait(is->pictq_cond, is->pictq_mutex);
	}
	SDL_UnlockMutex(is->pictq_mutex);

	if (is->quit)
		return -1;

	vp = &is->pictq[is->pictq_windex];
	if (!vp->bmp || vp->width != is->video_ctx->width
			|| vp->height != is->video_ctx->height) {
		vp->allocated = 0;
		alloc_picture(is);
		if (is->quit) {
			return -1;
		}
	}
	if (vp->bmp) {
		SDL_LockMutex(screen_mutex);
		vp->pts = pts;
		sws_scale(is->sws_ctx, (const uint8_t* const *) pFrame->data,
				pFrame->linesize, 0, is->video_ctx->height, pFrameYUV->data,
				pFrameYUV->linesize);
		SDL_UpdateYUVTexture(vp->bmp, NULL, pFrameYUV->data[0],
				pFrameYUV->linesize[0], pFrameYUV->data[1],
				pFrameYUV->linesize[1], pFrameYUV->data[2],
				pFrameYUV->linesize[2]);
		SDL_UnlockMutex(screen_mutex);

		if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
			is->pictq_windex = 0;
		}
		SDL_LockMutex(is->pictq_mutex);
		is->pictq_size++;
		SDL_UnlockMutex(is->pictq_mutex);
	}
	return 0;
}
Code example #22
/**
 * Perform some lazy initialization of the context on the first frame write,
 * assuming that all future frame writes will have the same dimensions.
 * Blargh, our API should be restructured.
 */
static bool first_frame_init(JNIEnv *env, RtpOutputContext *rtpContext,
        jint frameFormat, jint frameWidth, jint frameHeight) {
    AVCodecContext *codec = rtpContext->avContext->streams[0]->codec;
    rtpContext->tempFrame = alloc_picture(codec->pix_fmt,
            codec->width, codec->height);
    if (rtpContext->tempFrame == NULL) {
        jniThrowOOM(env);
        return false;
    }

    rtpContext->imgConvert = sws_getContext(frameWidth, frameHeight,
            androidPixFmtToFFmpeg(frameFormat),
            codec->width, codec->height, codec->pix_fmt,
            SWS_BICUBIC, NULL, NULL, NULL);
    if (rtpContext->imgConvert == NULL) {
        jniThrowOOM(env);
        return false;
    }

    return true;
}
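A hedged sketch of how the tempFrame and imgConvert context prepared above would be used on each subsequent frame write; the function and its srcData/srcStride parameter names are assumptions, not from the original source:

#include <libswscale/swscale.h>

static void convert_input_frame(RtpOutputContext *rtpContext,
        const uint8_t *const srcData[4], const int srcStride[4],
        int frameHeight)
{
    /* convert the incoming Android frame into the encoder's pixel
     * format and dimensions, writing into the reusable temp frame */
    sws_scale(rtpContext->imgConvert, srcData, srcStride,
              0, frameHeight,
              rtpContext->tempFrame->data,
              rtpContext->tempFrame->linesize);
}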
Code example #23
File: decode.c Project: khaled777b/snaps
// Invoked when a new picture is detected;
static should_inline void start_picture(PictureDecoderData* pdd, slice_header_t* sh, seq_parameter_set_rbsp_t* sps, pic_parameter_set_rbsp_t* pps)
{
  LUD_DEBUG_ASSERT(!pdd->pic_not_finished);
  pdd->pic_not_finished = 1;

  // Start to assign picture number (in decoding order) to the current picture
  pdd->dec_num++;

  // Detect if this is the second field of a complementary field pair; the process differs depending on whether this is a reference picture
  pdd->is_second_field_of_a_pair = 0;


  //Picture* prev_pic = pdd->pic;
  if (!pdd->is_second_field_of_a_pair)
  {
    // Allocate memory for that picture
    pdd->pic = alloc_picture(pdd, sh);
  }

  pdd->pic->dec_num[sh->bottom_field_flag] = pdd->dec_num;

}
Code example #24
File: ffdec.cpp Project: JohnCrash/ffplayer
	/*
	 * Open the video decoder.
	 */
	static int open_video(AVDecodeCtx *pec, AVCodecID video_codec_id, AVDictionary *opt_arg)
	{
		int ret;
		AVCodecContext *c = pec->_video_st->codec;
		AVDictionary *opt = NULL;
		AVCodec *codec;

		if(av_decode_init(c,video_codec_id,opt_arg)!=0){
			av_log(NULL, AV_LOG_FATAL, "Could not init decoder '%s'\n", avcodec_get_name(video_codec_id));
			return -1;
		}

		pec->_vctx.st = pec->_video_st;
		/* allocate and init a re-usable frame */
		pec->_vctx.frame = alloc_picture(c->pix_fmt, c->width, c->height);
		if (!pec->_vctx.frame) {
			av_log(NULL, AV_LOG_FATAL, "Could not allocate video frame\n");
			return -1;
		}

		return 0;
	}
Code example #25
File: multicast_compressed.cpp Project: dluobo/ingex
/* open the video stream */
static int open_video(internal_mpegts_encoder_t *ts)
{
	AVFormatContext *oc = ts->oc;
	AVStream *st = ts->video_st;
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        return -1;
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        return -1;
    }

    ts->video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        ts->video_outbuf_size = 550000;
        ts->video_outbuf = (uint8_t *)malloc(ts->video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    ts->picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ts->picture) {
        fprintf(stderr, "Could not allocate picture\n");
        return -1;
    }

	return 0;
}
Code example #26
File: sample1.cpp Project: david74chou/fMP4
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->st->codec;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
}
Code example #27
File: tutorial04_1.cpp Project: shileiz/notes
static void event_loop(VideoState *is)
{
	SDL_Event event;
	for (;;) {
		SDL_PumpEvents();
		while (!SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
			av_usleep(40000); //TODO: replace 40000 with one frame duration (1/frame rate)
			video_display(is);
			SDL_PumpEvents();
		}
		switch (event.type) {
		case SDL_QUIT:
		case FF_QUIT_EVENT:
			SDL_Quit();
			exit(0);
			break;
		case FF_ALLOC_EVENT:
			alloc_picture(event.user.data1);
			break;
		default:
			break;
		}
	}
}
Code example #28
static int open_video(EncoderJob &jobSpec, AVFormatContext *oc, AVStream *st)
{
	AVCodec *codec;
	AVCodecContext *c;

	c = st->codec;

	/* find the video encoder */
	codec = avcodec_find_encoder(c->codec_id);
	if (!codec) {
		fprintf(stderr, "codec not found\n");
		return -1;
	}

	/* open the codec */
	if (avcodec_open(c, codec) < 0) {
		fprintf(stderr, "could not open codec\n");
		return -1;
	}

	jobSpec.video_outbuf = NULL;
	if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
		jobSpec.video_outbuf_size = jobSpec.Width * jobSpec.Height * 64; // way bigger than it needs to be
		jobSpec.video_outbuf = (uint8_t*)av_malloc(jobSpec.video_outbuf_size);
	}

	// allocate the encoded raw picture
	jobSpec.picture = alloc_picture(c->pix_fmt, c->width, c->height);
	if (!jobSpec.picture) {
		fprintf(stderr, "Could not allocate picture\n");
		return -1;
	}
	jobSpec.picture->data[3] = jobSpec.picture->data[0]; // hide the reference so we can correctly free(). Nasty hack!
	
	return 0;
}
Code example #29
int decode_thread(void *arg) {

    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = NULL;
    AVPacket pkt1, *packet = &pkt1;

    AVDictionary *io_dict = NULL;
    AVIOInterruptCB callback;

    int video_index = -1;
    int audio_index = -1;
    int i;

    is->videoStream = -1;
    is->audioStream = -1;
    is->audio_need_resample = 0;

    global_video_state = is;
    // will interrupt blocking functions if we quit!
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;

    if(avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
        fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
        return -1;
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
        return -1;    // Couldn't open file
    }

    is->pFormatCtx = pFormatCtx;

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1;    // Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, is->filename, 0);

    // Find the first video stream
    for(i = 0; i < pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                video_index < 0) {
            video_index = i;
        }

        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                audio_index < 0) {
            audio_index = i;
        }
    }

    if(audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if(video_index >= 0) {
        stream_component_open(is, video_index);
    }

    if(is->videoStream < 0 && is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }

#ifdef __RESAMPLER__

    if( audio_index >= 0
            && pFormatCtx->streams[audio_index]->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
        is->audio_need_resample = 1;
        is->pResampledOut = NULL;
        is->pSwrCtx = NULL;

        printf("Configure resampler: ");

#ifdef __LIBAVRESAMPLE__
        printf("libAvResample\n");
        is->pSwrCtx = avresample_alloc_context();
#endif

#ifdef __LIBSWRESAMPLE__
        printf("libSwResample\n");
        is->pSwrCtx = swr_alloc();
#endif

        // Some MP3/WAV files don't report a channel layout, so assume
        // they are stereo, not 5.1
        if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 2) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;

        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                   && pFormatCtx->streams[audio_index]->codec->channels == 1) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_MONO;

        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                   && pFormatCtx->streams[audio_index]->codec->channels == 0) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            pFormatCtx->streams[audio_index]->codec->channels = 2;
        }

        av_opt_set_int(is->pSwrCtx, "in_channel_layout",
                       pFormatCtx->streams[audio_index]->codec->channel_layout, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_fmt",
                       pFormatCtx->streams[audio_index]->codec->sample_fmt, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_rate",
                       pFormatCtx->streams[audio_index]->codec->sample_rate, 0);

        av_opt_set_int(is->pSwrCtx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_rate", 44100, 0);

#ifdef __LIBAVRESAMPLE__

        if (avresample_open(is->pSwrCtx) < 0) {
#else

        if (swr_init(is->pSwrCtx) < 0) {
#endif
            fprintf(stderr, " ERROR!! From Samplert: %d Hz Sample format: %s\n",
                    pFormatCtx->streams[audio_index]->codec->sample_rate,
                    av_get_sample_fmt_name(pFormatCtx->streams[audio_index]->codec->sample_fmt));
            fprintf(stderr, "         To 44100 Sample format: s16\n");
            is->audio_need_resample = 0;
            is->pSwrCtx = NULL;
        }

    }

#endif

    // main decode loop

    for(;;) {
        if(is->quit) {
            break;
        }

        // seek stuff goes here
        if(is->seek_req) {
            int stream_index = -1;
            int64_t seek_target = is->seek_pos;

            if(is->videoStream >= 0) {
                stream_index = is->videoStream;

            } else if(is->audioStream >= 0) {
                stream_index = is->audioStream;
            }

            if(stream_index >= 0) {
                seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base);
            }

            if(av_seek_frame(is->pFormatCtx, stream_index, seek_target, is->seek_flags) < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);

            } else {
                if(is->audioStream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }

                if(is->videoStream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }

            is->seek_req = 0;
        }

        if(is->audioq.size > MAX_AUDIOQ_SIZE ||
                is->videoq.size > MAX_VIDEOQ_SIZE) {
            SDL_Delay(10);
            continue;
        }

        if(av_read_frame(is->pFormatCtx, packet) < 0) {
            if(is->pFormatCtx->pb->error == 0) {
                SDL_Delay(100); /* no error; wait for user input */
                continue;

            } else {
                break;
            }
        }

        // Is this a packet from the video stream?
        if(packet->stream_index == is->videoStream) {
            packet_queue_put(&is->videoq, packet);

        } else if(packet->stream_index == is->audioStream) {
            packet_queue_put(&is->audioq, packet);

        } else {
            av_free_packet(packet);
        }
    }

    /* all done - wait for it */
    while(!is->quit) {
        SDL_Delay(100);
    }

fail: {
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

void stream_seek(VideoState *is, int64_t pos, int rel) {

    if(!is->seek_req) {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        is->seek_req = 1;
    }
}

int main(int argc, char *argv[]) {

    SDL_Event       event;
    //double          pts;
    VideoState      *is;

    is = av_mallocz(sizeof(VideoState));

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }

    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
    screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif

    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    av_strlcpy(is->filename, argv[1], 1024);

    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    schedule_refresh(is, 40);

    is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
    is->parse_tid = SDL_CreateThread(decode_thread, is);

    if(!is->parse_tid) {
        av_free(is);
        return -1;
    }

    av_init_packet(&flush_pkt);
    flush_pkt.data = (unsigned char *)"FLUSH";

    for(;;) {
        double incr, pos;
        SDL_WaitEvent(&event);

        switch(event.type) {
            case SDL_KEYDOWN:
                switch(event.key.keysym.sym) {
                    case SDLK_LEFT:
                        incr = -10.0;
                        goto do_seek;

                    case SDLK_RIGHT:
                        incr = 10.0;
                        goto do_seek;

                    case SDLK_UP:
                        incr = 60.0;
                        goto do_seek;

                    case SDLK_DOWN:
                        incr = -60.0;
                        goto do_seek;
do_seek:

                        if(global_video_state) {
                            pos = get_master_clock(global_video_state);
                            pos += incr;
                            stream_seek(global_video_state, (int64_t)(pos * AV_TIME_BASE), incr);
                        }

                        break;

                    default:
                        break;
                }

                break;

            case FF_QUIT_EVENT:
            case SDL_QUIT:
                is->quit = 1;
                /*
                 * If the video has finished playing, then both the picture and
                 * audio queues are waiting for more data.  Make them stop
                 * waiting and terminate normally.
                 */
                SDL_CondSignal(is->audioq.cond);
                SDL_CondSignal(is->videoq.cond);
                SDL_Quit();
                exit(0);
                break;

            case FF_ALLOC_EVENT:
                alloc_picture(event.user.data1);
                break;

            case FF_REFRESH_EVENT:
                video_refresh_timer(event.user.data1);
                break;

            default:
                break;
        }
    }

    return 0;
}
Code example #30
File: tutorial05.c Project: huamulan/ffmpeg-tutor
int main(int argc, char *argv[]) {

    SDL_Event       event;

    VideoState      *is;

    is = av_mallocz(sizeof(VideoState));

    if(argc < 2) {
        fprintf(stderr, "Usage: %s filepath\n", argv[0]);
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
    screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif
    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    av_strlcpy(is->filename, argv[1], 1024);

    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    schedule_refresh(is, 40);

    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if(!is->parse_tid) {
        av_free(is);
        return -1;
    }
    for(;;) {

        SDL_WaitEvent(&event);
        switch(event.type) {
            case FF_QUIT_EVENT:
            case SDL_QUIT:
                is->quit = 1;
                /*
                 * If the video has finished playing, then both the picture and
                 * audio queues are waiting for more data.  Make them stop
                 * waiting and terminate normally.
                 */
                SDL_CondSignal(is->audioq.cond);
                SDL_CondSignal(is->videoq.cond);
                SDL_Quit();
                exit(0);
                break;

            case FF_ALLOC_EVENT:
                alloc_picture(event.user.data1);
                break;

            case FF_REFRESH_EVENT:
                video_refresh_timer(event.user.data1);
                break;

            default:
                break;
        }
    }
    return 0;

}