Example #1
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Cannot initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, c->height,
                  ost->frame->data, ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}
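
A minimal sketch of how the frame returned by get_video_frame() might be pushed through the send/receive encoding API (FFmpeg 3.1+) and muxed. The OutputStream fields used here (enc, st) follow the official muxing example and are assumptions, not part of the snippet above:

static int encode_and_write(AVFormatContext *oc, OutputStream *ost)
{
    AVFrame *frame = get_video_frame(ost); /* NULL once STREAM_DURATION is reached */
    AVPacket *pkt  = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);

    ret = avcodec_send_frame(ost->enc, frame); /* a NULL frame enters draining mode */
    while (ret >= 0) {
        ret = avcodec_receive_packet(ost->enc, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            ret = 0; /* encoder needs more input, or is fully drained */
            break;
        }
        if (ret < 0)
            break;

        /* rescale packet timestamps from the codec to the stream time base */
        av_packet_rescale_ts(pkt, ost->enc->time_base, ost->st->time_base);
        pkt->stream_index = ost->st->index;

        ret = av_interleaved_write_frame(oc, pkt); /* takes ownership of the packet ref */
    }

    av_packet_free(&pkt);
    return ret;
}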
Example #2
static AVFrame *get_video_frame(OutputStream *ost)
{
    /* check if we want to generate more frames */
    /* note: st->codec is the deprecated per-stream codec context; newer code
     * keeps its own AVCodecContext instead */
    if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0) {
        return NULL;
    }

    fill_yuv_image(ost->frame,
                   ost->next_pts,
                   ost->st->codec->width,
                   ost->st->codec->height);

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}
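
The av_compare_ts() call compares next_pts, expressed in the codec time base, against STREAM_DURATION expressed in seconds (time base 1/1). A tiny self-contained illustration of that check; the helper name is hypothetical:

#include <libavutil/mathematics.h>

/* Returns 1 once pts (in tb units) has reached `seconds`. */
static int past_duration(int64_t pts, AVRational tb, int64_t seconds)
{
    return av_compare_ts(pts, tb, seconds, (AVRational){ 1, 1 }) >= 0;
}

/* With tb = (AVRational){ 1, 25 }: past_duration(249, tb, 10) == 0,
 * past_duration(250, tb, 10) == 1, since 250 * 1/25 = 10 s. */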
Example #3
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 AV_PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr,
                            "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx,
                      (const uint8_t * const *) tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - the API will change slightly in the near
         * future for that. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = (uint8_t *)picture;
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf,
                                        video_outbuf_size, picture);
        /* If size is zero, it means the image was buffered. */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts,
                                       c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = st->index;
            pkt.data         = video_outbuf;
            pkt.size         = out_size;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
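
This example uses the old avcodec_encode_video() API, where the caller supplies the output buffer. A plausible setup for video_outbuf, assuming the fixed-size heuristic the historical FFmpeg examples used:

static uint8_t *video_outbuf;
static int      video_outbuf_size;

static void alloc_video_outbuf(void)
{
    /* 200000 bytes was the historical example's "large enough" guess for one
     * compressed frame; production code should size this more carefully. */
    video_outbuf_size = 200000;
    video_outbuf      = av_malloc(video_outbuf_size);
    if (!video_outbuf) {
        fprintf(stderr, "Could not allocate video output buffer\n");
        exit(1);
    }
}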
Example #4
static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
{

	int ret;
	static struct SwsContext *sws_ctx = NULL;
	AVCodecContext *c = st->codec;

	// only generate a new picture when not flushing (flush == 0)
	if (!flush) {
		if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
			/* as we only generate a YUV420P picture, we must convert it to the codec pixel format if needed */
			if (!sws_ctx) {
				// set up the context for converting YUV420P to the codec pixel format
				sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P, c->width, c->height, c->pix_fmt, sws_flags, NULL, NULL, NULL);
				if (!sws_ctx) {
					fprintf(stderr, "Could not initialize the conversion context\n");
					exit(1);
				}
			}

			fill_yuv_image(&src_picture, frame_count, c->width, c->height); // generate a dummy YUV picture

			sws_scale(sws_ctx, (const uint8_t * const *)src_picture.data, src_picture.linesize, 0, c->height, dst_picture.data, dst_picture.linesize);
			/* sws_scale() converts the srcSlice image into the dst image:
			 * int sws_scale(struct SwsContext *c,
			 *               const uint8_t *const srcSlice[],
			 *               const int srcStride[], // stride of each source plane
			 *               int srcSliceY,         // first row of the slice, counted from 0
			 *               int srcSliceH,         // number of rows in the slice
			 *               uint8_t *const dst[],
			 *               const int dstStride[]);
			 */
		}

		// the codec already takes YUV420P: fill the destination picture directly
		else {
			fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
		}
	}

	/* Raw video case - directly store the picture in the packet */
	if (oc->oformat->flags & AVFMT_RAWPICTURE && !flush) {


		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.flags |= AV_PKT_FLAG_KEY;
		pkt.stream_index = st->index;
		pkt.data = dst_picture.data[0];
		pkt.size = sizeof(AVPicture);
		ret = av_interleaved_write_frame(oc, &pkt); // raw video is stored directly, interleaved

	}

	// not a raw picture: encode the frame
	else {

		AVPacket pkt = { 0 };
		int got_packet;
		av_init_packet(&pkt);

		/* encode the image */
		frame->pts = frame_count;
		ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet); // encode frame into pkt
		/* got_packet is set to 1 by libavcodec if the output packet is non-empty
		 * and to 0 if it is empty. If the function returns an error, the packet
		 * is invalid and got_packet must not be used. */

		if (ret < 0) {
			char buf[256];
			av_strerror(ret, buf, sizeof(buf));
			fprintf(stderr, "Error encoding video frame: %s\n", buf);
			exit(1);
		}

		/* If size is zero, it means the image was buffered. */
		if (got_packet) {
			ret = write_frame(oc, &c->time_base, st, &pkt);
		}

		// the packet was empty: nothing to write yet
		else {
			if (flush)
				video_is_eof = 1;
			ret = 0;
		}
	}

	if (ret < 0) {
		char buf[256];
		av_strerror(ret, buf, sizeof(buf));
		fprintf(stderr, "Error while writing video frame: %s\n", buf);
		exit(1);
	}

	frame_count++;
}
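
The write_frame() helper called above is not shown in this snippet; in the FFmpeg muxing example of the same era it looks like the sketch below, reproduced as an assumption about what this code links against:

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base,
                       AVStream *st, AVPacket *pkt)
{
    /* rescale output packet timestamps from the codec to the stream time base */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* write the compressed frame to the media file */
    return av_interleaved_write_frame(fmt_ctx, pkt);
}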
Example #5
static int write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *sctx = NULL;

    c = st->codec;

    if (c->pix_fmt != PIX_FMT_YUV420P) {
        if (sctx == NULL) {
            sctx = sws_getContext(c->width, c->height, PIX_FMT_YUV420P,
                    c->width, c->height, c->pix_fmt,
                    SWS_BICUBIC, NULL, NULL, NULL);
            if (sctx == NULL)
                return -1;
        }

        fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
        sws_scale(sctx, (const uint8_t * const *) tmp_picture->data, tmp_picture->linesize,
                0, c->height, picture->data, picture->linesize);
    } else {
        fill_yuv_image(picture, frame_count, c->width, c->height);
    }


    /* encode the image */
    out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
    /* if zero size, it means the image was buffered */
    if (out_size > 0) {
        AVPacket pkt;
        FILE *ff = fopen("xx.jpeg", "wb"); /* binary mode; truncated each frame, so only the last packet is kept */

        av_init_packet(&pkt);

        if (c->coded_frame->pts != AV_NOPTS_VALUE)
            pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
        if(c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        pkt.data= video_outbuf;
        pkt.size= out_size;

        /* write the compressed frame in the media file */
        ret = av_interleaved_write_frame(oc, &pkt);

        if (ff) {
            fwrite(video_outbuf, out_size, 1, ff);
            fclose(ff);
        }
    } else {
        ret = 0;
    }

    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        return -1;
    }

    printf("Frame written: %d\n", frame_count);
    frame_count++;

    return 0;
}
Example #6
static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (!flush) {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE && !flush) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        frame->pts = frame_count;
        ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        /* If size is zero, it means the image was buffered. */

        if (got_packet) {
            ret = write_frame(oc, &c->time_base, st, &pkt);
        } else {
            if (flush)
                video_is_eof = 1;
            ret = 0;
        }
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    frame_count++;
}
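
The flush parameter lets the caller drain frames the encoder has delayed (e.g. B-frames). A hedged sketch of such a caller; video_is_eof and frame_count come from the snippet's globals, while the STREAM_NB_FRAMES cutoff is an assumption borrowed from the neighbouring examples:

static void write_all_video_frames(AVFormatContext *oc, AVStream *video_st)
{
    int flush = 0;
    while (!video_is_eof) {
        write_video_frame(oc, video_st, flush);
        if (!flush && frame_count >= STREAM_NB_FRAMES)
            flush = 1; /* no more input: pass NULL frames until the codec is drained */
    }
}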
Example #7
int main(int argc, char **argv)
{
    uint8_t *src_data[4], *dst_data[4];
    int src_linesize[4], dst_linesize[4];
    int src_w = 320, src_h = 240, dst_w, dst_h;
    enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
    const char *dst_size = NULL;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    struct SwsContext *sws_ctx;
    int i, ret;

    if (argc != 3) {
        fprintf(stderr, "Usage: %s output_file output_size\n"
                "API example program to show how to scale an image with libswscale.\n"
                "This program generates a series of pictures, rescales them to the given "
                "output_size and saves them to an output file named output_file.\n"
                "\n", argv[0]);
        exit(1);
    }
    dst_filename = argv[1];
    dst_size     = argv[2];

    if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
        fprintf(stderr,
                "Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
                dst_size);
        exit(1);
    }

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create scaling context */
    sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
                             dst_w, dst_h, dst_pix_fmt,
                             SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws_ctx) {
        fprintf(stderr,
                "Impossible to create scale context for the conversion "
                "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
                av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
        ret = AVERROR(EINVAL);
        goto end;
    }

    /* allocate source and destination image buffers */
    if ((ret = av_image_alloc(src_data, src_linesize,
                              src_w, src_h, src_pix_fmt, 16)) < 0) {
        fprintf(stderr, "Could not allocate source image\n");
        goto end;
    }

    /* buffer is going to be written to rawvideo file, no alignment */
    if ((ret = av_image_alloc(dst_data, dst_linesize,
                              dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
        fprintf(stderr, "Could not allocate destination image\n");
        goto end;
    }
    dst_bufsize = ret;

    for (i = 0; i < 100; i++) {
        /* generate synthetic video */
        fill_yuv_image(src_data, src_linesize, src_w, src_h, i);

        /* convert to destination format */
        sws_scale(sws_ctx, (const uint8_t * const*)src_data,
                  src_linesize, 0, src_h, dst_data, dst_linesize);

        /* write scaled image to file */
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    }

    fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
           "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
           av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);

end:
    if (dst_file)
        fclose(dst_file);
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    sws_freeContext(sws_ctx);
    return ret < 0;
}
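
The fill_yuv_image() used above takes separate data/linesize arrays rather than an AVFrame. The version below matches that signature and follows FFmpeg's scaling_video.c example (a synthetic moving gradient):

static void fill_yuv_image(uint8_t *data[4], int linesize[4],
                           int width, int height, int frame_index)
{
    int x, y;

    /* Y plane: diagonal gradient that shifts every frame */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            data[0][y * linesize[0] + x] = x + y + frame_index * 3;

    /* Cb and Cr planes, at quarter resolution for YUV420P */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
            data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
        }
    }
}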
Example #8
File: muxing.c  Project: vlakuc/algo
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        frame->pts = frame_count;
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        /* If size is zero, it means the image was buffered. */

        if (!ret && got_packet && pkt.size) {
            /* rescale output packet timestamp values from codec to stream timebase */
            pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            pkt.duration = av_rescale_q(pkt.duration, c->time_base, st->time_base);
            pkt.stream_index = st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    frame_count++;
}
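
The AV_ROUND_PASS_MINMAX flag makes av_rescale_q_rnd() pass INT64_MIN and INT64_MAX through unchanged, which matters because AV_NOPTS_VALUE is INT64_MIN. A small illustration with hypothetical time bases:

static int64_t to_stream_tb(int64_t ts)
{
    /* example time bases: 1/25 (codec) -> 1/90000 (stream) */
    return av_rescale_q_rnd(ts, (AVRational){ 1, 25 }, (AVRational){ 1, 90000 },
                            AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
}

/* to_stream_tb(25) == 90000 (one second either way), while
 * to_stream_tb(AV_NOPTS_VALUE) == AV_NOPTS_VALUE instead of being mangled. */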
Example #9
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - the API will change slightly in the near
         * future for that. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        AVPacket pkt;
        int got_output;

        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;

        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame\n");
            exit(1);
        }

        /* If size is zero, it means the image was buffered. */
        if (got_output) {
            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts,
                                       c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;

            pkt.stream_index = st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
Example #10
void write_image_to_movie(image_info *image, movie_info *movie){
#if USE_FFMPEG
  AVCodecContext *c;
  int             out_size;
  int             ret;
  static struct   SwsContext *img_convert_ctx;
  int             flag_silent;

  if(SID.I_am_Master){

    c = movie->video_stream->codec;

    flag_silent=FALSE;
    if(movie->frame_count>=movie->n_frames) {
      // no more frames to compress. The codec has a latency of a few
      //   frames if using B frames, so we get the last frames by
      //   passing the same picture again 
      flag_silent=TRUE;
    } 
    else {
      SID_log("Writing frame %d of %d to movie...",SID_LOG_OPEN,movie->frame_count+1,movie->n_frames);
      if(c->pix_fmt!=PIX_FMT_YUV420P) {
        // as we only generate a YUV420 picture, we must convert it to the codec pixel format if needed
        if (img_convert_ctx == NULL) {
          img_convert_ctx = sws_getContext(c->width, c->height,
                                           PIX_FMT_YUV420P,
                                           c->width, c->height,
                                           c->pix_fmt,
                                           GBPGFX_SWS_FLAGS, NULL, NULL, NULL);
          if(img_convert_ctx==NULL)
            SID_trap_error("Cannot initialize the conversion context",ERROR_LOGIC);
        }
        fill_yuv_image(movie->temp_picture,image,c);
        sws_scale(img_convert_ctx,
                  movie->temp_picture->data,
                  movie->temp_picture->linesize,
                  0, 
                  c->height,
                  movie->picture->data,
                  movie->picture->linesize);
      } 
      else
        fill_yuv_image(movie->picture,image,c);
    }

    // Raw video case. The API will change slightly in the near future for that 
    if(movie->video_context->oformat->flags & AVFMT_RAWPICTURE) {
      AVPacket pkt;
      av_init_packet(&pkt);
      pkt.flags       |= PKT_FLAG_KEY;
      pkt.stream_index = movie->video_stream->index;
      pkt.data         = (uint8_t *)movie->picture;
      pkt.size         = sizeof(AVPicture);
      ret              = av_write_frame(movie->video_context,&pkt);
    } 
    // encode the image
    else {
      out_size=avcodec_encode_video(c,movie->video_outbuf,movie->video_outbuf_size,movie->picture);
      // if zero size, it means the image was buffered
      if(out_size>0) {
        AVPacket pkt;
        av_init_packet(&pkt);
        if(c->coded_frame->pts != AV_NOPTS_VALUE)
          pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, movie->video_stream->time_base);
        if(c->coded_frame->key_frame)
          pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index=movie->video_stream->index;
        pkt.data        =movie->video_outbuf;
        pkt.size        =out_size;

        // write the compressed frame in the media file
        ret = av_write_frame(movie->video_context,&pkt);
      } 
      else
        ret=0;
    }
    if(ret!=0)
      SID_trap_error("Error while writing video frame",ERROR_LOGIC);
    movie->frame_count++;
    if(!flag_silent)
      SID_log("Done.",SID_LOG_CLOSE);
  }
#else
  SID_trap_error("Routine not supported.  FFMPEG not installed.",ERROR_LOGIC);
#endif
}
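
Unlike the earlier examples, this one writes with av_write_frame(). With a single video stream the two calls behave the same, but once an audio stream is added, av_interleaved_write_frame() is the safer choice because it buffers packets and emits them in dts order. A hypothetical drop-in helper (the function name is invented):

static int write_movie_packet(movie_info *movie, AVPacket *pkt)
{
    /* buffers and interleaves across streams, unlike av_write_frame() */
    return av_interleaved_write_frame(movie->video_context, pkt);
}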