Example #1
static int decode(quicktime_t *file, unsigned char **row_pointers, int track)
{
	int bytes;
	quicktime_video_map_t *vtrack = &(file->vtracks[track]);
	quicktime_yuv2_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
	int width = quicktime_video_width(file, track);
	int height = quicktime_video_height(file, track);
	int result = 0;

	initialize(vtrack, codec, width, height);
	quicktime_set_video_position(file, vtrack->current_position, track);
	bytes = quicktime_frame_size(file, vtrack->current_position, track);

	result = !quicktime_read_data(file, (char*)codec->work_buffer, bytes);
	if(codec->is_2vuy)
		convert_decode_2vuy(codec);
	else
		convert_decode_yuv2(codec);

	cmodel_transfer(row_pointers, codec->rows,
		row_pointers[0], row_pointers[1], row_pointers[2],
		0, 0, 0,
		file->in_x, file->in_y, file->in_w, file->in_h,
		0, 0, file->out_w, file->out_h,
		BC_YUV422, file->color_model,
		0, codec->coded_w, file->out_w);

	return result;
}
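
A note on usage: decode() above is the yuv2/2vuy codec's private callback; applications do not call it directly but go through the library's generic decode entry points, such as lqt_decode_video() which appears in Example #4. Below is a minimal, hypothetical sketch of driving a decode from application code; the packed BC_YUV422 layout (2 bytes per pixel) and the helper function are illustrative assumptions, and real code would derive the row layout from lqt_get_cmodel() instead of assuming it.

#include <stdlib.h>

/* Hypothetical helper: decode one frame of track 0 into a packed buffer
 * laid out as one row pointer per line (BC_YUV422 assumed, 2 bytes/pixel). */
static int decode_one_frame(quicktime_t *file)
{
	int w = quicktime_video_width(file, 0);
	int h = quicktime_video_height(file, 0);
	unsigned char *buffer = malloc((size_t)w * h * 2);
	unsigned char **rows = malloc(sizeof(*rows) * h);
	int i, result;

	for(i = 0; i < h; i++)
		rows[i] = buffer + (size_t)i * w * 2;

/* Dispatches to the track's codec, e.g. the decode() shown above */
	result = lqt_decode_video(file, rows, 0);

	free(rows);
	free(buffer);
	return result;
}
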
Example #2
int lqt_gavl_get_video_format(quicktime_t * file,
                              int track,
                              gavl_video_format_t * format, int encode)
  {
  int constant_framerate;
  int tc_framerate;
  uint32_t tc_flags;
  int pixel_width, pixel_height;
  if(track >= quicktime_video_tracks(file) ||
     track < 0)
    return 0;

  format->image_width = quicktime_video_width(file, track);
  format->image_height = quicktime_video_height(file, track);

  format->frame_width = format->image_width;
  format->frame_height = format->image_height;

  lqt_get_pixel_aspect(file, track, &pixel_width,
                       &pixel_height);

  format->pixel_width = pixel_width;
  format->pixel_height = pixel_height;
  
  format->timescale = lqt_video_time_scale(file, track);
  format->frame_duration = lqt_frame_duration(file, track,
                                              &constant_framerate);

  if(lqt_has_timecode_track(file, track, &tc_flags, &tc_framerate))
    {
    format->timecode_format.int_framerate = tc_framerate;
    format->timecode_format.flags = timecode_flags_lqt_2_gavl(tc_flags);
    }
  
  if(encode)
    {
    if((lqt_get_file_type(file) & (LQT_FILE_AVI|LQT_FILE_AVI_ODML)))
      format->framerate_mode = GAVL_FRAMERATE_CONSTANT;
    }
  else
    {
    if(!constant_framerate)
      format->framerate_mode = GAVL_FRAMERATE_VARIABLE;
    else
      format->framerate_mode = GAVL_FRAMERATE_CONSTANT;
    }
  
  format->chroma_placement =
    chroma_placement_lqt_2_gavl(lqt_get_chroma_placement(file, track));
  
  format->interlace_mode =
    interlace_mode_lqt_2_gavl(lqt_get_interlace_mode(file, track));

  format->pixelformat = pixelformat_lqt_2_gavl(lqt_get_cmodel(file, track));
  
  return 1;
  }
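
A hypothetical usage sketch for the function above, paired with lqt_gavl_decode_video() from Example #4: query the format of track 0, allocate a gavl frame plus a row-pointer array, and decode a single frame. The header names and the gavl frame allocation/free calls are assumptions about the surrounding libraries, not taken from these examples.

#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <gavl/gavl.h>
#include <lqt_gavl.h>        /* assumed header name */

static int read_first_frame(const char *path)
  {
  gavl_video_format_t format;
  gavl_video_frame_t *frame;
  uint8_t **rows;
  quicktime_t *file = quicktime_open((char*)path, 1, 0);

  if(!file || !quicktime_video_tracks(file))
    return 0;

  memset(&format, 0, sizeof(format));
  if(!lqt_gavl_get_video_format(file, 0, &format, 0)) /* 0: decode side */
    {
    quicktime_close(file);
    return 0;
    }

  frame = gavl_video_frame_create(&format);           /* assumed gavl call */
  rows  = malloc(sizeof(*rows) * format.image_height);

  lqt_gavl_decode_video(file, 0, frame, rows);        /* see Example #4 */

  free(rows);
  gavl_video_frame_destroy(frame);
  quicktime_close(file);
  return 1;
  }
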
Example #3
int lqt_gavl_encode_video(quicktime_t * file, int track,
                          gavl_video_frame_t * frame, uint8_t ** rows, int64_t pts_offset)
  {
  int i, height;
  int result;
  int tc_framerate;
  uint32_t tc_flags;
    
  /* Write timecode */

  if(lqt_has_timecode_track(file, track, &tc_flags, &tc_framerate) &&
     (frame->timecode != GAVL_TIMECODE_UNDEFINED))
    {
    gavl_timecode_format_t f;
    f.int_framerate = tc_framerate;
    f.flags         = timecode_flags_lqt_2_gavl(tc_flags);
    
    lqt_write_timecode(file, track,
                       gavl_timecode_to_framecount(&f, frame->timecode));
    
    }
  
  if(lqt_colormodel_is_planar(lqt_get_cmodel(file, track)))
    {
    lqt_set_row_span(file, track, frame->strides[0]);
    lqt_set_row_span_uv(file, track, frame->strides[1]);
#if LQT_BUILD >= LQT_MAKE_BUILD(1,1,2)
    if(frame->duration > 0)
      result = lqt_encode_video_d(file, frame->planes, track, frame->timestamp - pts_offset,
                                  frame->duration);
    else
#endif
      result = lqt_encode_video(file, frame->planes, track, frame->timestamp - pts_offset);

    }
  else
    {
    height = quicktime_video_height(file, track);
    for(i = 0; i < height; i++)
      {
      lqt_set_row_span(file, track, frame->strides[0]);
      rows[i] = frame->planes[0] + i * frame->strides[0];
      }
#if LQT_BUILD >= LQT_MAKE_BUILD(1,1,2)
    if(frame->duration > 0)
      result = lqt_encode_video_d(file, rows, track, frame->timestamp - pts_offset,
                                  frame->duration);
    else
#endif
      result = lqt_encode_video(file, rows, track, frame->timestamp - pts_offset);
    }
  return result;
  }
Example #4
int lqt_gavl_decode_video(quicktime_t * file, int track,
                          gavl_video_frame_t * frame, uint8_t ** rows)
  {
  int i, height;
  uint32_t tc;
  uint32_t tc_flags;
  int      tc_framerate;
  
  if(quicktime_video_position(file, track) >=
     quicktime_video_length(file, track))
    return 0;
  
  frame->timestamp = lqt_frame_time(file, track);

  if(lqt_has_timecode_track(file, track, &tc_flags,
                            &tc_framerate) &&
     lqt_read_timecode(file, track, &tc))
    {
    gavl_timecode_format_t f;
    f.int_framerate = tc_framerate;
    f.flags = timecode_flags_lqt_2_gavl(tc_flags);
    frame->timecode = gavl_timecode_from_framecount(&f, tc);
    }
  else
    frame->timecode = GAVL_TIMECODE_UNDEFINED;
  
  if(lqt_colormodel_is_planar(lqt_get_cmodel(file, track)))
    {
    lqt_set_row_span(file, track, frame->strides[0]);
    lqt_set_row_span_uv(file, track, frame->strides[1]);
    lqt_decode_video(file, frame->planes, track);
    }
  else
    {
    height = quicktime_video_height(file, track);
    for(i = 0; i < height; i++)
      {
      lqt_set_row_span(file, track, frame->strides[0]);
      rows[i] = frame->planes[0] + i * frame->strides[0];
      }
    lqt_decode_video(file, rows, track);
    }
  return 1;
  }
Example #5
int main(int argc, char *argv[])
{
	quicktime_t *file;
	FILE *output;
	int result = 0;
	int i, j;
	int64_t length;
	char string[1024], *prefix = 0, *input = 0;
	char *data = 0;
	int bytes = 0, old_bytes = 0;
	float output_rate = 0;
	float input_rate;
	int64_t input_frame;
	int64_t new_length;
	int width, height;
	int rgb_to_ppm = 0;

	if(argc < 3)
	{
		usage();
	}

	for(i = 1, j = 0; i < argc; i++)
	{
		if(!strcmp(argv[i], "-f"))
		{
			if(i + 1 < argc)
			{
				output_rate = atof(argv[++i]);
			}
			else
				usage();
		}
		else
		if(j == 0)
		{
			input = argv[i];
			j++;
		}
		else
		if(j == 1)
		{
			prefix = argv[i];
			j++;
		}
	}

	if(!prefix || !input) usage();

	if(!(file = quicktime_open(input, 1, 0)))
	{
		printf("Open failed\n");
		exit(1);
	}
	
	if(!quicktime_video_tracks(file))
	{
		printf("No video tracks.\n");
		exit(1);
	}
	
	if(quicktime_match_32(quicktime_video_compressor(file, 0), QUICKTIME_RAW))
	{
		printf("Converting to ppm.\n");
		rgb_to_ppm = 1;
	}

	length = quicktime_video_length(file, 0);
	input_rate = quicktime_frame_rate(file, 0);
	if(!output_rate) output_rate = input_rate;
	new_length = output_rate / input_rate * length;
	width = quicktime_video_width(file, 0);
	height = quicktime_video_height(file, 0);

	for(i = 0; i < new_length; i++)
	{
/* Get output file */
		sprintf(string, "%s%06d", prefix, i);
		if(!(output = fopen(string, "wb")))
		{
			perror("Open failed");
			exit(1);
		}

/* Get input frame */
		input_frame = (int64_t)(input_rate / output_rate * i);
		bytes = quicktime_frame_size(file, input_frame, 0);

		if(data)
		{
			if(bytes > old_bytes) { free(data); data = 0; }
		}

		if(!data)
		{
			old_bytes = bytes;
			data = malloc(bytes);
		}

		quicktime_set_video_position(file, input_frame, 0);
		quicktime_read_data(file, data, bytes);
		if(rgb_to_ppm)
		{
			fprintf(output, "P6\n%d %d\n%d\n", width, height, 0xff);
		}

		if(!fwrite(data, bytes, 1, output))
		{
			perror("write failed");
		}
		fclose(output);
	}

	quicktime_close(file);
	return 0;
}
Example #6
static int encode(quicktime_t *file, unsigned char **row_pointers, int track)
{
	int64_t offset = quicktime_position(file);
	quicktime_video_map_t *vtrack = &(file->vtracks[track]);
	quicktime_h264_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
	quicktime_trak_t *trak = vtrack->track;
	int width = quicktime_video_width(file, track);
	int height = quicktime_video_height(file, track);
	int w_2 = quicktime_quantize2(width);
// ffmpeg interprets the codec height as the presentation height
	int h_2 = quicktime_quantize2(height);
	int i;
	int result = 0;
	int bytes = 0;
	int is_keyframe = 0;
	int current_field = vtrack->current_position % codec->total_fields;
	quicktime_atom_t chunk_atom;
	unsigned char header[1024];
	int header_size = 0;
	int got_pps = 0;
	int got_sps = 0;
	quicktime_avcc_t *avcc = &trak->mdia.minf.stbl.stsd.table[0].avcc;






	pthread_mutex_lock(&h264_lock);

	if(!codec->encode_initialized[current_field])
	{
		codec->encode_initialized[current_field] = 1;
		codec->param.i_width = w_2;
		codec->param.i_height = h_2;
		codec->param.i_fps_num = quicktime_frame_rate_n(file, track);
		codec->param.i_fps_den = quicktime_frame_rate_d(file, track);

#if X264_BUILD >= 48
		codec->param.rc.i_rc_method = X264_RC_CQP;
#endif
// Reset quantizer if fixed bitrate
		x264_param_t default_params;
		x264_param_default(&default_params);
#if X264_BUILD < 48
		if(codec->param.rc.b_cbr)
#else
		if(codec->param.rc.i_qp_constant)
#endif
		{
			codec->param.rc.i_qp_constant = default_params.rc.i_qp_constant;
			codec->param.rc.i_qp_min = default_params.rc.i_qp_min;
			codec->param.rc.i_qp_max = default_params.rc.i_qp_max;
		}


		if(file->cpus > 1)
		{
			codec->param.i_threads = file->cpus;
		}

		codec->encoder[current_field] = x264_encoder_open(&codec->param);
		codec->pic[current_field] = calloc(1, sizeof(x264_picture_t));
//printf("encode 1 %d %d\n", codec->param.i_width, codec->param.i_height);
  		x264_picture_alloc(codec->pic[current_field],
			X264_CSP_I420,
			codec->param.i_width,
			codec->param.i_height);
	}






	codec->pic[current_field]->i_type = X264_TYPE_AUTO;
	codec->pic[current_field]->i_qpplus1 = 0;


	if(codec->header_only)
	{
		bzero(codec->pic[current_field]->img.plane[0], w_2 * h_2);
		bzero(codec->pic[current_field]->img.plane[1], w_2 * h_2 / 4);
		bzero(codec->pic[current_field]->img.plane[2], w_2 * h_2 / 4);
	}
	else
	if(file->color_model == BC_YUV420P)
	{
		memcpy(codec->pic[current_field]->img.plane[0], row_pointers[0], w_2 * h_2);
		memcpy(codec->pic[current_field]->img.plane[1], row_pointers[1], w_2 * h_2 / 4);
		memcpy(codec->pic[current_field]->img.plane[2], row_pointers[2], w_2 * h_2 / 4);
	}
	else
	{
//printf("encode 2 %p %p %p\n", codec->pic[current_field]->img.plane[0], codec->pic[current_field]->img.plane[1], codec->pic[current_field]->img.plane[2]);
		cmodel_transfer(0, /* Leave NULL if non existent */
			row_pointers,
			codec->pic[current_field]->img.plane[0], /* Leave NULL if non existent */
			codec->pic[current_field]->img.plane[1],
			codec->pic[current_field]->img.plane[2],
			row_pointers[0], /* Leave NULL if non existent */
			row_pointers[1],
			row_pointers[2],
			0,        /* Dimensions to capture from input frame */
			0,
			width,
			height,
			0,       /* Dimensions to project on output frame */
			0,
			width,
			height,
			file->color_model,
			BC_YUV420P,
			0,         /* When transfering BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
			width,       /* For planar use the luma rowspan */
			codec->pic[current_field]->img.i_stride[0]);

	}












	x264_picture_t pic_out;
	x264_nal_t *nals;
	int nnal = 0;
	do
	{
		x264_encoder_encode(codec->encoder[current_field],
			&nals,
			&nnal,
			codec->pic[current_field],
			&pic_out);
//printf("encode %d nnal=%d\n", __LINE__, nnal);
	} while(codec->header_only && !nnal);
	int allocation = w_2 * h_2 * 3;
	if(!codec->work_buffer)
	{
		codec->work_buffer = calloc(1, allocation);
	}

	codec->buffer_size = 0;
//printf("encode %d nnal=%d\n", __LINE__, nnal);
	for(i = 0; i < nnal; i++)
	{
#if X264_BUILD >= 76
                int size = nals[i].i_payload;
                memcpy(codec->work_buffer + codec->buffer_size,
			nals[i].p_payload,
			nals[i].i_payload);
#else
		int size_return = 0;
		int size = x264_nal_encode(codec->work_buffer + codec->buffer_size,
			&size_return,
			1,
			nals + i);
#endif
		unsigned char *ptr = codec->work_buffer + codec->buffer_size;

//printf("encode %d size=%d\n", __LINE__, size);
		if(size > 0)
		{
			if(size + codec->buffer_size > allocation)
			{
				printf("qth264.c %d: overflow size=%d allocation=%d\n",
					__LINE__,
					size,
					allocation);
			}

// Size of NAL for avc
			uint64_t avc_size = size - 4;

// Synthesize header.
// Hopefully all the parameter set NAL's are present in the first frame.
			if(!avcc->data_size)
			{
				if(header_size < 6)
				{
					header[header_size++] = 0x01;
					header[header_size++] = 0x4d;
					header[header_size++] = 0x40;
					header[header_size++] = 0x1f;
					header[header_size++] = 0xff;
					header[header_size++] = 0xe1;
				}

				int nal_type = (ptr[4] & 0x1f);
// Picture parameter or sequence parameter set
				if(nal_type == 0x7 && !got_sps)
				{
					got_sps = 1;
					header[header_size++] = (avc_size & 0xff00) >> 8;
					header[header_size++] = (avc_size & 0xff);
					memcpy(&header[header_size],
						ptr + 4,
						avc_size);
					header_size += avc_size;
				}
				else
				if(nal_type == 0x8 && !got_pps)
				{
					got_pps = 1;
// Number of sps nal's.
					header[header_size++] = 0x1;
					header[header_size++] = (avc_size & 0xff00) >> 8;
					header[header_size++] = (avc_size & 0xff);
					memcpy(&header[header_size],
						ptr + 4,
						avc_size);
					header_size += avc_size;
				}

// Write header
				if(got_sps && got_pps)
				{
/*
 * printf("encode %d\n", __LINE__);
 * int j;
 * for(j = 0; j < header_size; j++)
 * {
 * printf("%02x ", header[j]);
 * }
 * printf("\n");
 */
					quicktime_set_avcc_header(avcc,
		  				header,
		  				header_size);
				}
			}
Example #7
void probe_mov(info_t *ipipe)
{

  quicktime_t *qt_file=NULL;
  char *codec=NULL;

  int j, tracks;

  /* open movie for video probe */
  if(qt_file==NULL)
    if(NULL == (qt_file = quicktime_open((char *)ipipe->name,1,0))){
      tc_log_error(__FILE__,"can't open quicktime!");
      ipipe->error=1;
      return;
    }

  // extract audio parameters
  tracks=quicktime_audio_tracks(qt_file);

  if(tracks>TC_MAX_AUD_TRACKS) {
    tc_log_warn(__FILE__, "only %d of %d audio tracks scanned",
                TC_MAX_AUD_TRACKS, tracks);
    tracks=TC_MAX_AUD_TRACKS;
  }

  for(j=0; j<tracks; ++j) {

    ipipe->probe_info->track[j].samplerate = quicktime_sample_rate(qt_file, j);
    ipipe->probe_info->track[j].chan = quicktime_track_channels(qt_file, j);
    ipipe->probe_info->track[j].bits = quicktime_audio_bits(qt_file, j);

    codec  = quicktime_audio_compressor(qt_file, j);

    if(strcasecmp(codec,QUICKTIME_RAW)==0 || strcasecmp(codec,QUICKTIME_TWOS)==0)
      ipipe->probe_info->track[j].format = CODEC_PCM;
    else if(strcasecmp(codec,QUICKTIME_IMA4)==0)
      ipipe->probe_info->track[j].format = CODEC_IMA4;
    else
      /* XXX not right but works */
      ipipe->probe_info->track[j].format = CODEC_PCM;

    if (! binary_dump)
    	tc_log_info(__FILE__, "audio codec=%s", codec);

    if(ipipe->probe_info->track[j].chan>0) ++ipipe->probe_info->num_tracks;
  }


  // read all video parameter from input file
  ipipe->probe_info->width  =  quicktime_video_width(qt_file, 0);
  ipipe->probe_info->height =  quicktime_video_height(qt_file, 0);
  ipipe->probe_info->fps = quicktime_frame_rate(qt_file, 0);

  ipipe->probe_info->frames = quicktime_video_length(qt_file, 0);

  codec  =  quicktime_video_compressor(qt_file, 0);

  //check for supported codecs

  if(codec!=NULL) {

    if(strlen(codec)==0) {
      ipipe->probe_info->codec=TC_CODEC_RGB;
    } else {

      if(strcasecmp(codec,QUICKTIME_DV)==0)
	ipipe->probe_info->codec=TC_CODEC_DV;

      if(strcasecmp(codec,"dvsd")==0)
	ipipe->probe_info->codec=TC_CODEC_DV;

      if(strcasecmp(codec,"DIV3")==0)
	ipipe->probe_info->codec=TC_CODEC_DIVX3;

      if(strcasecmp(codec,"DIVX")==0)
	ipipe->probe_info->codec=TC_CODEC_DIVX4;

      if(strcasecmp(codec,"DX50")==0)
	ipipe->probe_info->codec=TC_CODEC_DIVX5;

      if(strcasecmp(codec,"MJPG")==0 || strcasecmp(codec,"JPEG")==0)
	ipipe->probe_info->codec=TC_CODEC_MJPEG;

      if(strcasecmp(codec,"YUV2")==0)
	ipipe->probe_info->codec=TC_CODEC_YUV2;

      if(strcasecmp(codec,"SVQ1")==0)
	ipipe->probe_info->codec=TC_CODEC_SVQ1;

      if(strcasecmp(codec,"SVQ3")==0)
	ipipe->probe_info->codec=TC_CODEC_SVQ3;
    }
  } else
    ipipe->probe_info->codec=TC_CODEC_UNKNOWN;

  if (! binary_dump)
  	tc_log_info(__FILE__, "video codec=%s", codec);
  ipipe->probe_info->magic=TC_MAGIC_MOV;
  tc_frc_code_from_value(&(ipipe->probe_info->frc),
                         ipipe->probe_info->fps);

  return;
}
Example #8
int
cmdline_main (int argc, char *argv[])
{
    guchar *output;
    int num_frames = 1;
#ifdef MOVIES
    int generate_movie = 0;
    quicktime_t *output_movie;
    guchar **rows;
    int i;              /* row/drawable index used by the MOVIES-only loops */
#endif
    int antialiasing = 0, supersampling = 0;
    int img_width, img_height;
    char *generator = 0;
    userval_info_t *userval_info;
    int num_input_drawables = 0;
    gboolean size_is_set = FALSE;
    char *script = NULL;
    char *output_filename;
    gboolean htmldoc = FALSE;
    define_t *defines = NULL;
    int bench_render_count = 1;
    int render_num;
    gboolean bench_no_output = FALSE;
    gboolean bench_no_backend = FALSE;
    int compile_time_limit = DEFAULT_OPTIMIZATION_TIMEOUT;

    for (;;)
    {
	static struct option long_options[] =
	    {
		{ "version", no_argument, 0, OPTION_VERSION },
		{ "help", no_argument, 0, OPTION_HELP },
		{ "intersampling", no_argument, 0, 'i' },
		{ "oversampling", no_argument, 0, 'o' },
		{ "cache", required_argument, 0, 'c' },
		{ "generator", required_argument, 0, 'g' },
		{ "size", required_argument, 0, 's' },
		{ "script-file", required_argument, 0, 'f' },
		{ "htmldoc", no_argument, 0, OPTION_HTMLDOC },
		{ "bench-no-output", no_argument, 0, OPTION_BENCH_NO_OUTPUT },
		{ "bench-only-compile", no_argument, 0, OPTION_BENCH_ONLY_COMPILE },
		{ "bench-no-compile-time-limit", no_argument, 0, OPTION_BENCH_NO_COMPILE_TIME_LIMIT },
		{ "bench-no-backend", no_argument, 0, OPTION_BENCH_NO_BACKEND },
		{ "bench-render-count", required_argument, 0, OPTION_BENCH_RENDER_COUNT },
#ifdef MOVIES
		{ "frames", required_argument, 0, 'F' },
		{ "movie", required_argument, 0, 'M' },
#endif
		{ 0, 0, 0, 0 }
	    };

	int option, option_index;

	option = getopt_long(argc, argv, 
#ifdef MOVIES
			     "f:ioF:D:M:c:g:s:", 
#else
			     "f:ioD:c:g:s:",
#endif
			     long_options, &option_index);

	if (option == -1)
	    break;

	switch (option)
	{
	    case OPTION_VERSION :
		printf("MathMap " MATHMAP_VERSION "\n"
		       "\n"
		       "Copyright (C) 1997-2009 Mark Probst\n"
		       "\n"
		       "This program is free software; you can redistribute it and/or modify\n"
		       "it under the terms of the GNU General Public License as published by\n"
		       "the Free Software Foundation; either version 2 of the License, or\n"
		       "(at your option) any later version.\n"
		       "\n"
		       "This program is distributed in the hope that it will be useful,\n"
		       "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
		       "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n"
		       "GNU General Public License for more details.\n"
		       "\n"
		       "You should have received a copy of the GNU General Public License\n"
		       "along with this program; if not, write to the Free Software\n"
		       "Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.\n");
		return 0;

	    case OPTION_HELP :
		usage();
		return 0;

	    case OPTION_HTMLDOC :
		htmldoc = TRUE;
		break;

	    case 'f' :
		if (!g_file_get_contents(optarg, &script, NULL, NULL))
		{
		    fprintf(stderr, _("Error: The script file `%s' could not be read.\n"), optarg);
		    return 1;
		}
		break;

	    case 'i' :
		antialiasing = 1;
		break;

	    case 'o' :
		supersampling = 1;
		break;

	    case 'c' :
		cache_size = atoi(optarg);
		assert(cache_size > 0);
		break;

	    case 'D' :
		append_define(optarg, &defines);
		break;

	    case 'I' :
		alloc_cmdline_image_input_drawable(optarg);
		break;

	    case 'g' :
		generator = optarg;
		break;

	    case 's' :
		if (!parse_image_size(optarg, &img_width, &img_height))
		{
		    fprintf(stderr, _("Error: Invalid image size.  Syntax is <width>x<height>.  Example: 1024x768.\n"));
		    exit(1);
		}
		size_is_set = 1;
		break;

	    case OPTION_BENCH_RENDER_COUNT :
		bench_render_count = atoi(optarg);
		break;

	    case OPTION_BENCH_ONLY_COMPILE :
		bench_render_count = 0;
		break;

	    case OPTION_BENCH_NO_OUTPUT :
		bench_no_output = TRUE;
		break;

	    case OPTION_BENCH_NO_COMPILE_TIME_LIMIT :
		compile_time_limit = -1;
		break;

	    case OPTION_BENCH_NO_BACKEND :
		bench_no_backend = TRUE;
		break;

#ifdef MOVIES
	    case 'F' :
		generate_movie = 1;
		num_frames = atoi(optarg);
		assert(num_frames > 0);
		break;

	    case 'M' :
		alloc_cmdline_movie_input_drawable(optarg);
		break;
#endif
	}
    }

    if (script != NULL)
    {
	if (argc - optind != 1)
	{
	    usage();
	    return 1;
	}

	output_filename = argv[optind];
    }
    else
    {
	if (argc - optind != 2)
	{
	    usage();
	    return 1;
	}

	script = argv[optind];
	output_filename = argv[optind + 1];
    }

    init_tags();
    init_builtins();
    init_macros();
    init_compiler();

    if (htmldoc)
    {
	mathmap_t *mathmap = parse_mathmap(script);
	filter_t *filter;
	FILE *out;

	if (mathmap == NULL)
	{
	    fprintf(stderr, _("Error: Could not read MathMap script: %s\n"), error_string);
	    return 1;
	}

	out = fopen(output_filename, "w");
	if (out == NULL)
	{
	    fprintf(stderr, _("Error: Cannot open file `%s' for writing: %s\n"),
		    output_filename, strerror(errno));
	    return 1;
	}

	for (filter = mathmap->filters; filter != NULL; filter = filter->next)
	{
	    if (filter->kind != FILTER_MATHMAP)
		continue;

	    if (!write_filter_html_doc(out, filter->v.mathmap.decl))
		return 1;
	}

	fclose(out);
    }
    else if (generator == 0)
    {
	char *support_paths[4];
	mathmap_t *mathmap;
	mathmap_invocation_t *invocation;
	int current_frame;

	support_paths[0] = g_strdup_printf("%s/mathmap", GIMPDATADIR);
	support_paths[1] = g_strdup_printf("%s/.gimp-2.6/mathmap", getenv("HOME"));
	support_paths[2] = g_strdup_printf("%s/.gimp-2.4/mathmap", getenv("HOME"));
	support_paths[3] = NULL;

	mathmap = compile_mathmap(script, support_paths, compile_time_limit, bench_no_backend);

	if (bench_no_backend)
	    return 0;

	if (mathmap == 0)
	{
	    fprintf(stderr, _("Error: %s\n"), error_string);
	    exit(1);
	}

	if (bench_render_count == 0)
	    return 0;

	if (!size_is_set)
	    for (userval_info = mathmap->main_filter->userval_infos;
		 userval_info != NULL;
		 userval_info = userval_info->next)
	    {
		define_t *define;
		unsigned char *image;

		if (userval_info->type != USERVAL_IMAGE)
		    continue;

		define = lookup_define(defines, userval_info->name);
		if (define == NULL)
		{
		    fprintf(stderr, _("Error: No value defined for input image `%s'.\n"), userval_info->name);
		    return 1;
		}

		image = read_image(define->value, &img_width, &img_height);
		if (image == NULL)
		{
		    fprintf(stderr, _("Error: Could not read input image `%s'.\n"), define->value);
		    return 1;
		}
		free(image);

		size_is_set = TRUE;

		break;
	    }

	if (!size_is_set)
	{
	    fprintf(stderr, _("Error: Image size not set and no input images given.\n"));
	    exit(1);
	}

	invocation = invoke_mathmap(mathmap, NULL, img_width, img_height, TRUE);

	for (userval_info = mathmap->main_filter->userval_infos;
	     userval_info != NULL;
	     userval_info = userval_info->next)
	{
	    userval_t *userval = &invocation->uservals[userval_info->index];
	    define_t *define = lookup_define(defines, userval_info->name);

	    if (define == NULL)
	    {
		if (userval_info->type == USERVAL_IMAGE)
		{
		    fprintf(stderr, _("Error: No value defined for input image `%s'.\n"), userval_info->name);
		    return 1;
		}
	    }
	    else
		switch (userval_info->type)
		{
		    case USERVAL_INT_CONST :
			userval->v.int_const = atoi(define->value);
			break;

		    case USERVAL_FLOAT_CONST :
			userval->v.float_const = g_ascii_strtod(define->value, NULL);
			break;

		    case USERVAL_BOOL_CONST :
			userval->v.bool_const = (float)atoi(define->value);
			break;

		    case USERVAL_IMAGE :
			assign_image_userval_drawable(userval_info, userval,
						      alloc_cmdline_image_input_drawable(define->value));
			++num_input_drawables;
			break;

		    default :
			fprintf(stderr, _("Error: Can only define user values for types int, float, bool and image.\n"));
			return 1;
		}
	}

	for (render_num = 0; render_num < bench_render_count; ++render_num)
	{
#ifdef MOVIES
	    for (i = 0; i < num_input_drawables; ++i)
		if (input_drawables[i].type == DRAWABLE_MOVIE)
		{
		    assert(quicktime_video_width(input_drawables[i].v.movie, 0) == img_width);
		    assert(quicktime_video_height(input_drawables[i].v.movie, 0) == img_height);
		}
#endif

	    invocation_set_antialiasing(invocation, antialiasing);
	    invocation->supersampling = supersampling;

	    invocation->output_bpp = 4;

	    output = (guchar*)malloc((long)invocation->output_bpp * (long)img_width * (long)img_height);
	    assert(output != 0);

#ifdef MOVIES
	    if (generate_movie)
	    {
		output_movie = quicktime_open(output_filename, 0, 1);
		assert(output_movie != 0);

		quicktime_set_video(output_movie, 1, img_width, img_height, 25, QUICKTIME_JPEG);
		assert(quicktime_supported_video(output_movie, 0));
		quicktime_seek_start(output_movie);

		rows = (guchar**)malloc(sizeof(guchar*) * img_height);
		for (i = 0; i < img_height; ++i)
		    rows[i] = output + img_width * invocation->output_bpp * i;
	    }
#endif

	    for (current_frame = 0; current_frame < num_frames; ++current_frame)
	    {
		float current_t = (float)current_frame / (float)num_frames;
		image_t *closure = closure_image_alloc(&invocation->mathfuncs,
						       NULL,
						       invocation->mathmap->main_filter->num_uservals,
						       invocation->uservals,
						       img_width, img_height);
		mathmap_frame_t *frame = invocation_new_frame(invocation, closure,
							      current_frame, current_t);

		call_invocation_parallel_and_join(frame, closure, 0, 0, img_width, img_height, output, 1);

		invocation_free_frame(frame);

#ifdef MOVIES
		if (generate_movie && !bench_no_output)
		{
		    fprintf(stderr, _("writing frame %d\n"), current_frame);
		    assert(quicktime_encode_video(output_movie, rows, 0) == 0);
		}
#endif

		closure_image_free(closure);
	    }

	    if (!bench_no_output)
	    {
#ifdef MOVIES
		if (generate_movie)
		    quicktime_close(output_movie);
		else
#endif
		    write_image(output_filename, img_width, img_height, output,
				invocation->output_bpp, img_width * invocation->output_bpp, IMAGE_FORMAT_PNG);
	    }

	    free(output);
	}
    }
    else
    {
	if (strcmp(generator, "blender") == 0)
	{
	    if (!blender_generate_plug_in(script, output_filename))
		return 1;
	}
	/*
	else if (strcmp(generator, "pixeltree") == 0)
	{
	    if (!pixeltree_generate_plug_in(argv[optind], argv[optind + 1]))
		return 1;
	}
	*/
	else
	{
	    fprintf(stderr, _("Unknown generator `%s'\n"), generator);
	    return 1;
	}
    }

    return 0;
}
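
All of the examples above share the same opening pattern. As a closing sketch, here is a minimal stand-alone program that opens a movie and prints the geometry and timing of track 0; the path is a placeholder and the header name may differ between installs.

#include <stdio.h>
#include <quicktime.h>	/* or lqt/lqt.h, depending on the install */

int main(void)
{
	quicktime_t *file = quicktime_open("clip.mov", 1, 0);

	if(!file || !quicktime_video_tracks(file))
	{
		fprintf(stderr, "No video track.\n");
		return 1;
	}

	printf("%dx%d, %ld frames @ %.3f fps\n",
		quicktime_video_width(file, 0),
		quicktime_video_height(file, 0),
		(long)quicktime_video_length(file, 0),
		quicktime_frame_rate(file, 0));

	quicktime_close(file);
	return 0;
}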