Code example #1
0
int BC_Bitmap::initialize(BC_WindowBase *parent_window,
                          int w,
                          int h,
                          int color_model,
                          int use_shm)
{
// Record the creation parameters and window state.
    this->parent_window = parent_window;
    this->top_level = parent_window->top_level;
    this->w = w;
    this->h = h;
    this->color_model = color_model;
// Shared memory is used only when both the caller and the resources allow it.
    this->use_shm = use_shm ? parent_window->get_resources()->use_shm : 0;
    this->bg_color = parent_window->bg_color;

// Clear the per-buffer bookkeeping before allocation.
    ximage[0] = 0;
    xv_image[0] = 0;
    data[0] = 0;
    last_pixmap_used = 0;
    last_pixmap = 0;
    current_ringbuffer = 0;

// Set ring buffers based on total memory used.
// The program icon must use multiple buffers but larger bitmaps may not fit
// in memory.
    int bytes_per_pixel = cmodel_calculate_pixelsize(color_model);
    ring_buffers = (w * h * bytes_per_pixel < 0x40000) ? 4 : 1;

    allocate_data();
    return 0;
}
Code example #2
0
File: colormodels.c  Project: beequ7et/cinelerra-cv
/*
 * Return the number of bytes needed to hold one frame of the given
 * color model.  The planar YUV models have a fixed layout derived from
 * w and h; every other model is h scanlines of bytes_per_line bytes.
 * A negative bytes_per_line means "derive the pitch from the pixel
 * size".  Four extra bytes of padding are added to every size.
 */
int cmodel_calculate_datasize(int w, int h, int bytes_per_line, int color_model)
{
	if(bytes_per_line < 0) bytes_per_line = w * 
		cmodel_calculate_pixelsize(color_model);
	switch(color_model)
	{
/* 4:2:0 / 4:1:1: the two chroma planes together are half the luma plane */
		case BC_YUV420P:
		case BC_YUV411P:
			return w * h + w * h / 2 + 4;

/* 4:2:2 planar: the two chroma planes together equal the luma plane */
		case BC_YUV422P:
			return w * h * 2 + 4;

/* 4:4:4 planar: three full-size planes */
		case BC_YUV444P:
			return w * h * 3 + 4;

/* Packed models: scanline pitch times height */
		default:
			return h * bytes_per_line + 4;
	}
}
Code example #3
0
File: colormodels.c  Project: beequ7et/cinelerra-cv
/*
 * Transfer a rectangle of pixels from an input frame to an output frame,
 * converting between color models and scaling when the source and
 * destination rectangles differ.  Packed color models are addressed
 * through output_rows/input_rows; planar YUV models through the *_plane
 * pointers.  bg_color is the 0xRRGGBB background used when an alpha
 * model is composited onto a non-alpha model.  in_rowspan/out_rowspan
 * are the scanline pitches (luma pitch for planar models).
 */
void cmodel_transfer(unsigned char **output_rows, 
	unsigned char **input_rows,
	unsigned char *out_y_plane,
	unsigned char *out_u_plane,
	unsigned char *out_v_plane,
	unsigned char *in_y_plane,
	unsigned char *in_u_plane,
	unsigned char *in_v_plane,
	int in_x, 
	int in_y, 
	int in_w, 
	int in_h,
	int out_x, 
	int out_y, 
	int out_w, 
	int out_h,
	int in_colormodel, 
	int out_colormodel,
	int bg_color,
	int in_rowspan,
	int out_rowspan)
{
	int *column_table;
	int *row_table;
	int scale;
	int bg_r, bg_g, bg_b;
	int in_pixelsize = cmodel_calculate_pixelsize(in_colormodel);
	int out_pixelsize = cmodel_calculate_pixelsize(out_colormodel);

/* Unpack the 0xRRGGBB background color into its components. */
	bg_r = (bg_color & 0xff0000) >> 16;
	bg_g = (bg_color & 0xff00) >> 8;
	bg_b = (bg_color & 0xff);

// Initialize tables
/* The global YUV lookup tables are built once on first use. */
	if(yuv_table == 0)
	{
		yuv_table = calloc(1, sizeof(cmodel_yuv_t));
		cmodel_init_yuv(yuv_table);
	}

// Get scaling
/* scale flags whether horizontal resampling or cropping is required.
 * NOTE(review): vertical-only scaling (out_h != in_h) does not set
 * scale -- presumably handled through row_table alone; confirm. */
	scale = (out_w != in_w) || (in_x != 0);
	get_scale_tables(&column_table, &row_table, 
		in_x, in_y, in_x + in_w, in_y + in_h,
		out_x, out_y, out_x + out_w, out_y + out_h);

/*
 * printf("cmodel_transfer 1 %d %d %d,%d %d,%d %d,%d %d,%d\n", 
 * in_colormodel, 
 * out_colormodel, 
 * out_x, 
 * out_y, 
 * out_w, 
 * out_h, 
 * in_x, 
 * in_y, 
 * in_w, 
 * in_h);
 */


/* Forwards the full argument list plus the derived pixel sizes, scale
 * tables and background components to the per-colormodel routines. */
#define PERMUTATION_VALUES \
	output_rows,  \
	input_rows, \
	out_y_plane, \
	out_u_plane, \
	out_v_plane, \
	in_y_plane, \
	in_u_plane, \
	in_v_plane, \
	in_x,  \
	in_y,  \
	in_w,  \
	in_h, \
	out_x,  \
	out_y,  \
	out_w,  \
	out_h, \
	in_colormodel,  \
	out_colormodel, \
	bg_color, \
	in_rowspan, \
	out_rowspan, \
	scale, \
	out_pixelsize, \
	in_pixelsize, \
	row_table, \
	column_table, \
	bg_r, \
	bg_g, \
	bg_b

// Handle planar cmodels separately
	switch(in_colormodel)
	{
		case BC_RGB_FLOAT:
		case BC_RGBA_FLOAT:
			cmodel_float(PERMUTATION_VALUES);
			break;

		case BC_YUV420P:
		case BC_YUV422P:
			cmodel_yuv420p(PERMUTATION_VALUES);
			break;

		case BC_YUV9P:
			cmodel_yuv9p(PERMUTATION_VALUES);
			break;

		case BC_YUV444P:
			cmodel_yuv444p(PERMUTATION_VALUES);
			break;

		case BC_YUV422:
			cmodel_yuv422(PERMUTATION_VALUES);
			break;

		default:
			cmodel_default(PERMUTATION_VALUES);
			break;
	}

/*
 * printf("cmodel_transfer 100 %d %d\n", 
 * in_colormodel, 
 * out_colormodel);
 */

/* The scale tables were heap-allocated by get_scale_tables(). */
	free(column_table);
	free(row_table);
}
Code example #4
0
// Convert picture_in (pix_fmt_in, width_in x height_in) into frame_out's
// color model.  Uses img_convert()/swscale when the destination maps to
// an ffmpeg pixel format, and falls back to cmodel_transfer() (going
// through a temporary RGBA8888 frame when the source model is unknown).
// Returns 0 on success, nonzero on failure.
int FFMPEG::convert_cmodel(AVPicture *picture_in, PixelFormat pix_fmt_in,
                           int width_in, int height_in, VFrame *frame_out) {

    // set up a temporary picture_out from frame_out
    AVPicture picture_out;
    init_picture_from_frame(&picture_out, frame_out);
    int cmodel_out = frame_out->get_color_model();
    PixelFormat pix_fmt_out = color_model_to_pix_fmt(cmodel_out);

#ifdef HAVE_SWSCALER
    // We need a context for swscale
    struct SwsContext *convert_ctx;
#endif
    int result;
#ifndef HAVE_SWSCALER
    // do conversion within libavcodec if possible
    if (pix_fmt_out != PIX_FMT_NB) {
        result = img_convert(&picture_out,
                             pix_fmt_out,
                             picture_in,
                             pix_fmt_in,
                             width_in,
                             height_in);
        if (result) {
            printf("FFMPEG::convert_cmodel img_convert() failed\n");
        }
        return result;
    }
#else
    convert_ctx = sws_getContext(width_in, height_in,pix_fmt_in,
                                 frame_out->get_w(),frame_out->get_h(),pix_fmt_out,
                                 SWS_BICUBIC, NULL, NULL, NULL);

    if(convert_ctx == NULL) {
        printf("FFMPEG::convert_cmodel : swscale context initialization failed\n");
        return 1;
    }

    result = sws_scale(convert_ctx,
                       picture_in->data, picture_in->linesize,
                       width_in, height_in,
                       picture_out.data, picture_out.linesize);


    sws_freeContext(convert_ctx);

    if(result) {
        printf("FFMPEG::convert_cmodel sws_scale() failed\n");
    }
    // NOTE(review): unlike the img_convert() branch above, this path does
    // not return after converting, so the cmodel_transfer() fallback below
    // runs as well -- confirm whether a "return result;" was intended.
#endif

    // make an intermediate temp frame only if necessary
    int cmodel_in = pix_fmt_to_color_model(pix_fmt_in);
    if (cmodel_in == BC_TRANSPARENCY) {
        if (pix_fmt_in == PIX_FMT_RGB32) {
            // avoid infinite recursion if things are broken
            printf("FFMPEG::convert_cmodel pix_fmt_in broken!\n");
            return 1;
        }

        // NOTE: choose RGBA8888 as a hopefully non-lossy colormodel
        VFrame *temp_frame = new VFrame(0, width_in, height_in,
                                        BC_RGBA8888);
        if (convert_cmodel(picture_in, pix_fmt_in,
                           width_in, height_in, temp_frame)) {
            delete temp_frame;
            return 1;  // recursed call will print error message
        }

        int result = convert_cmodel(temp_frame, frame_out);
        delete temp_frame;
        return result;
    }


    // NOTE: no scaling possible in img_convert() so none possible here
    if (frame_out->get_w() != width_in ||
            frame_out->get_h() != height_in) {
        printf("scaling from %dx%d to %dx%d not allowed\n",
               width_in, height_in,
               frame_out->get_w(), frame_out->get_h());
        return 1;
    }


    // if we reach here we know that cmodel_transfer() will work
    uint8_t *yuv_in[3] = {0,0,0};
    uint8_t *row_pointers_in[height_in];
    if (cmodel_is_planar(cmodel_in)) {
        yuv_in[0] = picture_in->data[0];
        yuv_in[1] = picture_in->data[1];
        yuv_in[2] = picture_in->data[2];
    }
    else {
        // set row pointers for picture_in
        uint8_t *data = picture_in->data[0];
        // Fixed: the scanline pitch is pixelsize * width, not * height.
        int bytes_per_line =
            cmodel_calculate_pixelsize(cmodel_in) * width_in;
        for (int i = 0; i < height_in; i++) {
            row_pointers_in[i] = data + i * bytes_per_line;
        }
    }

    cmodel_transfer
    (// Packed data out
        frame_out->get_rows(),
        // Packed data in
        row_pointers_in,

        // Planar data out
        frame_out->get_y(), frame_out->get_u(), frame_out->get_v(),
        // Planar data in
        yuv_in[0], yuv_in[1], yuv_in[2],

        // Dimensions in
        0, 0, width_in, height_in,  // NOTE: dimensions are same
        // Dimensions out
        0, 0, width_in, height_in,

        // Color model in, color model out
        cmodel_in, cmodel_out,

        // Background color
        0,

        // Rowspans in, out (of luma for YUV)
        width_in, width_in

    );

    return 0;
}
Code example #5
0
// Decode the video frame at file->current_frame into the supplied VFrame.
// Seeks the ffmpeg stream for backward or large forward jumps, otherwise
// decodes forward until the stream position catches up, then converts the
// decoded picture into the frame's colormodel.
// Returns the last av_read_frame() error (0 on success).
int FileFFMPEG::read_frame(VFrame *frame)
{
	int error = 0;
	const int debug = 0;

// Serialize all access to the shared ffmpeg decoder state.
	ffmpeg_lock->lock("FileFFMPEG::read_frame");
	
	
	FileFFMPEG::read_frame
	FileFFMPEGStream *stream = video_streams.get(0);
	if(debug) printf("FileFFMPEG::read_frame %d stream=%p stream->ffmpeg_file_contex=%p\n", 
		__LINE__, 
		stream,
		stream->ffmpeg_file_context);
	AVStream *ffmpeg_stream = ((AVFormatContext*)stream->ffmpeg_file_context)->streams[stream->index];
	AVCodecContext *decoder_context = ffmpeg_stream->codec;

	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

// if(file->current_frame == 100)
// {
// printf("FileFFMPEG::read_frame %d fake crash\n", __LINE__);
// 	exit(1);
// }


//dump_context(stream->codec);
// On the first call, decode packets until the codec produces a picture so
// later seeks and decodes start from a valid decoder state.
	if(stream->first_frame)
	{
		stream->first_frame = 0;
		int got_it = 0;

		while(!got_it && !error)
		{
			AVPacket packet;
			if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
			error = av_read_frame((AVFormatContext*)stream->ffmpeg_file_context, 
				&packet);
			if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

			if(!error && packet.size > 0)
			{
// Only decode packets belonging to this video stream.
				if(packet.stream_index == stream->index)
				{

					if(!ffmpeg_frame)
						ffmpeg_frame = avcodec_alloc_frame();
					int got_picture = 0;

					if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

                	avcodec_get_frame_defaults((AVFrame*)ffmpeg_frame);
					if(debug) printf("FileFFMPEG::read_frame %d decoder_context=%p ffmpeg_frame=%p\n", 
						__LINE__,
						decoder_context,
						ffmpeg_frame);

		        	int result = avcodec_decode_video(
						decoder_context,
                    	(AVFrame*)ffmpeg_frame, 
						&got_picture,
                    	packet.data, 
						packet.size);
					if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
					if(((AVFrame*)ffmpeg_frame)->data[0] && got_picture) got_it = 1;
					if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
				}
			}

			av_free_packet(&packet);
		}

// A read error while priming is not propagated to the caller.
		error = 0;
	}
	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

#define SEEK_THRESHOLD 16

// printf("FileFFMPEG::read_frame %d current_frame=%lld file->current_frame=%lld\n", 
// __LINE__, 
// current_frame,
// file->current_frame);
// Seek only for backward jumps or forward jumps past SEEK_THRESHOLD frames;
// short forward jumps are cheaper to decode through sequentially.
	if(stream->current_frame != file->current_frame &&
		(file->current_frame < stream->current_frame ||
		file->current_frame > stream->current_frame + SEEK_THRESHOLD))
	{
		if(debug) printf("FileFFMPEG::read_frame %d stream->current_frame=%lld file->current_frame=%lld\n", 
		__LINE__, 
		(long long)stream->current_frame, 
		(long long)file->current_frame);

// Convert the frame number into the stream's time base.
		int64_t timestamp = (int64_t)((double)file->current_frame * 
			ffmpeg_stream->time_base.den /
			ffmpeg_stream->time_base.num /
			asset->frame_rate);
// Want to seek to the nearest keyframe and read up to the current frame
// but ffmpeg doesn't support that kind of precision.
// Also, basing all the seeking on the same stream seems to be required for synchronization.
		av_seek_frame((AVFormatContext*)stream->ffmpeg_file_context, 
			/* stream->index */ 0, 
			timestamp, 
			AVSEEK_FLAG_ANY);
// Force the catch-up loop below to decode at least one frame.
		stream->current_frame = file->current_frame - 1;
	}
	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

	int got_it = 0;
// Read frames until we catch up to the current position.
// 	if(current_frame >= file->current_frame - SEEK_THRESHOLD &&
// 		current_frame < file->current_frame - 1)
// 	{
// 		printf("FileFFMPEG::read_frame %d current_frame=%lld file->current_frame=%lld\n", 
// 			__LINE__,
// 			current_frame,
// 			file->current_frame);
// 	}



	while(stream->current_frame < file->current_frame && !error)
	{
		got_it = 0;
		if(debug) printf("FileFFMPEG::read_frame %d stream->current_frame=%lld file->current_frame=%lld\n", 
			__LINE__,
			(long long)stream->current_frame,
			(long long)file->current_frame);

// Decode packets until one complete picture comes out.
		while(!got_it && !error)
		{
			AVPacket packet;

			error = av_read_frame((AVFormatContext*)stream->ffmpeg_file_context, 
				&packet);

			if(!error && packet.size > 0)
			{
				if(packet.stream_index == stream->index)
				{

					if(!ffmpeg_frame)
						ffmpeg_frame = avcodec_alloc_frame();
					int got_picture = 0;
                	avcodec_get_frame_defaults((AVFrame*)ffmpeg_frame);


// printf("FileFFMPEG::read_frame %d current_frame=%lld ffmpeg_frame=%p packet.data=%p packet.size=%d\n",
// __LINE__,
// file->current_frame, 
// ffmpeg_frame, 
// packet.data, 
// packet.size);
// for(int i = 0; i < decoder_context->extradata_size; i++)
// printf("0x%02x, ", decoder_context->extradata[i]);
// printf("\n");
// 
// if(file->current_frame >= 200 && file->current_frame < 280)
// {
// char string[1024];
// sprintf(string, "/tmp/debug%03lld", file->current_frame);
// FILE *out = fopen(string, "w");
// fwrite(packet.data, packet.size, 1, out);
// fclose(out);
// }


		        	int result = avcodec_decode_video(
						decoder_context,
                    	(AVFrame*)ffmpeg_frame, 
						&got_picture,
                    	packet.data, 
						packet.size);


//printf("FileFFMPEG::read_frame %d result=%d\n", __LINE__, result);
					if(((AVFrame*)ffmpeg_frame)->data[0] && got_picture) got_it = 1;
//printf("FileFFMPEG::read_frame %d result=%d got_it=%d\n", __LINE__, result, got_it);
				}
			}
			
			
			av_free_packet(&packet);
		}

		if(got_it) stream->current_frame++;
	}

//PRINT_TRACE
// printf("FileFFMPEG::read_frame %d current_frame=%lld file->current_frame=%lld got_it=%d\n", 
// __LINE__, 
// current_frame,
// file->current_frame,
// got_it);
	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

// Convert colormodel
	if(got_it)
	{
		int input_cmodel;
		AVFrame *input_frame = (AVFrame*)ffmpeg_frame;

// Map the ffmpeg pixel format onto the corresponding BC colormodel.
		switch(decoder_context->pix_fmt)
		{
			case PIX_FMT_YUV420P:
				input_cmodel = BC_YUV420P;
				break;
#ifndef FFMPEG_2010
			case PIX_FMT_YUV422:
				input_cmodel = BC_YUV422;
				break;
#endif

			case PIX_FMT_YUV422P:
				input_cmodel = BC_YUV422P;
				break;
			case PIX_FMT_YUV410P:
				input_cmodel = BC_YUV9P;
				break;
			default:
				fprintf(stderr, 
					"quicktime_ffmpeg_decode: unrecognized color model %d\n", 
					decoder_context->pix_fmt);
				input_cmodel = BC_YUV420P;
				break;
		}



// Build packed row pointers into the decoded luma plane for cmodel_transfer.
// NOTE(review): rows are assumed packed at width * pixelsize, but ffmpeg
// may pad scanlines to linesize[0] -- confirm these match for all codecs.
		unsigned char **input_rows = 
			(unsigned char**)malloc(sizeof(unsigned char*) * 
			decoder_context->height);


		for(int i = 0; i < decoder_context->height; i++)
			input_rows[i] = input_frame->data[0] + 
				i * 
				decoder_context->width * 
				cmodel_calculate_pixelsize(input_cmodel);


		cmodel_transfer(frame->get_rows(), /* Leave NULL if non existent */
			input_rows,
			frame->get_y(), /* Leave NULL if non existent */
			frame->get_u(),
			frame->get_v(),
			input_frame->data[0], /* Leave NULL if non existent */
			input_frame->data[1],
			input_frame->data[2],
			0,        /* Dimensions to capture from input frame */
			0, 
			decoder_context->width, 
			decoder_context->height,
			0,       /* Dimensions to project on output frame */
			0, 
			frame->get_w(), 
			frame->get_h(),
			input_cmodel, 
			frame->get_color_model(),
			0,         /* When transfering BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
			input_frame->linesize[0],       /* For planar use the luma rowspan */
			frame->get_w());
		free(input_rows);
	}
//PRINT_TRACE


	ffmpeg_lock->unlock();
	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
	return error;
}
Code example #6
0
File: qtdv.c  Project: knutj/cinelerra
/*
 * Decode the DV frame at the current position into row_pointers.
 * Uses libdv's native decode when the caller wants a full frame in a
 * colormodel libdv emits correctly; otherwise decodes into a temporary
 * YUV 4:2:2 frame and crops/scales/converts with cmodel_transfer().
 * Returns nonzero when reading the compressed frame fails.
 */
static int decode(quicktime_t *file, unsigned char **row_pointers, int track)
{
    long bytes;
    quicktime_video_map_t *vtrack = &(file->vtracks[track]);
    quicktime_dv_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
    int width = vtrack->track->tkhd.track_width;
    int height = vtrack->track->tkhd.track_height;
    int result = 0;
    int i;
    int decode_colormodel = 0;
    /* Scanline pitches handed to libdv; [0] is overwritten per colormodel. */
    int pitches[3] = { 720 * 2, 0, 0 };


    /* Read the compressed frame into the codec's buffer. */
    quicktime_set_video_position(file, vtrack->current_position, track);
    bytes = quicktime_frame_size(file, vtrack->current_position, track);
    result = !quicktime_read_data(file, (char*)codec->data, bytes);

    /* Drop the decoder so it is rebuilt with the new parameters. */
    if( codec->dv_decoder && codec->parameters_changed )
    {
        dv_decoder_free( codec->dv_decoder );
        codec->dv_decoder = NULL;
        codec->parameters_changed = 0;
    }

    if( ! codec->dv_decoder )
    {
        /* Decoder construction is serialized with a global mutex. */
        pthread_mutex_lock( &libdv_init_mutex );


        codec->dv_decoder = dv_decoder_new( codec->add_ntsc_setup,
                                            codec->clamp_luma,
                                            codec->clamp_chroma );
        codec->dv_decoder->prev_frame_decoded = 0;

        codec->parameters_changed = 0;
        pthread_mutex_unlock( &libdv_init_mutex );
    }

    if(codec->dv_decoder)
    {
        /* NOTE(review): presumably verifies the output rows form one
         * contiguous buffer -- confirm against check_sequentiality(). */
        int is_sequential =
            check_sequentiality( row_pointers,
                                 720 * cmodel_calculate_pixelsize(file->color_model),
                                 file->out_h );

        codec->dv_decoder->quality = codec->decode_quality;

        dv_parse_header( codec->dv_decoder, codec->data );

// Libdv improperly decodes RGB colormodels.
        /* Fast path: full-frame decode straight into the caller's rows. */
        if((file->color_model == BC_YUV422 ||
                file->color_model == BC_RGB888) &&
                file->in_x == 0 &&
                file->in_y == 0 &&
                file->in_w == width &&
                file->in_h == height &&
                file->out_w == width &&
                file->out_h == height &&
                is_sequential)
        {
            if( file->color_model == BC_YUV422 )
            {
                pitches[0] = 720 * 2;
                dv_decode_full_frame( codec->dv_decoder, codec->data,
                                      e_dv_color_yuv, row_pointers,
                                      pitches );
            }
            else if( file->color_model == BC_RGB888)
            {
                pitches[0] = 720 * 3;
                dv_decode_full_frame( codec->dv_decoder, codec->data,
                                      e_dv_color_rgb, row_pointers,
                                      pitches );
            }
        }
        else
        {
            /* Slow path: decode into a reusable 720x576 YUV 4:2:2 frame,
             * then crop/scale/convert into the caller's buffers. */
            if(!codec->temp_frame)
            {
                codec->temp_frame = malloc(720 * 576 * 2);
                codec->temp_rows = malloc(sizeof(unsigned char*) * 576);
                for(i = 0; i < 576; i++)
                    codec->temp_rows[i] = codec->temp_frame + 720 * 2 * i;
            }

            decode_colormodel = BC_YUV422;
            pitches[0] = 720 * 2;
            dv_decode_full_frame( codec->dv_decoder, codec->data,
                                  e_dv_color_yuv, codec->temp_rows,
                                  pitches );




            cmodel_transfer(row_pointers,
                            codec->temp_rows,
                            row_pointers[0],
                            row_pointers[1],
                            row_pointers[2],
                            codec->temp_rows[0],
                            codec->temp_rows[1],
                            codec->temp_rows[2],
                            file->in_x,
                            file->in_y,
                            file->in_w,
                            file->in_h,
                            0,
                            0,
                            file->out_w,
                            file->out_h,
                            decode_colormodel,
                            file->color_model,
                            0,
                            width,
                            file->out_w);
        }
    }

//printf(__FUNCTION__ " 2\n");
    return result;
}
Code example #7
0
File: qtdv.c  Project: knutj/cinelerra
/*
 * Encode one frame from row_pointers as DV and append it to the track.
 * Feeds the rows straight to libdv when they already match libdv's
 * geometry and an accepted colormodel; otherwise converts into a
 * temporary YUV 4:2:2 frame first.  Returns nonzero when writing fails.
 */
static int encode(quicktime_t *file, unsigned char **row_pointers, int track)
{
    //int64_t offset = quicktime_position(file);
    quicktime_video_map_t *vtrack = &(file->vtracks[track]);
    quicktime_dv_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
    quicktime_trak_t *trak = vtrack->track;
    int width = trak->tkhd.track_width;
    int height = trak->tkhd.track_height;
    /* DV frames are 720 wide and 480 (NTSC) or 576 (PAL) tall. */
    int width_i = 720;
    int height_i = (height <= 480) ? 480 : 576;
    int i;
    unsigned char **input_rows;
    int is_pal = (height_i == 480) ? 0 : 1;
    int data_length = is_pal ? DV_PAL_SIZE : DV_NTSC_SIZE;
    int result = 0;
    dv_color_space_t encode_dv_colormodel = 0;
    quicktime_atom_t chunk_atom;

    /* Drop the encoder so it is rebuilt with the new parameters. */
    if( codec->dv_encoder != NULL && codec->parameters_changed )
    {
        dv_encoder_free( codec->dv_encoder );
        codec->dv_encoder = NULL;
        codec->parameters_changed = 0;
    }

    if( ! codec->dv_encoder )
    {
        /* Encoder construction is serialized with a global mutex. */
        pthread_mutex_lock( &libdv_init_mutex );

        //printf( "dv.c encode: Alloc'ing encoder\n" );

        codec->dv_encoder = dv_encoder_new( codec->rem_ntsc_setup,
                                            codec->clamp_luma,
                                            codec->clamp_chroma );

        codec->parameters_changed = 0;
        pthread_mutex_unlock( &libdv_init_mutex );
    }

    if(codec->dv_encoder)
    {
        /* NOTE(review): presumably verifies the input rows form one
         * contiguous buffer -- confirm against check_sequentiality(). */
        int is_sequential =
            check_sequentiality( row_pointers,
                                 width_i * cmodel_calculate_pixelsize(file->color_model),
                                 height );

        /* Fast path: the caller's rows can be fed straight to libdv. */
        if( ( file->color_model == BC_YUV422
                || file->color_model == BC_RGB888 ) &&
                width == width_i &&
                height == height_i &&
                is_sequential )
        {
            input_rows = row_pointers;
            switch( file->color_model )
            {
            case BC_YUV422:
                encode_dv_colormodel = e_dv_color_yuv;
//printf( "dv.c encode: e_dv_color_yuv\n" );
                break;
            case BC_RGB888:
                encode_dv_colormodel = e_dv_color_rgb;
//printf( "dv.c encode: e_dv_color_rgb\n" );
                break;
            default:
                return 0;
                break;
            }
        }
        else
        {
// The best colormodel for encoding is YUV 422

            /* Allocate the temporary 720x576 YUV 4:2:2 frame once. */
            if(!codec->temp_frame)
            {
                codec->temp_frame = malloc(720 * 576 * 2);
                codec->temp_rows = malloc(sizeof(unsigned char*) * 576);
                for(i = 0; i < 576; i++)
                    codec->temp_rows[i] = codec->temp_frame + 720 * 2 * i;
            }

            /* Convert the input into the temporary frame. */
            cmodel_transfer(codec->temp_rows, /* Leave NULL if non existent */
                            row_pointers,
                            codec->temp_rows[0], /* Leave NULL if non existent */
                            codec->temp_rows[1],
                            codec->temp_rows[2],
                            row_pointers[0], /* Leave NULL if non existent */
                            row_pointers[1],
                            row_pointers[2],
                            0,   /* Dimensions to capture from input frame */
                            0,
                            MIN(width, width_i),
                            MIN(height, height_i),
                            0,   /* Dimensions to project on output frame */
                            0,
                            MIN(width, width_i),
                            MIN(height, height_i),
                            file->color_model,
                            BC_YUV422,
                            0,    /* When transfering BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
                            width,  /* For planar use the luma rowspan */
                            width_i);


            input_rows = codec->temp_rows;
            encode_dv_colormodel = e_dv_color_yuv;
        }

// Setup the encoder
        codec->dv_encoder->is16x9 = codec->anamorphic16x9;
        codec->dv_encoder->vlc_encode_passes = codec->vlc_encode_passes;
        codec->dv_encoder->static_qno = 0;
        codec->dv_encoder->force_dct = DV_DCT_AUTO;
        codec->dv_encoder->isPAL = is_pal;


//printf("dv.c encode: 1 %d %d %d\n", width_i, height_i, encode_dv_colormodel);
        dv_encode_full_frame( codec->dv_encoder,
                              input_rows, encode_dv_colormodel, codec->data );
//printf("dv.c encode: 2 %d %d\n", width_i, height_i);

        /* Write the encoded frame as a single chunk. */
        quicktime_write_chunk_header(file, trak, &chunk_atom);
        result = !quicktime_write_data(file, (char*)codec->data, data_length);
        quicktime_write_chunk_footer(file,
                                     trak,
                                     vtrack->current_chunk,
                                     &chunk_atom,
                                     1);
        vtrack->current_chunk++;
//printf("encode 3\n");
    }

    return result;
}
Code example #8
0
File: jpeg.c  Project: knutj/cinelerra
/*
 * Decode the MJPEG frame at the current position into row_pointers.
 * When the caller wants the whole frame, decompression goes straight
 * into the caller's buffers; otherwise it goes through a temporary
 * YUV 8:8:8 frame followed by cmodel_transfer().  Returns nonzero when
 * reading the compressed frame fails.
 */
static int decode(quicktime_t *file,
                  unsigned char **row_pointers,
                  int track)
{
    quicktime_video_map_t *vtrack = &(file->vtracks[track]);
    initialize(vtrack);
    quicktime_jpeg_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
    quicktime_trak_t *trak = vtrack->track;
    mjpeg_t *mjpeg = codec->mjpeg;
    long frame_bytes, field2_offset = 0;
    int frame_h = trak->tkhd.track_height;
    int frame_w = trak->tkhd.track_width;
    int result;
    int full_frame;
    int field_dominance = trak->mdia.minf.stbl.stsd.table[0].field_dominance;

    mjpeg_set_cpus(codec->mjpeg, file->cpus);
    mjpeg_set_rowspan(codec->mjpeg, file->row_span ? file->row_span : 0);

    /* Read the compressed frame, growing the buffer when needed. */
    quicktime_set_video_position(file, vtrack->current_position, track);
    frame_bytes = quicktime_frame_size(file, vtrack->current_position, track);
    codec->buffer_size = frame_bytes;

    if(frame_bytes > codec->buffer_allocated)
    {
        codec->buffer_allocated = frame_bytes;
        codec->buffer = realloc(codec->buffer, codec->buffer_allocated);
    }

    result = !quicktime_read_data(file, (char*)codec->buffer, frame_bytes);

    if(!result)
    {
        /* Interlaced streams carry two fields; locate the second one. */
        if(mjpeg_get_fields(mjpeg) != 2)
        {
            field2_offset = 0;
        }
        else if(file->use_avi)
        {
            field2_offset = mjpeg_get_avi_field2(codec->buffer,
                                                 frame_bytes,
                                                 &field_dominance);
        }
        else
        {
            field2_offset = mjpeg_get_quicktime_field2(codec->buffer,
                                                       frame_bytes);
// Sanity check
            if(!field2_offset)
            {
                printf("decode: FYI field2_offset=0\n");
                field2_offset = mjpeg_get_field2(codec->buffer, frame_bytes);
            }
        }

        /* The fast path applies when no cropping or scaling is wanted. */
        full_frame = file->in_x == 0 &&
                     file->in_y == 0 &&
                     file->in_w == frame_w &&
                     file->in_h == frame_h &&
                     file->out_w == frame_w &&
                     file->out_h == frame_h;

        if(full_frame)
        {
            /* Decompress straight into the caller's buffers. */
            mjpeg_decompress(codec->mjpeg,
                             codec->buffer,
                             frame_bytes,
                             field2_offset,
                             row_pointers,
                             row_pointers[0],
                             row_pointers[1],
                             row_pointers[2],
                             file->color_model,
                             file->cpus);
        }
        else
        {
            /* Decompress into a temporary YUV 8:8:8 frame, then
             * crop/scale/convert into the caller's buffers. */
            int row;
            unsigned char **scratch_rows;
            int scratch_cmodel = BC_YUV888;
            int scratch_rowsize =
                cmodel_calculate_pixelsize(scratch_cmodel) * frame_w;

            if(!codec->temp_video)
                codec->temp_video = malloc(scratch_rowsize * frame_h);
            scratch_rows = malloc(sizeof(unsigned char*) * frame_h);
            for(row = 0; row < frame_h; row++)
                scratch_rows[row] = codec->temp_video + row * scratch_rowsize;

            mjpeg_decompress(codec->mjpeg,
                             codec->buffer,
                             frame_bytes,
                             field2_offset,
                             scratch_rows,
                             scratch_rows[0],
                             scratch_rows[1],
                             scratch_rows[2],
                             scratch_cmodel,
                             file->cpus);

            cmodel_transfer(row_pointers,
                            scratch_rows,
                            row_pointers[0],
                            row_pointers[1],
                            row_pointers[2],
                            scratch_rows[0],
                            scratch_rows[1],
                            scratch_rows[2],
                            file->in_x,
                            file->in_y,
                            file->in_w,
                            file->in_h,
                            0,
                            0,
                            file->out_w,
                            file->out_h,
                            scratch_cmodel,
                            file->color_model,
                            0,
                            frame_w,
                            file->out_w);

            free(scratch_rows);
        }
    }

    return result;
}