Code example #1
File: yuv2.c Project: knutj/cinelerra
static int encode(quicktime_t *file, unsigned char **row_pointers, int track)
{
	int bytes;
	quicktime_video_map_t *vtrack = &(file->vtracks[track]);
	quicktime_trak_t *trak = vtrack->track;
	quicktime_yuv2_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
	int result = 1;
	int width = vtrack->track->tkhd.track_width;
	int height = vtrack->track->tkhd.track_height;
	quicktime_atom_t chunk_atom;
	initialize(vtrack, codec, width, height);
	bytes = height * codec->bytes_per_line;

	cmodel_transfer(codec->rows, row_pointers,
		0, 0, 0,
		row_pointers[0], row_pointers[1], row_pointers[2],
		0, 0, width, height,
		0, 0, width, height,
		file->color_model, BC_YUV422,
		0, width, codec->coded_w);

	if( codec->is_2vuy )
		convert_encode_2vuy(codec);
	else
		convert_encode_yuv2(codec);

	quicktime_write_chunk_header(file, trak, &chunk_atom);
	result = !quicktime_write_data(file, (char*)codec->work_buffer, bytes);
	quicktime_write_chunk_footer(file, trak,
		vtrack->current_chunk, &chunk_atom, 1);

	vtrack->current_chunk++;
	return result;
}
Code example #2
int FFMPEG::convert_cmodel_transfer(VFrame *frame_in, VFrame *frame_out) {

    // WARNING: cmodel_transfer is said to be broken with BC_YUV411P
    cmodel_transfer
    (// Packed data out
        frame_out->get_rows(),
        // Packed data in
        frame_in->get_rows(),

        // Planar data out
        frame_out->get_y(), frame_out->get_u(), frame_out->get_v(),
        // Planar data in
        frame_in->get_y(), frame_in->get_u(), frame_in->get_v(),

        // Dimensions in
        0, 0, frame_in->get_w(), frame_in->get_h(),
        // Dimensions out
        0, 0, frame_out->get_w(), frame_out->get_h(),

        // Color models
        frame_in->get_color_model(), frame_out->get_color_model(),

        // Background color
        0,

        // Rowspans (of luma for YUV)
        frame_in->get_w(), frame_out->get_w()

    );

    return 0;
}
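
The comments in the example above spell out the argument grouping that every cmodel_transfer() call in these examples follows. For reference, this is the prototype that layout implies: a reconstruction from the call sites with assumed parameter names, not a copy of Cinelerra's cmodel.h.

/* Reconstructed prototype; parameter names are assumptions inferred from
 * the annotated calls in these examples. */
void cmodel_transfer(unsigned char **output_rows,   /* Packed data out */
	unsigned char **input_rows,                 /* Packed data in */
	unsigned char *out_y_plane,                 /* Planar data out */
	unsigned char *out_u_plane,
	unsigned char *out_v_plane,
	unsigned char *in_y_plane,                  /* Planar data in */
	unsigned char *in_u_plane,
	unsigned char *in_v_plane,
	int in_x, int in_y, int in_w, int in_h,     /* Dimensions to capture from input */
	int out_x, int out_y, int out_w, int out_h, /* Dimensions to project on output */
	int in_colormodel, int out_colormodel,      /* Color models */
	int bg_color,       /* 0xRRGGBB background when dropping alpha */
	int in_rowspan,     /* Rowspan of input (luma rowspan for planar) */
	int out_rowspan);   /* Rowspan of output */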
Code example #3
File: yuv2.c Project: knutj/cinelerra
static int decode(quicktime_t *file, unsigned char **row_pointers, int track)
{
	int bytes;
	quicktime_video_map_t *vtrack = &(file->vtracks[track]);
	quicktime_yuv2_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
	int width = quicktime_video_width(file, track);
	int height = quicktime_video_height(file, track);
	int result = 0;

	initialize(vtrack, codec, width, height);
	quicktime_set_video_position(file, vtrack->current_position, track);
	bytes = quicktime_frame_size(file, vtrack->current_position, track);

	result = !quicktime_read_data(file, (char*)codec->work_buffer, bytes);
	if(codec->is_2vuy)
		convert_decode_2vuy(codec);
	else
		convert_decode_yuv2(codec);

	cmodel_transfer(row_pointers, codec->rows,
		row_pointers[0], row_pointers[1], row_pointers[2],
		0, 0, 0,
		file->in_x, file->in_y, file->in_w, file->in_h,
		0, 0, file->out_w, file->out_h,
		BC_YUV422, file->color_model,
		0, codec->coded_w, file->out_w);

	return result;
}
Code example #4
File: v308.c Project: beequ7et/cinelerra-cv
static int encode(quicktime_t *file, unsigned char **row_pointers, int track)
{
	int64_t offset = quicktime_position(file);
	quicktime_video_map_t *vtrack = &(file->vtracks[track]);
	quicktime_v308_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
	quicktime_trak_t *trak = vtrack->track;
	int width = vtrack->track->tkhd.track_width;
	int height = vtrack->track->tkhd.track_height;
	int bytes = width * height * 3;
	int result = 0;
	unsigned char **output_rows;
	int i;
	quicktime_atom_t chunk_atom;
	if(!codec->work_buffer)
		codec->work_buffer = malloc(vtrack->track->tkhd.track_width * 
			vtrack->track->tkhd.track_height *
			3);


	output_rows = malloc(sizeof(unsigned char*) * height);
	for(i = 0; i < height; i++)
		output_rows[i] = codec->work_buffer + i * width * 3;

	cmodel_transfer(output_rows, 
		row_pointers,
		0,
		0,
		0,
		row_pointers[0],
		row_pointers[1],
		row_pointers[2],
		0, 
		0, 
		width, 
		height,
		0, 
		0, 
		width, 
		height,
		file->color_model,
		BC_VYU888, 
		0,
		width,
		width);

	quicktime_write_chunk_header(file, trak, &chunk_atom);
	result = !quicktime_write_data(file, codec->work_buffer, bytes);
	quicktime_write_chunk_footer(file, 
		trak,
		vtrack->current_chunk,
		&chunk_atom, 
		1);

	vtrack->current_chunk++;
	
	free(output_rows);
	return result;
}
Code example #5
File: v308.c Project: beequ7et/cinelerra-cv
static int decode(quicktime_t *file, unsigned char **row_pointers, int track)
{
	int i;
	int64_t bytes;
	int result = 0;
	quicktime_video_map_t *vtrack = &(file->vtracks[track]);
	quicktime_v308_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
	int width = vtrack->track->tkhd.track_width;
	int height = vtrack->track->tkhd.track_height;
	unsigned char **input_rows;
	if(!codec->work_buffer)
		codec->work_buffer = malloc(vtrack->track->tkhd.track_width * 
			vtrack->track->tkhd.track_height *
			3);



	quicktime_set_video_position(file, vtrack->current_position, track);
	bytes = quicktime_frame_size(file, vtrack->current_position, track);
	result = !quicktime_read_data(file, codec->work_buffer, bytes);



	input_rows = malloc(sizeof(unsigned char*) * height);
	for(i = 0; i < height; i++)
		input_rows[i] = codec->work_buffer + i * width * 3;

	cmodel_transfer(row_pointers, 
		input_rows,
		row_pointers[0],
		row_pointers[1],
		row_pointers[2],
		0,
		0,
		0,
		file->in_x, 
		file->in_y, 
		file->in_w, 
		file->in_h,
		0, 
		0, 
		file->out_w, 
		file->out_h,
		BC_VYU888, 
		file->color_model,
		0,
		width,
		file->out_w);

	free(input_rows);

	return result;
}
Code example #6
int FFMPEG::convert_cmodel(AVPicture *picture_in, PixelFormat pix_fmt_in,
                           int width_in, int height_in, VFrame *frame_out) {

    // set up a temporary picture_out from frame_out
    AVPicture picture_out;
    init_picture_from_frame(&picture_out, frame_out);
    int cmodel_out = frame_out->get_color_model();
    PixelFormat pix_fmt_out = color_model_to_pix_fmt(cmodel_out);

#ifdef HAVE_SWSCALER
    // We need a context for swscale
    struct SwsContext *convert_ctx;
#endif
    int result;
#ifndef HAVE_SWSCALER
    // do conversion within libavcodec if possible
    if (pix_fmt_out != PIX_FMT_NB) {
        result = img_convert(&picture_out,
                             pix_fmt_out,
                             picture_in,
                             pix_fmt_in,
                             width_in,
                             height_in);
        if (result) {
            printf("FFMPEG::convert_cmodel img_convert() failed\n");
        }
        return result;
    }
#else
    convert_ctx = sws_getContext(width_in, height_in, pix_fmt_in,
                                 frame_out->get_w(), frame_out->get_h(), pix_fmt_out,
                                 SWS_BICUBIC, NULL, NULL, NULL);

    if(convert_ctx == NULL) {
        printf("FFMPEG::convert_cmodel : swscale context initialization failed\n");
        return 1;
    }

    result = sws_scale(convert_ctx,
                       picture_in->data, picture_in->linesize,
                       width_in, height_in,
                       picture_out.data, picture_out.linesize);


    sws_freeContext(convert_ctx);

    if(result) {
        printf("FFMPEG::convert_cmodel sws_scale() failed\n");
    }
#endif

    // make an intermediate temp frame only if necessary
    int cmodel_in = pix_fmt_to_color_model(pix_fmt_in);
    if (cmodel_in == BC_TRANSPARENCY) {
        if (pix_fmt_in == PIX_FMT_RGB32) {
            // avoid infinite recursion if things are broken
            printf("FFMPEG::convert_cmodel pix_fmt_in broken!\n");
            return 1;
        }

        // NOTE: choose RGBA8888 as a hopefully non-lossy colormodel
        VFrame *temp_frame = new VFrame(0, width_in, height_in,
                                        BC_RGBA8888);
        if (convert_cmodel(picture_in, pix_fmt_in,
                           width_in, height_in, temp_frame)) {
            delete temp_frame;
            return 1;  // recursed call will print error message
        }

        int result = convert_cmodel(temp_frame, frame_out);
        delete temp_frame;
        return result;
    }


    // NOTE: no scaling possible in img_convert() so none possible here
    if (frame_out->get_w() != width_in ||
            frame_out->get_h() != height_in) {
        printf("scaling from %dx%d to %dx%d not allowed\n",
               width_in, height_in,
               frame_out->get_w(), frame_out->get_h());
        return 1;
    }


    // if we reach here we know that cmodel_transfer() will work
    uint8_t *yuv_in[3] = {0,0,0};
    uint8_t *row_pointers_in[height_in];
    if (cmodel_is_planar(cmodel_in)) {
        yuv_in[0] = picture_in->data[0];
        yuv_in[1] = picture_in->data[1];
        yuv_in[2] = picture_in->data[2];
    }
    else {
        // set row pointers for picture_in
        uint8_t *data = picture_in->data[0];
        int bytes_per_line =
            cmodel_calculate_pixelsize(cmodel_in) * width_in;
        for (int i = 0; i < height_in; i++) {
            row_pointers_in[i] = data + i * bytes_per_line;
        }
    }

    cmodel_transfer
    (// Packed data out
        frame_out->get_rows(),
        // Packed data in
        row_pointers_in,

        // Planar data out
        frame_out->get_y(), frame_out->get_u(), frame_out->get_v(),
        // Planar data in
        yuv_in[0], yuv_in[1], yuv_in[2],

        // Dimensions in
        0, 0, width_in, height_in,  // NOTE: dimensions are same
        // Dimensions out
        0, 0, width_in, height_in,

        // Color model in, color model out
        cmodel_in, cmodel_out,

        // Background color
        0,

        // Rowspans in, out (of luma for YUV)
        width_in, width_in

    );

    return 0;
}
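
The row-pointer setup above is a pattern most packed-format callers in this collection repeat by hand, and it is easy to get wrong: the stride must be the pixel size times the frame width, not the height. A minimal sketch of the same setup as a helper; the function name is hypothetical:

/* Point one row pointer at each scanline of a packed buffer. */
static void set_packed_rows(uint8_t **rows, uint8_t *data,
	int color_model, int width, int height)
{
	int bytes_per_line = cmodel_calculate_pixelsize(color_model) * width;
	for (int i = 0; i < height; i++)
		rows[i] = data + i * bytes_per_line;
}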
Code example #7
int FileFFMPEG::read_frame(VFrame *frame)
{
	int error = 0;
	const int debug = 0;

	ffmpeg_lock->lock("FileFFMPEG::read_frame");
	
	
	FileFFMPEGStream *stream = video_streams.get(0);
	if(debug) printf("FileFFMPEG::read_frame %d stream=%p stream->ffmpeg_file_contex=%p\n", 
		__LINE__, 
		stream,
		stream->ffmpeg_file_context);
	AVStream *ffmpeg_stream = ((AVFormatContext*)stream->ffmpeg_file_context)->streams[stream->index];
	AVCodecContext *decoder_context = ffmpeg_stream->codec;

	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

// if(file->current_frame == 100)
// {
// printf("FileFFMPEG::read_frame %d fake crash\n", __LINE__);
// 	exit(1);
// }


//dump_context(stream->codec);
	if(stream->first_frame)
	{
		stream->first_frame = 0;
		int got_it = 0;

		while(!got_it && !error)
		{
			AVPacket packet;
			if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
			error = av_read_frame((AVFormatContext*)stream->ffmpeg_file_context, 
				&packet);
			if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

			if(!error && packet.size > 0)
			{
				if(packet.stream_index == stream->index)
				{

					if(!ffmpeg_frame)
						ffmpeg_frame = avcodec_alloc_frame();
					int got_picture = 0;

					if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

					avcodec_get_frame_defaults((AVFrame*)ffmpeg_frame);
					if(debug) printf("FileFFMPEG::read_frame %d decoder_context=%p ffmpeg_frame=%p\n", 
						__LINE__,
						decoder_context,
						ffmpeg_frame);

					int result = avcodec_decode_video(
						decoder_context,
						(AVFrame*)ffmpeg_frame,
						&got_picture,
						packet.data,
						packet.size);
					if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
					if(((AVFrame*)ffmpeg_frame)->data[0] && got_picture) got_it = 1;
					if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
				}
			}

			av_free_packet(&packet);
		}

		error = 0;
	}
	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

#define SEEK_THRESHOLD 16

// printf("FileFFMPEG::read_frame %d current_frame=%lld file->current_frame=%lld\n", 
// __LINE__, 
// current_frame,
// file->current_frame);
	if(stream->current_frame != file->current_frame &&
		(file->current_frame < stream->current_frame ||
		file->current_frame > stream->current_frame + SEEK_THRESHOLD))
	{
		if(debug) printf("FileFFMPEG::read_frame %d stream->current_frame=%lld file->current_frame=%lld\n", 
		__LINE__, 
		(long long)stream->current_frame, 
		(long long)file->current_frame);

		int64_t timestamp = (int64_t)((double)file->current_frame * 
			ffmpeg_stream->time_base.den /
			ffmpeg_stream->time_base.num /
			asset->frame_rate);
// Want to seek to the nearest keyframe and read up to the current frame
// but ffmpeg doesn't support that kind of precision.
// Also, basing all the seeking on the same stream seems to be required for synchronization.
		av_seek_frame((AVFormatContext*)stream->ffmpeg_file_context, 
			/* stream->index */ 0, 
			timestamp, 
			AVSEEK_FLAG_ANY);
		stream->current_frame = file->current_frame - 1;
	}
	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

	int got_it = 0;
// Read frames until we catch up to the current position.
// 	if(current_frame >= file->current_frame - SEEK_THRESHOLD &&
// 		current_frame < file->current_frame - 1)
// 	{
// 		printf("FileFFMPEG::read_frame %d current_frame=%lld file->current_frame=%lld\n", 
// 			__LINE__,
// 			current_frame,
// 			file->current_frame);
// 	}



	while(stream->current_frame < file->current_frame && !error)
	{
		got_it = 0;
		if(debug) printf("FileFFMPEG::read_frame %d stream->current_frame=%lld file->current_frame=%lld\n", 
			__LINE__,
			(long long)stream->current_frame,
			(long long)file->current_frame);

		while(!got_it && !error)
		{
			AVPacket packet;

			error = av_read_frame((AVFormatContext*)stream->ffmpeg_file_context, 
				&packet);

			if(!error && packet.size > 0)
			{
				if(packet.stream_index == stream->index)
				{

					if(!ffmpeg_frame)
						ffmpeg_frame = avcodec_alloc_frame();
					int got_picture = 0;
					avcodec_get_frame_defaults((AVFrame*)ffmpeg_frame);


// printf("FileFFMPEG::read_frame %d current_frame=%lld ffmpeg_frame=%p packet.data=%p packet.size=%d\n",
// __LINE__,
// file->current_frame, 
// ffmpeg_frame, 
// packet.data, 
// packet.size);
// for(int i = 0; i < decoder_context->extradata_size; i++)
// printf("0x%02x, ", decoder_context->extradata[i]);
// printf("\n");
// 
// if(file->current_frame >= 200 && file->current_frame < 280)
// {
// char string[1024];
// sprintf(string, "/tmp/debug%03lld", file->current_frame);
// FILE *out = fopen(string, "w");
// fwrite(packet.data, packet.size, 1, out);
// fclose(out);
// }


					int result = avcodec_decode_video(
						decoder_context,
						(AVFrame*)ffmpeg_frame,
						&got_picture,
						packet.data,
						packet.size);


//printf("FileFFMPEG::read_frame %d result=%d\n", __LINE__, result);
					if(((AVFrame*)ffmpeg_frame)->data[0] && got_picture) got_it = 1;
//printf("FileFFMPEG::read_frame %d result=%d got_it=%d\n", __LINE__, result, got_it);
				}
			}
			
			
			av_free_packet(&packet);
		}

		if(got_it) stream->current_frame++;
	}

//PRINT_TRACE
// printf("FileFFMPEG::read_frame %d current_frame=%lld file->current_frame=%lld got_it=%d\n", 
// __LINE__, 
// current_frame,
// file->current_frame,
// got_it);
	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);

// Convert colormodel
	if(got_it)
	{
		int input_cmodel;
		AVFrame *input_frame = (AVFrame*)ffmpeg_frame;

		switch(decoder_context->pix_fmt)
		{
			case PIX_FMT_YUV420P:
				input_cmodel = BC_YUV420P;
				break;
#ifndef FFMPEG_2010
			case PIX_FMT_YUV422:
				input_cmodel = BC_YUV422;
				break;
#endif

			case PIX_FMT_YUV422P:
				input_cmodel = BC_YUV422P;
				break;
			case PIX_FMT_YUV410P:
				input_cmodel = BC_YUV9P;
				break;
			default:
				fprintf(stderr, 
					"quicktime_ffmpeg_decode: unrecognized color model %d\n", 
					decoder_context->pix_fmt);
				input_cmodel = BC_YUV420P;
				break;
		}



		unsigned char **input_rows = 
			(unsigned char**)malloc(sizeof(unsigned char*) * 
			decoder_context->height);


		for(int i = 0; i < decoder_context->height; i++)
			input_rows[i] = input_frame->data[0] + 
				i * 
				decoder_context->width * 
				cmodel_calculate_pixelsize(input_cmodel);


		cmodel_transfer(frame->get_rows(), /* Leave NULL if non existent */
			input_rows,
			frame->get_y(), /* Leave NULL if non existent */
			frame->get_u(),
			frame->get_v(),
			input_frame->data[0], /* Leave NULL if non existent */
			input_frame->data[1],
			input_frame->data[2],
			0,        /* Dimensions to capture from input frame */
			0, 
			decoder_context->width, 
			decoder_context->height,
			0,       /* Dimensions to project on output frame */
			0, 
			frame->get_w(), 
			frame->get_h(),
			input_cmodel, 
			frame->get_color_model(),
			0,         /* When transferring BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
			input_frame->linesize[0],       /* For planar use the luma rowspan */
			frame->get_w());
		free(input_rows);
	}
//PRINT_TRACE


	ffmpeg_lock->unlock();
	if(debug) printf("FileFFMPEG::read_frame %d\n", __LINE__);
	return error;
}
Code example #8
File: libdv.c Project: Cuchulain/cinelerra
int dv_read_video(dv_t *dv, 
		unsigned char **output_rows, 
		unsigned char *data, 
		long bytes,
		int color_model)
{
	int dif = 0;
	int lost_coeffs = 0;
	long offset = 0;
	int isPAL = 0;
	int is61834 = 0;
	int numDIFseq;
	int ds;
	int i, v, b, m;
	dv_block_t *bl;
	long mb_offset;
	dv_sample_t sampling;
	dv_macroblock_t *mb;
	int pixel_size;
	int pitches[3];
	int use_temp = color_model != BC_YUV422;
	unsigned char *pixels[3];

//printf("dv_read_video 1 %d\n", color_model);
	pthread_mutex_lock(&dv_lock);
	switch(bytes)
	{
		case DV_PAL_SIZE:
			break;
		case DV_NTSC_SIZE:
			break;
		default:
// Unlock before returning so a bad frame size can't leave dv_lock held.
			pthread_mutex_unlock(&dv_lock);
			return 1;
	}

	if(data[0] != 0x1f)
	{
		pthread_mutex_unlock(&dv_lock);
		return 1;
	}

	pitches[0] = DV_WIDTH * 2;
	pitches[1] = 0;
	pitches[2] = 0;
	pixels[1] = 0;
	pixels[2] = 0;

	dv_parse_header(dv->decoder, data);

	if(!use_temp)
	{
//printf("dv_read_video 1\n");
		pixels[0] = output_rows[0];
		dv_decode_full_frame(dv->decoder, 
			data, 
			e_dv_color_yuv, 
			output_rows, 
			pitches);
//printf("dv_read_video 2\n");
	}
	else
	{
		unsigned char *temp_rows[DV_HEIGHT];
		if(!dv->temp_video)
			dv->temp_video = calloc(1, DV_WIDTH * DV_HEIGHT * 2);

		for(i = 0; i < DV_HEIGHT; i++)
		{
			temp_rows[i] = dv->temp_video + i * DV_WIDTH * 2;
		}

		pixels[0] = dv->temp_video;
//printf("dv_read_video 3 %p\n", data);
		dv_decode_full_frame(dv->decoder, 
			data, 
			e_dv_color_yuv, 
			pixels, 
			pitches);
//printf("dv_read_video 4\n");

		cmodel_transfer(output_rows, 
			temp_rows,
			output_rows[0],
			output_rows[1],
			output_rows[2],
			0,
			0,
			0,
			0, 
			0, 
			DV_WIDTH, 
			dv->decoder->height,
			0, 
			0, 
			DV_WIDTH, 
			dv->decoder->height,
			BC_YUV422, 
			color_model,
			0,
			DV_WIDTH,
			DV_WIDTH);
	}
	dv->decoder->prev_frame_decoded = 1;
	pthread_mutex_unlock(&dv_lock);
	return 0;
}
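
Both this example and the qtdv decoder below drive dv_decode_full_frame() with a pitches array in which only pitches[0] is set: the packed bytes per row for the requested colorspace. A sketch of the mapping these call sites rely on, assuming DV_WIDTH is 720 as libdv.c's usage suggests (the helper name is hypothetical):

/* Bytes per row handed to dv_decode_full_frame(). */
static int dv_pitch_for(int color_model)
{
	switch(color_model)
	{
		case BC_YUV422: return 720 * 2; /* packed 4:2:2, 2 bytes/pixel */
		case BC_RGB888: return 720 * 3; /* packed RGB, 3 bytes/pixel */
		default:        return 0;       /* decode into a BC_YUV422 temp, then cmodel_transfer() */
	}
}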
Code example #9
File: filethread.C Project: rasilon/cinelerra-cv
int FileThread::read_frame(VFrame *frame)
{
	FileThreadFrame *local_frame = 0;
	int got_it = 0;
	int number = 0;

//printf("FileThread::read_frame 1\n");

// Search thread for frame
	while(!got_it && !disable_read)
	{
		frame_lock->lock("FileThread::read_frame 1");
// printf("FileThread::read_frame: 1 read_position=%lld ", read_position);
// for(int i = 0; i < total_frames; i++)
// printf("%lld ", read_frames[i]->position);
// printf("\n");
		for(int i = 0; i < total_frames; i++)
		{
			local_frame = read_frames[i];
			if(local_frame->position == read_position &&
				local_frame->layer == layer &&
				local_frame->frame &&
				local_frame->frame->equal_stacks(frame))
			{
				got_it = 1;
				number = i;
//printf("FileThread::read_frame 1 %lld\n", local_frame->position);
				break;
			}
		}
		frame_lock->unlock();

// Not decoded yet but thread active
		if(!got_it && !disable_read)
		{
			user_wait_lock->lock("FileThread::read_frame");
		}
	}
//printf("FileThread::read_frame 2\n");

	if(got_it)
	{
// Copy image
		cmodel_transfer(frame->get_rows(), 
			local_frame->frame->get_rows(),
			frame->get_y(),
			frame->get_u(),
			frame->get_v(),
			local_frame->frame->get_y(),
			local_frame->frame->get_u(),
			local_frame->frame->get_v(),
			0, 
			0, 
			local_frame->frame->get_w(), 
			local_frame->frame->get_h(),
			0, 
			0, 
			frame->get_w(), 
			frame->get_h(),
			local_frame->frame->get_color_model(), 
			frame->get_color_model(),
			0,
			local_frame->frame->get_w(),
			frame->get_w());
// Can't copy stacks because the stack is needed by the plugin requestor.
		frame->copy_params(local_frame->frame);

// Recycle all frames before current one but not including current one.
// This handles redrawing of a single frame but because FileThread has no
// notion of a still frame, it has to call read_frame for those.
		frame_lock->lock("FileThread::read_frame 1");
		FileThreadFrame *new_table[MAX_READ_FRAMES];
		int k = 0;
		for(int j = number; j < total_frames; j++, k++)
		{
			new_table[k] = read_frames[j];
		}
		for(int j = 0; j < number; j++, k++)
		{
			new_table[k] = read_frames[j];
		}
		memcpy(read_frames, new_table, sizeof(FileThreadFrame*) * total_frames);
		total_frames -= number;

		start_position = read_position;
		read_position++;
		frame_lock->unlock();
		read_wait_lock->unlock();
		return 0;
	}
	else
	{
// printf("FileThread::read_frame 1 color_model=%d disable_read=%d\n", 
// frame->get_color_model(), 
// disable_read);
// Use traditional read function
		file->set_video_position(read_position, -1, 1);
		file->set_layer(layer, 1);
		read_position++;
		return file->read_frame(frame, 1);
	}
//printf("FileThread::read_frame 100\n");
}
Code example #10
File: filetiff.C Project: rasilon/cinelerra-cv
int FileTIFF::write_frame(VFrame *frame, VFrame *data, FrameWriterUnit *unit)
{
//printf("FileTIFF::write_frame 1\n");
	FileTIFFUnit *tiff_unit = (FileTIFFUnit*)unit;
	int result = 0;
	TIFF *stream;
	tiff_unit->offset = 0;
	tiff_unit->data = data;
	tiff_unit->data->set_compressed_size(0);

	stream = TIFFClientOpen("FileTIFF", 
		"w",
	    (void*)tiff_unit,
	    tiff_read, 
		tiff_write,
	    tiff_seek, 
		tiff_close,
	    tiff_size,
	    tiff_mmap, 
		tiff_unmap);

	int components, color_model, bits, type, compression;
	int sampleformat = SAMPLEFORMAT_UINT;
	int bytesperrow;
	switch(asset->tiff_cmodel)
	{
		case FileTIFF::RGB_888: 
			components = 3;
			color_model = BC_RGB888;
			bits = 8;
			type = TIFF_BYTE;
			bytesperrow = 3 * asset->width;
			break;
		case FileTIFF::RGB_161616: 
			components = 3;
			color_model = BC_RGB_FLOAT;
			bits = 16;
			type = TIFF_SHORT;
			bytesperrow = 6 * asset->width;
			break;
		case FileTIFF::RGBA_8888: 
			components = 4;
			color_model = BC_RGBA8888;
			bits = 8;
			type = TIFF_BYTE;
			bytesperrow = 4 * asset->width;
			break;
		case FileTIFF::RGBA_16161616: 
			components = 4;
			color_model = BC_RGBA_FLOAT;
			bits = 16;
			type = TIFF_SHORT;
			bytesperrow = 8 * asset->width;
			break;
		case FileTIFF::RGB_FLOAT: 
			components = 3;
			color_model = BC_RGB_FLOAT;
			bits = 32;
			type = TIFF_FLOAT;
			sampleformat = SAMPLEFORMAT_IEEEFP;
			bytesperrow = 12 * asset->width;
			break;
		case FileTIFF::RGBA_FLOAT: 
			components = 4;
			color_model = BC_RGBA_FLOAT;
			bits = 32;
			type = TIFF_FLOAT;
			sampleformat = SAMPLEFORMAT_IEEEFP;
			bytesperrow = 16 * asset->width;
			break;
		default: 
			components = 3;
			color_model = BC_RGB888;
			bits = 8;
			type = TIFF_BYTE;
			bytesperrow = 3 * asset->width;
			break;
	}


	switch(asset->tiff_compression)
	{
		case FileTIFF::LZW:
			compression = COMPRESSION_LZW;
			break;
		case FileTIFF::PACK_BITS:
			compression = COMPRESSION_PACKBITS;
			break;
		case FileTIFF::DEFLATE:
			compression = COMPRESSION_DEFLATE;
			break;
		case FileTIFF::JPEG:
			compression = COMPRESSION_JPEG;
			break;
		default:
			compression = COMPRESSION_NONE;
			break;
	}

	TIFFSetField(stream, TIFFTAG_IMAGEWIDTH, asset->width);
	TIFFSetField(stream, TIFFTAG_IMAGELENGTH, asset->height);
	TIFFSetField(stream, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
	TIFFSetField(stream, TIFFTAG_SAMPLESPERPIXEL, components);
	TIFFSetField(stream, TIFFTAG_BITSPERSAMPLE, bits);
	TIFFSetField(stream, TIFFTAG_SAMPLEFORMAT, sampleformat);
	TIFFSetField(stream, TIFFTAG_COMPRESSION, compression);
	TIFFSetField(stream, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
	TIFFSetField(stream, TIFFTAG_ROWSPERSTRIP, 
		TIFFDefaultStripSize(stream, (uint32_t)-1));
//  	TIFFSetField(stream, TIFFTAG_ROWSPERSTRIP, 
// 		(8 * 1024) / bytesperrow);
	TIFFSetField(stream, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);

	if(frame->get_color_model() == color_model)
	{
		for(int i = 0; i < asset->height; i++)
		{
			TIFFWriteScanline(stream, frame->get_rows()[i], i, 0);
		}
	}
	else
	{
		if(tiff_unit->temp &&
			tiff_unit->temp->get_color_model() != color_model)
		{
			delete tiff_unit->temp;
			tiff_unit->temp = 0;
		}
		if(!tiff_unit->temp)
		{
			tiff_unit->temp = new VFrame(0,
				asset->width,
				asset->height,
				color_model);
		}

		cmodel_transfer(tiff_unit->temp->get_rows(), 
			frame->get_rows(),
			tiff_unit->temp->get_y(),
			tiff_unit->temp->get_u(),
			tiff_unit->temp->get_v(),
			frame->get_y(),
			frame->get_u(),
			frame->get_v(),
			0, 
			0, 
			frame->get_w(), 
			frame->get_h(),
			0, 
			0, 
			frame->get_w(), 
			frame->get_h(),
			frame->get_color_model(), 
			color_model,
			0,
			frame->get_w(),
			frame->get_w());
		for(int i = 0; i < asset->height; i++)
		{
			TIFFWriteScanline(stream, tiff_unit->temp->get_rows()[i], i, 0);
		}
	}

	TIFFClose(stream);

//printf("FileTIFF::write_frame 10\n");
	return result;
}
Code example #11
File: filepng.C Project: rasilon/cinelerra-cv
int FilePNG::write_frame(VFrame *frame, VFrame *data, FrameWriterUnit *unit)
{
	PNGUnit *png_unit = (PNGUnit*)unit;
	int result = 0;
	png_structp png_ptr;
	png_infop info_ptr;
	png_infop end_info = 0;	
	VFrame *output_frame;
	data->set_compressed_size(0);

//printf("FilePNG::write_frame 1\n");
	png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
	info_ptr = png_create_info_struct(png_ptr);
	png_set_write_fn(png_ptr,
               data, 
			   (png_rw_ptr)write_function,
               (png_flush_ptr)flush_function);
	png_set_compression_level(png_ptr, 9);

	png_set_IHDR(png_ptr, 
		info_ptr, 
		asset->width, 
		asset->height,
    	8, 
		asset->png_use_alpha ? 
		  PNG_COLOR_TYPE_RGB_ALPHA : 
		  PNG_COLOR_TYPE_RGB, 
		PNG_INTERLACE_NONE, 
		PNG_COMPRESSION_TYPE_DEFAULT, 
		PNG_FILTER_TYPE_DEFAULT);
	png_write_info(png_ptr, info_ptr);

//printf("FilePNG::write_frame 1\n");
	native_cmodel = asset->png_use_alpha ? BC_RGBA8888 : BC_RGB888;
	if(frame->get_color_model() != native_cmodel)
	{
		if(!png_unit->temp_frame) png_unit->temp_frame = new VFrame(0, 
			asset->width, 
			asset->height, 
			native_cmodel);

		cmodel_transfer(png_unit->temp_frame->get_rows(), /* Leave NULL if non existent */
			frame->get_rows(),
			png_unit->temp_frame->get_y(), /* Leave NULL if non existent */
			png_unit->temp_frame->get_u(),
			png_unit->temp_frame->get_v(),
			frame->get_y(), /* Leave NULL if non existent */
			frame->get_u(),
			frame->get_v(),
			0,        /* Dimensions to capture from input frame */
			0, 
			asset->width, 
			asset->height,
			0,       /* Dimensions to project on output frame */
			0, 
			asset->width, 
			asset->height,
			frame->get_color_model(), 
			png_unit->temp_frame->get_color_model(),
			0,         /* When transferring BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
			asset->width,       /* For planar use the luma rowspan */
			asset->width);      /* Output rowspan: the packed temp frame is asset->width pixels wide */
		
		output_frame = png_unit->temp_frame;
	}
	else
		output_frame = frame;


//printf("FilePNG::write_frame 2\n");
	png_write_image(png_ptr, output_frame->get_rows());
	png_write_end(png_ptr, info_ptr);
	png_destroy_write_struct(&png_ptr, &info_ptr);
//printf("FilePNG::write_frame 3 %d\n", data->get_compressed_size());

	return result;
}
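
The write_function registered with png_set_write_fn() above is not shown in this example. A plausible sketch, assuming VFrame::allocate_compressed_data() preserves existing contents when it grows (an assumption worth checking against vframe.C):

/* Append libpng's output to the VFrame passed as the io pointer. */
static void write_function(png_structp png_ptr, png_bytep data, png_size_t length)
{
	VFrame *output = (VFrame*)png_get_io_ptr(png_ptr);
	long size = output->get_compressed_size();
	output->allocate_compressed_data(size + length); /* assumed to keep old bytes */
	memcpy(output->get_data() + size, data, length);
	output->set_compressed_size(size + length);
}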
Code example #12
File: qtdv.c Project: knutj/cinelerra
static int decode(quicktime_t *file, unsigned char **row_pointers, int track)
{
    long bytes;
    quicktime_video_map_t *vtrack = &(file->vtracks[track]);
    quicktime_dv_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
    int width = vtrack->track->tkhd.track_width;
    int height = vtrack->track->tkhd.track_height;
    int result = 0;
    int i;
    int decode_colormodel = 0;
    int pitches[3] = { 720 * 2, 0, 0 };


    quicktime_set_video_position(file, vtrack->current_position, track);
    bytes = quicktime_frame_size(file, vtrack->current_position, track);
    result = !quicktime_read_data(file, (char*)codec->data, bytes);

    if( codec->dv_decoder && codec->parameters_changed )
    {
        dv_decoder_free( codec->dv_decoder );
        codec->dv_decoder = NULL;
        codec->parameters_changed = 0;
    }

    if( ! codec->dv_decoder )
    {
        pthread_mutex_lock( &libdv_init_mutex );


        codec->dv_decoder = dv_decoder_new( codec->add_ntsc_setup,
                                            codec->clamp_luma,
                                            codec->clamp_chroma );
        codec->dv_decoder->prev_frame_decoded = 0;

        codec->parameters_changed = 0;
        pthread_mutex_unlock( &libdv_init_mutex );
    }

    if(codec->dv_decoder)
    {
        int is_sequential =
            check_sequentiality( row_pointers,
                                 720 * cmodel_calculate_pixelsize(file->color_model),
                                 file->out_h );

        codec->dv_decoder->quality = codec->decode_quality;

        dv_parse_header( codec->dv_decoder, codec->data );

// Libdv improperly decodes RGB colormodels.
        if((file->color_model == BC_YUV422 ||
                file->color_model == BC_RGB888) &&
                file->in_x == 0 &&
                file->in_y == 0 &&
                file->in_w == width &&
                file->in_h == height &&
                file->out_w == width &&
                file->out_h == height &&
                is_sequential)
        {
            if( file->color_model == BC_YUV422 )
            {
                pitches[0] = 720 * 2;
                dv_decode_full_frame( codec->dv_decoder, codec->data,
                                      e_dv_color_yuv, row_pointers,
                                      pitches );
            }
            else if( file->color_model == BC_RGB888)
            {
                pitches[0] = 720 * 3;
                dv_decode_full_frame( codec->dv_decoder, codec->data,
                                      e_dv_color_rgb, row_pointers,
                                      pitches );
            }
        }
        else
        {
            if(!codec->temp_frame)
            {
                codec->temp_frame = malloc(720 * 576 * 2);
                codec->temp_rows = malloc(sizeof(unsigned char*) * 576);
                for(i = 0; i < 576; i++)
                    codec->temp_rows[i] = codec->temp_frame + 720 * 2 * i;
            }

            decode_colormodel = BC_YUV422;
            pitches[0] = 720 * 2;
            dv_decode_full_frame( codec->dv_decoder, codec->data,
                                  e_dv_color_yuv, codec->temp_rows,
                                  pitches );




            cmodel_transfer(row_pointers,
                            codec->temp_rows,
                            row_pointers[0],
                            row_pointers[1],
                            row_pointers[2],
                            codec->temp_rows[0],
                            codec->temp_rows[1],
                            codec->temp_rows[2],
                            file->in_x,
                            file->in_y,
                            file->in_w,
                            file->in_h,
                            0,
                            0,
                            file->out_w,
                            file->out_h,
                            decode_colormodel,
                            file->color_model,
                            0,
                            width,
                            file->out_w);
        }
    }

//printf(__FUNCTION__ " 2\n");
    return result;
}
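
check_sequentiality() is called by both qtdv codecs but not shown here. Given its arguments (row pointers, packed bytes per row, height), a plausible implementation simply verifies that the rows form one contiguous buffer, which is the condition that lets libdv decode straight into row_pointers without a temp frame:

/* Sketch only: returns 1 if every row starts exactly one rowspan
 * after the previous one. */
static int check_sequentiality(unsigned char **row_pointers,
	int bytes_per_row, int height)
{
	int i;
	for(i = 0; i < height - 1; i++)
		if(row_pointers[i + 1] != row_pointers[i] + bytes_per_row)
			return 0;
	return 1;
}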
Code example #13
File: qtdv.c Project: knutj/cinelerra
static int encode(quicktime_t *file, unsigned char **row_pointers, int track)
{
    //int64_t offset = quicktime_position(file);
    quicktime_video_map_t *vtrack = &(file->vtracks[track]);
    quicktime_dv_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
    quicktime_trak_t *trak = vtrack->track;
    int width = trak->tkhd.track_width;
    int height = trak->tkhd.track_height;
    int width_i = 720;
    int height_i = (height <= 480) ? 480 : 576;
    int i;
    unsigned char **input_rows;
    int is_pal = (height_i == 480) ? 0 : 1;
    int data_length = is_pal ? DV_PAL_SIZE : DV_NTSC_SIZE;
    int result = 0;
    dv_color_space_t encode_dv_colormodel = 0;
    quicktime_atom_t chunk_atom;

    if( codec->dv_encoder != NULL && codec->parameters_changed )
    {
        dv_encoder_free( codec->dv_encoder );
        codec->dv_encoder = NULL;
        codec->parameters_changed = 0;
    }

    if( ! codec->dv_encoder )
    {
        pthread_mutex_lock( &libdv_init_mutex );

        //printf( "dv.c encode: Alloc'ing encoder\n" );

        codec->dv_encoder = dv_encoder_new( codec->rem_ntsc_setup,
                                            codec->clamp_luma,
                                            codec->clamp_chroma );

        codec->parameters_changed = 0;
        pthread_mutex_unlock( &libdv_init_mutex );
    }

    if(codec->dv_encoder)
    {
        int is_sequential =
            check_sequentiality( row_pointers,
                                 width_i * cmodel_calculate_pixelsize(file->color_model),
                                 height );

        if( ( file->color_model == BC_YUV422
                || file->color_model == BC_RGB888 ) &&
                width == width_i &&
                height == height_i &&
                is_sequential )
        {
            input_rows = row_pointers;
            switch( file->color_model )
            {
            case BC_YUV422:
                encode_dv_colormodel = e_dv_color_yuv;
//printf( "dv.c encode: e_dv_color_yuv\n" );
                break;
            case BC_RGB888:
                encode_dv_colormodel = e_dv_color_rgb;
//printf( "dv.c encode: e_dv_color_rgb\n" );
                break;
            default:
                return 0;
                break;
            }
        }
        else
        {
// The best colormodel for encoding is YUV 422

            if(!codec->temp_frame)
            {
                codec->temp_frame = malloc(720 * 576 * 2);
                codec->temp_rows = malloc(sizeof(unsigned char*) * 576);
                for(i = 0; i < 576; i++)
                    codec->temp_rows[i] = codec->temp_frame + 720 * 2 * i;
            }

            cmodel_transfer(codec->temp_rows, /* Leave NULL if non existent */
                            row_pointers,
                            codec->temp_rows[0], /* Leave NULL if non existent */
                            codec->temp_rows[1],
                            codec->temp_rows[2],
                            row_pointers[0], /* Leave NULL if non existent */
                            row_pointers[1],
                            row_pointers[2],
                            0,   /* Dimensions to capture from input frame */
                            0,
                            MIN(width, width_i),
                            MIN(height, height_i),
                            0,   /* Dimensions to project on output frame */
                            0,
                            MIN(width, width_i),
                            MIN(height, height_i),
                            file->color_model,
                            BC_YUV422,
                            0,    /* When transferring BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
                            width,  /* For planar use the luma rowspan */
                            width_i);


            input_rows = codec->temp_rows;
            encode_dv_colormodel = e_dv_color_yuv;
        }

// Setup the encoder
        codec->dv_encoder->is16x9 = codec->anamorphic16x9;
        codec->dv_encoder->vlc_encode_passes = codec->vlc_encode_passes;
        codec->dv_encoder->static_qno = 0;
        codec->dv_encoder->force_dct = DV_DCT_AUTO;
        codec->dv_encoder->isPAL = is_pal;


//printf("dv.c encode: 1 %d %d %d\n", width_i, height_i, encode_dv_colormodel);
        dv_encode_full_frame( codec->dv_encoder,
                              input_rows, encode_dv_colormodel, codec->data );
//printf("dv.c encode: 2 %d %d\n", width_i, height_i);

        quicktime_write_chunk_header(file, trak, &chunk_atom);
        result = !quicktime_write_data(file, (char*)codec->data, data_length);
        quicktime_write_chunk_footer(file,
                                     trak,
                                     vtrack->current_chunk,
                                     &chunk_atom,
                                     1);
        vtrack->current_chunk++;
//printf("encode 3\n");
    }

    return result;
}
Code example #14
File: filelist.C Project: beequ7et/cinelerra-cv
int FileList::read_frame(VFrame *frame)
{
	int result = 0;
	if(file->current_frame < 0 || 
		(asset->use_header && file->current_frame >= path_list.total &&
			asset->format == list_type))
		return 1;

	if(asset->format == list_type)
	{
		char string[BCTEXTLEN];
		char *path;
		if(asset->use_header)
		{
			path = path_list.values[file->current_frame];
		}
		else
		{
			path = calculate_path(file->current_frame, string);
		}
		FILE *in;


// Fix path for VFS
		if(!strncmp(asset->path, RENDERFARM_FS_PREFIX, strlen(RENDERFARM_FS_PREFIX)))
			sprintf(string, "%s%s", RENDERFARM_FS_PREFIX, path);
		else
			strcpy(string, path);

		if(!(in = fopen(string, "rb")))
		{
			eprintf("Error while opening \"%s\" for reading. \n%m\n", string);
		}
		else
		{
			struct stat ostat;
			stat(string, &ostat);

			switch(frame->get_color_model())
			{
				case BC_COMPRESSED:
					frame->allocate_compressed_data(ostat.st_size);
					frame->set_compressed_size(ostat.st_size);
					fread(frame->get_data(), ostat.st_size, 1, in);
					break;
				default:
					data->allocate_compressed_data(ostat.st_size);
					data->set_compressed_size(ostat.st_size);
					fread(data->get_data(), ostat.st_size, 1, in);
					result = read_frame(frame, data);
					break;
			}


			fclose(in);
		}
	}
	else
	{
// Allocate and decompress once into temporary
//printf("FileList::read_frame %d\n", frame->get_color_model());
		if(!temp || temp->get_color_model() != frame->get_color_model())
		{
			if(temp) delete temp;
			temp = 0;
		
			FILE *fd = fopen(asset->path, "rb");
			if(fd)
			{
				struct stat ostat;
				stat(asset->path, &ostat);

				switch(frame->get_color_model())
				{
					case BC_COMPRESSED:
						frame->allocate_compressed_data(ostat.st_size);
						frame->set_compressed_size(ostat.st_size);
						fread(frame->get_data(), ostat.st_size, 1, fd);
						break;
					default:
						data->allocate_compressed_data(ostat.st_size);
						data->set_compressed_size(ostat.st_size);
						fread(data->get_data(), ostat.st_size, 1, fd);
						temp = new VFrame(0, 
							asset->width, 
							asset->height, 
							frame->get_color_model());
						read_frame(temp, data);
						break;
				}

				fclose(fd);
			}
			else
			{
				eprintf("Error while opening \"%s\" for reading. \n%m\n", asset->path);
				result = 1;
			}
		}

		if(!temp) return result;

// printf("FileList::read_frame frame=%d temp=%d\n", 
// frame->get_color_model(),
// temp->get_color_model());
		if(frame->get_color_model() == temp->get_color_model())
		{
			frame->copy_from(temp);
		}
		else
		{
// Never happens
			cmodel_transfer(frame->get_rows(), /* Leave NULL if non existent */
				temp->get_rows(),
				frame->get_y(), /* Leave NULL if non existent */
				frame->get_u(),
				frame->get_v(),
				temp->get_y(), /* Leave NULL if non existent */
				temp->get_u(),
				temp->get_v(),
				0,        /* Dimensions to capture from input frame */
				0, 
				asset->width, 
				asset->height,
				0,       /* Dimensions to project on output frame */
				0, 
				asset->width, 
				asset->height,
				temp->get_color_model(), 
				frame->get_color_model(),
				0,         /* When transferring BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
				temp->get_w(),       /* For planar use the luma rowspan */
				frame->get_w());
		}
	}


//printf("FileList::read_frame 5 %d\n", result);


	return result;
}
Code example #15
File: jpeg.c Project: knutj/cinelerra
static int decode(quicktime_t *file,
                  unsigned char **row_pointers,
                  int track)
{
    quicktime_video_map_t *vtrack = &(file->vtracks[track]);
    initialize(vtrack);
    quicktime_jpeg_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
    quicktime_trak_t *trak = vtrack->track;
    mjpeg_t *mjpeg = codec->mjpeg;
    long size, field2_offset = 0;
    int track_height = trak->tkhd.track_height;
    int track_width = trak->tkhd.track_width;
    int result = 0;
    int field_dominance = trak->mdia.minf.stbl.stsd.table[0].field_dominance;

    mjpeg_set_cpus(codec->mjpeg, file->cpus);
    if(file->row_span)
        mjpeg_set_rowspan(codec->mjpeg, file->row_span);
    else
        mjpeg_set_rowspan(codec->mjpeg, 0);

    quicktime_set_video_position(file, vtrack->current_position, track);
    size = quicktime_frame_size(file, vtrack->current_position, track);
    codec->buffer_size = size;

    if(size > codec->buffer_allocated)
    {
        codec->buffer_allocated = size;
        codec->buffer = realloc(codec->buffer, codec->buffer_allocated);
    }

    result = !quicktime_read_data(file, (char*)codec->buffer, size);
    /*
     * printf("decode 1 %02x %02x %02x %02x %02x %02x %02x %02x\n",
     * codec->buffer[0],
     * codec->buffer[1],
     * codec->buffer[2],
     * codec->buffer[3],
     * codec->buffer[4],
     * codec->buffer[5],
     * codec->buffer[6],
     * codec->buffer[7]
     * );
     */

    if(!result)
    {
        if(mjpeg_get_fields(mjpeg) == 2)
        {
            if(file->use_avi)
            {
                field2_offset = mjpeg_get_avi_field2(codec->buffer,
                                                     size,
                                                     &field_dominance);
            }
            else
            {
                field2_offset = mjpeg_get_quicktime_field2(codec->buffer,
                                size);
// Sanity check
                if(!field2_offset)
                {
                    printf("decode: FYI field2_offset=0\n");
                    field2_offset = mjpeg_get_field2(codec->buffer, size);
                }
            }
        }
        else
            field2_offset = 0;


//printf("decode 2 %d\n", field2_offset);
        /*
         * printf("decode result=%d field1=%llx field2=%llx size=%d %02x %02x %02x %02x\n",
         * result,
         * quicktime_position(file) - size,
         * quicktime_position(file) - size + field2_offset,
         * size,
         * codec->buffer[0],
         * codec->buffer[1],
         * codec->buffer[field2_offset + 0],
         * codec->buffer[field2_offset + 1]);
         */

        if(file->in_x == 0 &&
                file->in_y == 0 &&
                file->in_w == track_width &&
                file->in_h == track_height &&
                file->out_w == track_width &&
                file->out_h == track_height)
        {
            mjpeg_decompress(codec->mjpeg,
                             codec->buffer,
                             size,
                             field2_offset,
                             row_pointers,
                             row_pointers[0],
                             row_pointers[1],
                             row_pointers[2],
                             file->color_model,
                             file->cpus);
        }
        else
        {
            int i;
            unsigned char **temp_rows;
            int temp_cmodel = BC_YUV888;
            int temp_rowsize = cmodel_calculate_pixelsize(temp_cmodel) * track_width;

            if(!codec->temp_video)
                codec->temp_video = malloc(temp_rowsize * track_height);
            temp_rows = malloc(sizeof(unsigned char*) * track_height);
            for(i = 0; i < track_height; i++)
                temp_rows[i] = codec->temp_video + i * temp_rowsize;

//printf("decode 10\n");
            mjpeg_decompress(codec->mjpeg,
                             codec->buffer,
                             size,
                             field2_offset,
                             temp_rows,
                             temp_rows[0],
                             temp_rows[1],
                             temp_rows[2],
                             temp_cmodel,
                             file->cpus);

            cmodel_transfer(row_pointers,
                            temp_rows,
                            row_pointers[0],
                            row_pointers[1],
                            row_pointers[2],
                            temp_rows[0],
                            temp_rows[1],
                            temp_rows[2],
                            file->in_x,
                            file->in_y,
                            file->in_w,
                            file->in_h,
                            0,
                            0,
                            file->out_w,
                            file->out_h,
                            temp_cmodel,
                            file->color_model,
                            0,
                            track_width,
                            file->out_w);

//printf("decode 30\n");
            free(temp_rows);

//printf("decode 40\n");
        }
    }
//printf("decode 2 %d\n", result);

    return result;
}
Code example #16
int BC_Bitmap::read_frame(VFrame *frame,
                          int in_x,
                          int in_y,
                          int in_w,
                          int in_h,
                          int out_x,
                          int out_y,
                          int out_w,
                          int out_h)
{
    switch(color_model)
    {
// Hardware accelerated bitmap
    case BC_YUV420P:
        if(frame->get_color_model() == color_model)
        {
            memcpy(get_y_plane(), frame->get_y(), w * h);
            memcpy(get_u_plane(), frame->get_u(), w * h / 4);
            memcpy(get_v_plane(), frame->get_v(), w * h / 4);
            break;
        }
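// No break when the color models differ: each hardware case deliberately
// falls through to the software cmodel_transfer() path in the default case.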

    case BC_YUV422P:
        if(frame->get_color_model() == color_model)
        {
            memcpy(get_y_plane(), frame->get_y(), w * h);
            memcpy(get_u_plane(), frame->get_u(), w * h / 2);
            memcpy(get_v_plane(), frame->get_v(), w * h / 2);
            break;
        }

    case BC_YUV422:
        if(frame->get_color_model() == color_model)
        {
            memcpy(get_data(), frame->get_data(), w * h + w * h);
            break;
        }

// Software only
    default:
// printf("BC_Bitmap::read_frame %d -> %d %d %d %d %d -> %d %d %d %d\n",
// 				frame->get_color_model(),
// 				color_model,
// 				in_x,
// 				in_y,
// 				in_w,
// 				in_h,
// 				out_x,
// 				out_y,
// 				out_w,
// 				out_h);
//if(color_model == 6 && frame->get_color_model() == 19)
//printf("BC_Bitmap::read_frame 1 %d %d %d %d\n", frame->get_w(), frame->get_h(), get_w(), get_h());
        cmodel_transfer(row_data[current_ringbuffer],
                        frame->get_rows(),
                        get_y_plane(),
                        get_u_plane(),
                        get_v_plane(),
                        frame->get_y(),
                        frame->get_u(),
                        frame->get_v(),
                        in_x,
                        in_y,
                        in_w,
                        in_h,
                        out_x,
                        out_y,
                        out_w,
                        out_h,
                        frame->get_color_model(),
                        color_model,
                        bg_color,
                        frame->get_w(),
                        w);
// The color model transfer_*_to_TRANSPARENCY routines don't care about
// endianness, so the buffer is bit-swapped here if needed.
        if ((color_model == BC_TRANSPARENCY) && (!top_level->server_byte_order))
            transparency_bitswap();


//if(color_model == 6 && frame->get_color_model() == 19)
//printf("BC_Bitmap::read_frame 2\n");
        break;
    }


    return 0;
}
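
The memcpy() sizes in the example above encode the plane geometry of each YUV layout. A sketch of that arithmetic, assuming even width and height (the helper is hypothetical, for illustration only):

/* Per-plane byte counts implied by the copies above. */
static void yuv_plane_sizes(int w, int h, int color_model,
	int *y_size, int *u_size, int *v_size)
{
	switch(color_model)
	{
	case BC_YUV420P: /* planar, chroma subsampled 2x2 */
		*y_size = w * h;
		*u_size = *v_size = w * h / 4;
		break;
	case BC_YUV422P: /* planar, chroma subsampled 2x1 */
		*y_size = w * h;
		*u_size = *v_size = w * h / 2;
		break;
	case BC_YUV422:  /* packed, 2 bytes per pixel, one buffer */
		*y_size = w * h * 2;
		*u_size = *v_size = 0;
		break;
	default:
		*y_size = *u_size = *v_size = 0;
		break;
	}
}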
Code example #17
File: svg.C Project: petterreinholdtsen/cinelerra-hv
int SvgMain::process_realtime(VFrame *input_ptr, VFrame *output_ptr)
{
	int fh_lockfile;
	char filename_png[1024], filename_lock[1024];
	struct stat st_svg, st_png;
	int result_stat_png;
	VFrame *input, *output;
	input = input_ptr;
	output = output_ptr;

	
	need_reconfigure |= load_configuration();

	if (config.svg_file[0] == 0) {
		output->copy_from(input);
		return(0);
	}

	
	strcpy(filename_png, config.svg_file);
	strcat(filename_png, ".png");
	// get the time of the last PNG change
	result_stat_png = stat (filename_png, &st_png);
	


//	printf("PNg mtime: %li, last_load: %li\n", st_png.st_mtime, config.last_load);
	if (need_reconfigure || result_stat_png || (st_png.st_mtime > config.last_load)) {
		if (temp_frame)
			delete temp_frame;
		temp_frame = 0;
	}
	need_reconfigure = 0;
	
	if(!temp_frame) 
	{
		int result;
		VFrame *tmp2;
//		printf("PROCESSING: %s %li\n", filename_png, config.last_load);

		if ((result = stat(config.svg_file, &st_svg)))
		{
			printf(_("Error calling stat() on svg file: %s\n"), config.svg_file); 
		}
		if (force_png_render || result_stat_png || 
			st_png.st_mtime < st_svg.st_mtime) 
		{
			char command[1024];
			sprintf(command,
				"sodipodi --export-png=%s --export-width=%i --export-height=%i %s",
				filename_png, (int)config.in_w, (int)config.in_h, config.svg_file);
			printf(_("Running command %s\n"), command);
			system(command);
			stat(filename_png, &st_png);
			force_png_render = 0;
		}

		// advisory lock, so we wait for sodipodi to finish
		strcpy(filename_lock, filename_png);
		strcat(filename_lock, ".lock");
//		printf("Cinelerra locking %s\n", filename_lock);
		fh_lockfile = open (filename_lock, O_CREAT | O_RDWR, 0644); // O_CREAT requires a mode argument
		int res = lockf(fh_lockfile, F_LOCK, 10);    // Blocking call - will wait for sodipodi to finish!
//		printf("Cinelerra: filehandle: %i, cineres: %i, errno: %i\n", fh_lockfile, res, errno);
//		perror("Cineerror");
		int fh = open(filename_png, O_RDONLY);
		unsigned char *pngdata;
		// get the size again
		result_stat_png = fstat (fh, &st_png);

		pngdata = (unsigned char*) malloc(st_png.st_size + 4);
		*((int32_t *)pngdata) = st_png.st_size; 
//		printf("PNG size: %i\n", st_png.st_size);
		result = read(fh, pngdata+4, st_png.st_size);
		close(fh);
		// unlock the file
		lockf(fh_lockfile, F_ULOCK, 0);
		close(fh_lockfile);
//		printf("Cinelerra unlocking\n");

		config.last_load = st_png.st_mtime; // we just updated
		
		tmp2 = new VFrame(pngdata);
		temp_frame = new VFrame(0,
			tmp2->get_w(),
			tmp2->get_h(),
			output_ptr->get_color_model());
		free (pngdata);
	        cmodel_transfer(temp_frame->get_rows(),
	                tmp2->get_rows(),
	                0,
	                0,
	                0,
	                0,
	                0,
	                0,
	                0,
	                0,
	                tmp2->get_w(),
	                tmp2->get_h(),
	                0,
	                0,
	                temp_frame->get_w(),
	                temp_frame->get_h(),
	               	tmp2->get_color_model(),
	                temp_frame->get_color_model(),
	                0,
	                tmp2->get_w(),
	                temp_frame->get_w());

		delete tmp2;

	}

//printf("SvgMain::process_realtime 2 %p\n", input);


	if(!overlayer)
	{
		overlayer = new OverlayFrame(smp + 1);
	}

//	output->clear_frame();


// printf("SvgMain::process_realtime 3 output=%p input=%p config.w=%f config.h=%f"
// 	"%f %f %f %f -> %f %f %f %f\n", 
// 	output,
// 	input,
// 	config.w, 
// 	config.h,
// 	in_x1, 
// 	in_y1, 
// 	in_x2, 
// 	in_y2,
// 	out_x1, 
// 	out_y1, 
// 	out_x2, 
// 	out_y2);
	output->copy_from(input);
	overlayer->overlay(output,
		temp_frame,
		config.in_x,
		config.in_y,
		config.in_x + config.in_w,
		config.in_y + config.in_h,
		config.out_x,
		config.out_y,
		config.out_x + config.out_w,
		config.out_y + config.out_h,
		1,
		TRANSFER_NORMAL,
		get_interpolation_type());

	return(0);
}
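
The PNG load above relies on an implementation detail: the VFrame(unsigned char*) constructor is handed the compressed image prefixed with a 4-byte native-endian size. A sketch of the blob the plugin builds (the helper name is hypothetical):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Build the length-prefixed buffer that new VFrame(pngdata) receives above. */
static unsigned char *make_png_blob(const unsigned char *png, int32_t size)
{
	unsigned char *blob = (unsigned char*)malloc(size + 4);
	*((int32_t*)blob) = size;     /* 4-byte size prefix, native endian */
	memcpy(blob + 4, png, size);  /* PNG file bytes follow */
	return blob;
}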
Code example #18
File: qth264.c Project: Cuchulain/cinelerra
static int encode(quicktime_t *file, unsigned char **row_pointers, int track)
{
	int64_t offset = quicktime_position(file);
	quicktime_video_map_t *vtrack = &(file->vtracks[track]);
	quicktime_h264_codec_t *codec = ((quicktime_codec_t*)vtrack->codec)->priv;
	quicktime_trak_t *trak = vtrack->track;
	int width = quicktime_video_width(file, track);
	int height = quicktime_video_height(file, track);
	int w_2 = quicktime_quantize2(width);
// ffmpeg interprets the codec height as the presentation height
	int h_2 = quicktime_quantize2(height);
	int i;
	int result = 0;
	int bytes = 0;
	int is_keyframe = 0;
	int current_field = vtrack->current_position % codec->total_fields;
	quicktime_atom_t chunk_atom;
	unsigned char header[1024];
	int header_size = 0;
	int got_pps = 0;
	int got_sps = 0;
	quicktime_avcc_t *avcc = &trak->mdia.minf.stbl.stsd.table[0].avcc;






	pthread_mutex_lock(&h264_lock);

	if(!codec->encode_initialized[current_field])
	{
		codec->encode_initialized[current_field] = 1;
		codec->param.i_width = w_2;
		codec->param.i_height = h_2;
		codec->param.i_fps_num = quicktime_frame_rate_n(file, track);
		codec->param.i_fps_den = quicktime_frame_rate_d(file, track);

#if X264_BUILD >= 48
		codec->param.rc.i_rc_method = X264_RC_CQP;
#endif
// Reset quantizer if fixed bitrate
		x264_param_t default_params;
		x264_param_default(&default_params);
#if X264_BUILD < 48
		if(codec->param.rc.b_cbr)
#else
		if(codec->param.rc.i_qp_constant)
#endif
		{
			codec->param.rc.i_qp_constant = default_params.rc.i_qp_constant;
			codec->param.rc.i_qp_min = default_params.rc.i_qp_min;
			codec->param.rc.i_qp_max = default_params.rc.i_qp_max;
		}


		if(file->cpus > 1)
		{
			codec->param.i_threads = file->cpus;
		}

		codec->encoder[current_field] = x264_encoder_open(&codec->param);
		codec->pic[current_field] = calloc(1, sizeof(x264_picture_t));
//printf("encode 1 %d %d\n", codec->param.i_width, codec->param.i_height);
		x264_picture_alloc(codec->pic[current_field],
			X264_CSP_I420,
			codec->param.i_width,
			codec->param.i_height);
	}






	codec->pic[current_field]->i_type = X264_TYPE_AUTO;
	codec->pic[current_field]->i_qpplus1 = 0;


	if(codec->header_only)
	{
		bzero(codec->pic[current_field]->img.plane[0], w_2 * h_2);
		bzero(codec->pic[current_field]->img.plane[1], w_2 * h_2 / 4);
		bzero(codec->pic[current_field]->img.plane[2], w_2 * h_2 / 4);
	}
	else
	if(file->color_model == BC_YUV420P)
	{
		memcpy(codec->pic[current_field]->img.plane[0], row_pointers[0], w_2 * h_2);
		memcpy(codec->pic[current_field]->img.plane[1], row_pointers[1], w_2 * h_2 / 4);
		memcpy(codec->pic[current_field]->img.plane[2], row_pointers[2], w_2 * h_2 / 4);
	}
	else
	{
//printf("encode 2 %p %p %p\n", codec->pic[current_field]->img.plane[0], codec->pic[current_field]->img.plane[1], codec->pic[current_field]->img.plane[2]);
		cmodel_transfer(0, /* Leave NULL if non existent */
			row_pointers,
			codec->pic[current_field]->img.plane[0], /* Leave NULL if non existent */
			codec->pic[current_field]->img.plane[1],
			codec->pic[current_field]->img.plane[2],
			row_pointers[0], /* Leave NULL if non existent */
			row_pointers[1],
			row_pointers[2],
			0,        /* Dimensions to capture from input frame */
			0,
			width,
			height,
			0,       /* Dimensions to project on output frame */
			0,
			width,
			height,
			file->color_model,
			BC_YUV420P,
			0,         /* When transferring BC_RGBA8888 to non-alpha this is the background color in 0xRRGGBB hex */
			width,       /* For planar use the luma rowspan */
			codec->pic[current_field]->img.i_stride[0]);

	}












	x264_picture_t pic_out;
	x264_nal_t *nals;
	int nnal = 0;
	do
	{
		x264_encoder_encode(codec->encoder[current_field],
			&nals,
			&nnal,
			codec->pic[current_field],
			&pic_out);
//printf("encode %d nnal=%d\n", __LINE__, nnal);
	} while(codec->header_only && !nnal);
	int allocation = w_2 * h_2 * 3;
	if(!codec->work_buffer)
	{
		codec->work_buffer = calloc(1, allocation);
	}

	codec->buffer_size = 0;
//printf("encode %d nnal=%d\n", __LINE__, nnal);
	for(i = 0; i < nnal; i++)
	{
#if X264_BUILD >= 76
		int size = nals[i].i_payload;
		memcpy(codec->work_buffer + codec->buffer_size,
			nals[i].p_payload,
			nals[i].i_payload);
#else
		int size_return = 0;
		int size = x264_nal_encode(codec->work_buffer + codec->buffer_size,
			&size_return,
			1,
			nals + i);
#endif
		unsigned char *ptr = codec->work_buffer + codec->buffer_size;

//printf("encode %d size=%d\n", __LINE__, size);
		if(size > 0)
		{
			if(size + codec->buffer_size > allocation)
			{
				printf("qth264.c %d: overflow size=%d allocation=%d\n",
					__LINE__,
					size,
					allocation);
			}

// Size of NAL for avc
			uint64_t avc_size = size - 4;

// Synthesize header.
// Hopefully all the parameter-set NALs are present in the first frame.
			if(!avcc->data_size)
			{
				if(header_size < 6)
				{
					header[header_size++] = 0x01;
					header[header_size++] = 0x4d;
					header[header_size++] = 0x40;
					header[header_size++] = 0x1f;
					header[header_size++] = 0xff;
					header[header_size++] = 0xe1;
				}

				int nal_type = (ptr[4] & 0x1f);
// Picture parameter or sequence parameter set
				if(nal_type == 0x7 && !got_sps)
				{
					got_sps = 1;
					header[header_size++] = (avc_size & 0xff00) >> 8;
					header[header_size++] = (avc_size & 0xff);
					memcpy(&header[header_size],
						ptr + 4,
						avc_size);
					header_size += avc_size;
				}
				else
				if(nal_type == 0x8 && !got_pps)
				{
					got_pps = 1;
// Number of PPS NALs.
					header[header_size++] = 0x1;
					header[header_size++] = (avc_size & 0xff00) >> 8;
					header[header_size++] = (avc_size & 0xff);
					memcpy(&header[header_size],
						ptr + 4,
						avc_size);
					header_size += avc_size;
				}

// Write header
				if(got_sps && got_pps)
				{
/*
 * printf("encode %d\n", __LINE__);
 * int j;
 * for(j = 0; j < header_size; j++)
 * {
 * printf("%02x ", header[j]);
 * }
 * printf("\n");
 */
					quicktime_set_avcc_header(avcc,
		  				header,
		  				header_size);
				}
			}