Example #1
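// Fill an AVPicture with the pixel data of a VFrame. For planar color
// models the plane pointers set by avpicture_fill() are overridden so
// that libavcodec works directly on the VFrame's own Y/U/V buffers.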
int FFMPEG::init_picture_from_frame(AVPicture *picture, VFrame *frame) {
    int cmodel = frame->get_color_model();
    PixelFormat pix_fmt = color_model_to_pix_fmt(cmodel);

    int size = avpicture_fill(picture, frame->get_data(), pix_fmt,
                              frame->get_w(), frame->get_h());

    if (size < 0) {
        printf("FFMPEG::init_picture failed\n");
        return 1;
    }

    if (cmodel_is_planar(frame->get_color_model())) {
        // override avpicture_fill() for planar types
        picture->data[0] = frame->get_y();
        picture->data[1] = frame->get_u();
        picture->data[2] = frame->get_v();
    }

    return size;
}
Example #2
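// Read one frame from the YUV stream into the given VFrame. Compressed
// frames are copied through unchanged; frames whose color model or size
// does not match the stream are read into a temporary frame and then
// converted with FFMPEG::convert_cmodel().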
int FileYUV::read_frame(VFrame *frame)
{
	int result;
	VFrame *input = frame;

	// short cut for direct copy routines
	if (frame->get_color_model() == BC_COMPRESSED) {
		long frame_size = (long) // w*h + w*h/4 + w*h/4
			(stream->get_height() *	stream->get_width() * 1.5); 
		frame->allocate_compressed_data(frame_size);
		frame->set_compressed_size(frame_size);
		return stream->read_frame_raw(frame->get_data(), frame_size);
	}
	

	// process through a temp frame if necessary
	if (! cmodel_is_planar(frame->get_color_model()) ||
	    (frame->get_w() != stream->get_width()) ||
	    (frame->get_h() != stream->get_height())) 
	{
		ensure_temp(stream->get_width(), stream->get_height());
		input = temp;
	}

	uint8_t *yuv[3];
	yuv[0] = input->get_y();
	yuv[1] = input->get_u();
	yuv[2] = input->get_v();
	result = stream->read_frame(yuv);
	if (result) return result;

	// transfer from the temp frame to the real one
	if (input != frame) 
	{
		FFMPEG::convert_cmodel(input, frame);
	}
	
	return 0;
}
Example #3
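// Convert picture_in (pix_fmt_in, width_in x height_in) into the color
// model of frame_out. The conversion goes through img_convert() or, when
// built with swscale, sws_scale(); if the destination pixel format has no
// libavcodec equivalent, the data is moved with cmodel_transfer(), using
// an intermediate BC_RGBA8888 frame when the input pixel format has no
// direct color-model mapping either.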
int FFMPEG::convert_cmodel(AVPicture *picture_in, PixelFormat pix_fmt_in,
                           int width_in, int height_in, VFrame *frame_out) {

    // set up a temporary picture_out from frame_out
    AVPicture picture_out;
    init_picture_from_frame(&picture_out, frame_out);
    int cmodel_out = frame_out->get_color_model();
    PixelFormat pix_fmt_out = color_model_to_pix_fmt(cmodel_out);

#ifdef HAVE_SWSCALER
    // We need a context for swscale
    struct SwsContext *convert_ctx;
#endif
    int result;
#ifndef HAVE_SWSCALER
    // do conversion within libavcodec if possible
    if (pix_fmt_out != PIX_FMT_NB) {
        result = img_convert(&picture_out,
                             pix_fmt_out,
                             picture_in,
                             pix_fmt_in,
                             width_in,
                             height_in);
        if (result) {
            printf("FFMPEG::convert_cmodel img_convert() failed\n");
        }
        return result;
    }
#else
    convert_ctx = sws_getContext(width_in, height_in, pix_fmt_in,
                                 frame_out->get_w(), frame_out->get_h(), pix_fmt_out,
                                 SWS_BICUBIC, NULL, NULL, NULL);

    if(convert_ctx == NULL) {
        printf("FFMPEG::convert_cmodel : swscale context initialization failed\n");
        return 1;
    }

    result = sws_scale(convert_ctx,
                       picture_in->data, picture_in->linesize,
                       width_in, height_in,
                       picture_out.data, picture_out.linesize);


    sws_freeContext(convert_ctx);

    if(result) {
        printf("FFMPEG::convert_cmodel sws_scale() failed\n");
    }
#endif

    // make an intermediate temp frame only if necessary
    int cmodel_in = pix_fmt_to_color_model(pix_fmt_in);
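    // pix_fmt_to_color_model() apparently returns BC_TRANSPARENCY when the
    // pixel format has no direct color-model equivalent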
    if (cmodel_in == BC_TRANSPARENCY) {
        if (pix_fmt_in == PIX_FMT_RGB32) {
            // avoid infinite recursion if things are broken
            printf("FFMPEG::convert_cmodel pix_fmt_in broken!\n");
            return 1;
        }

        // NOTE: choose RGBA8888 as a hopefully non-lossy colormodel
        VFrame *temp_frame = new VFrame(0, width_in, height_in,
                                        BC_RGBA8888);
        if (convert_cmodel(picture_in, pix_fmt_in,
                           width_in, height_in, temp_frame)) {
            delete temp_frame;
            return 1;  // recursed call will print error message
        }

        int result = convert_cmodel(temp_frame, frame_out);
        delete temp_frame;
        return result;
    }


    // NOTE: no scaling possible in img_convert() so none possible here
    if (frame_out->get_w() != width_in ||
            frame_out->get_h() != height_in) {
        printf("scaling from %dx%d to %dx%d not allowed\n",
               width_in, height_in,
               frame_out->get_w(), frame_out->get_h());
        return 1;
    }


    // if we reach here we know that cmodel_transfer() will work
    uint8_t *yuv_in[3] = {0,0,0};
    uint8_t *row_pointers_in[height_in];
    if (cmodel_is_planar(cmodel_in)) {
        yuv_in[0] = picture_in->data[0];
        yuv_in[1] = picture_in->data[1];
        yuv_in[2] = picture_in->data[2];
    }
    else {
        // set row pointers for picture_in
        uint8_t *data = picture_in->data[0];
        // bytes per input row = pixel size * row width
        int bytes_per_line =
            cmodel_calculate_pixelsize(cmodel_in) * width_in;
        for (int i = 0; i < height_in; i++) {
            row_pointers_in[i] = data + i * bytes_per_line;
        }
    }

    cmodel_transfer(
        // Packed data out
        frame_out->get_rows(),
        // Packed data in
        row_pointers_in,

        // Planar data out
        frame_out->get_y(), frame_out->get_u(), frame_out->get_v(),
        // Planar data in
        yuv_in[0], yuv_in[1], yuv_in[2],

        // Dimensions in
        0, 0, width_in, height_in,  // NOTE: dimensions are same
        // Dimensions out
        0, 0, width_in, height_in,

        // Color model in, color model out
        cmodel_in, cmodel_out,

        // Background color
        0,

        // Rowspans in, out (of luma for YUV)
        width_in, width_in

    );

    return 0;
}
Example #4
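// Write frames to the YUV stream (only one layer is supported). Compressed
// frames from a FILE_YUV asset are written raw, other compressed frames are
// decoded through FFMPEG first, and uncompressed frames are routed through
// a temporary frame when their color model or size does not match the
// stream.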
int FileYUV::write_frames(VFrame ***layers, int len)
{
	int result;

	// only one layer supported
	VFrame **frames = layers[0];
	VFrame *frame;

	for (int n = 0; n < len; n++) 
	{
		frame = frames[n];

		// short cut for direct copy routines
		if (frame->get_color_model() == BC_COMPRESSED) 
		{
			long frame_size = frame->get_compressed_size();
			if (incoming_asset->format == FILE_YUV) 
				return stream->write_frame_raw(frame->get_data(), frame_size);

			// decode and write an encoded frame
			if (FFMPEG::codec_id(incoming_asset->vcodec) != CODEC_ID_NONE) 
			{
				if (! ffmpeg) 
				{
					ffmpeg = new FFMPEG(incoming_asset);
					ffmpeg->init(incoming_asset->vcodec);
				}
				
				ensure_temp(incoming_asset->width, incoming_asset->height); 
				int result = ffmpeg->decode(frame->get_data(), frame_size, temp);

				// some formats are decoded one frame later
				if (result == FFMPEG_LATENCY) 
				{
					// remember to write the last frame
					pipe_latency++;
					return 0;
				}

				if (result) 
				{
					delete ffmpeg;
					ffmpeg = 0;
					return 1;
				}


				uint8_t *yuv[3];
				yuv[0] = temp->get_y();
				yuv[1] = temp->get_u();
				yuv[2] = temp->get_v();
				return stream->write_frame(yuv);
			}
		}

		// process through a temp frame only if necessary
		if (! cmodel_is_planar(frame->get_color_model()) ||
		    (frame->get_w() != stream->get_width()) ||
		    (frame->get_h() != stream->get_height())) 
		{
			ensure_temp(asset->width, asset->height);
			FFMPEG::convert_cmodel(frame, temp);
			frame = temp;
		}

		uint8_t *yuv[3];
		yuv[0] = frame->get_y();
		yuv[1] = frame->get_u();
		yuv[2] = frame->get_v();
		result = stream->write_frame(yuv);
		if (result) return result;
	}

	return 0;
}