Example #1
piximage * pix_alloc(pixosi pix, unsigned width, unsigned height) {
	piximage * image = (piximage *) malloc(sizeof(piximage));
	if (!image) {
		return NULL;
	}

	// Initialize libavcodec before using its allocation helpers
	avcodec_init();

	image->width = width;
	image->height = height;
	image->palette = pix;

	// Allocate the pixel buffer sized for the given palette and dimensions
	image->data = (uint8_t *) av_malloc(pix_size(pix, width, height) * sizeof(uint8_t));
	if (!image->data) {
		free(image);
		image = NULL;
	}

	return image;
}
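
A minimal usage sketch (not part of the original example), pairing the allocation above with the cleanup implied by its own calls; the palette and dimensions are illustrative:

// Illustrative only: allocate, use, and release a piximage.
piximage * img = pix_alloc(PIX_OSI_YUV420P, 320, 240);
if (img) {
	/* ... fill and use img->data ... */
	av_free(img->data);  // buffer came from av_malloc() in pix_alloc()
	free(img);           // struct came from malloc() in pix_alloc()
}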
Example #2
pixerrorcode pix_convert(int flags, piximage * img_dst, piximage * img_src) {

	uint8_t * buf_source = img_src->data;
	int need_avfree = 0;

	// If the source format is NV12, convert it to YUV420P first
	if (img_src->palette == PIX_OSI_NV12) {
		buf_source = _nv12_to_yuv420p(img_src->data, img_src->width, img_src->height);
		need_avfree = 1;
		img_src->palette = PIX_OSI_YUV420P;
	}
	////

	int need_resize = 0;

	//Check if the piximage needs to be resized
	if ((img_src->width != img_dst->width) || (img_src->height != img_dst->height)) {
		need_resize = 1;
	}
	////

	int len_target = pix_size(img_dst->palette, img_src->width, img_src->height);

	int pix_fmt_source = pix_ffmpeg_from_pix_osi(img_src->palette);
	int pix_fmt_target = pix_ffmpeg_from_pix_osi(img_dst->palette);

	AVPicture avp_source, avp_target;
	avpicture_fill(&avp_source,  buf_source, pix_fmt_source, img_src->width, img_src->height);
	avpicture_fill(&avp_target, img_dst->data, pix_fmt_target, img_dst->width, img_dst->height);

	//FIXME Only flip other planes if the destination palette is YUV420
	if ((flags & PIX_FLIP_HORIZONTALLY) && (img_src->palette == PIX_OSI_YUV420P)) {
		avp_source.data[0] += avp_source.linesize[0] * (img_src->height - 1);
		avp_source.linesize[0] *= -1;

		if (pix_fmt_source == PIX_FMT_YUV420P) {
			avp_source.data[1] += avp_source.linesize[1] * (img_src->height / 2 - 1);
			avp_source.linesize[1] *= -1;
			avp_source.data[2] += avp_source.linesize[2] * (img_src->height / 2 - 1);
			avp_source.linesize[2] *= -1;
		}
	}

	// Resize the picture if needed (still needs testing)
	if (need_resize) {

		// Resampling only works YUV420P -> YUV420P in current ffmpeg
		if (pix_fmt_source != PIX_FMT_YUV420P) {
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}

		//TODO optimize this part but will need the preparation of contexts
		ImgReSampleContext * resample_context = img_resample_init(img_dst->width, img_dst->height,
			img_src->width, img_src->height);

		if (!resample_context) {
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}

		AVPicture avp_tmp_target;

		// Prepare a temporary buffer for the resampled frame
		uint8_t * buf_tmp_target = (uint8_t *) av_malloc(avpicture_get_size(pix_fmt_source, img_dst->width, img_dst->height) * sizeof(uint8_t));
		if (!buf_tmp_target) {
			img_resample_close(resample_context);
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}
		avpicture_fill(&avp_tmp_target, buf_tmp_target, pix_fmt_source, img_dst->width, img_dst->height);
		//

		//do the resampling
		img_resample(resample_context, &avp_tmp_target, &avp_source);
		img_resample_close(resample_context);
		//

		// Do the conversion
		if (img_convert(&avp_target, pix_fmt_target,
			&avp_tmp_target, pix_fmt_source,
			img_dst->width, img_dst->height) == -1) {

			av_free(buf_tmp_target);
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}
		av_free(buf_tmp_target);
		//

	} else {
		if (img_convert(&avp_target, pix_fmt_target,
			&avp_source, pix_fmt_source,
			img_src->width, img_src->height) == -1) {
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}
	}
	////

	if (need_avfree) {
		av_free(buf_source);
	}

	return PIX_OK;
}
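
A hedged usage sketch for pix_convert (not part of the original example); the frame size is illustrative, and it reuses pix_alloc from Example #1 together with the constants referenced in the function body:

// Illustrative only: convert an NV12 capture into a YUV420P frame,
// flipping it horizontally on the way.
piximage * src = pix_alloc(PIX_OSI_NV12, 640, 480);
piximage * dst = pix_alloc(PIX_OSI_YUV420P, 640, 480);
if (src && dst) {
	/* ... fill src->data with an NV12 frame ... */
	if (pix_convert(PIX_FLIP_HORIZONTALLY, dst, src) != PIX_OK) {
		/* handle the conversion failure */
	}
	/* release src and dst as in the sketch after Example #1 */
}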
Example #3
piximage * pix_copy(piximage * src) {
	piximage * result = pix_alloc(src->palette, src->width, src->height);
	if (!result) {
		return NULL;
	}
	memcpy(result->data, src->data, pix_size(src->palette, src->width, src->height));

	return result;
}
Example #4
/**
 * PDF images are handled as inline images, not as XObject streams...
 */
void PDF_PLOTTER::PlotImage( const wxImage & aImage, const wxPoint& aPos,
                            double aScaleFactor )
{
    wxASSERT( workFile );
    wxSize pix_size( aImage.GetWidth(), aImage.GetHeight() );

    // Requested size (in IUs)
    DPOINT drawsize( aScaleFactor * pix_size.x,
                     aScaleFactor * pix_size.y );

    // calculate the bitmap start position
    wxPoint start( aPos.x - drawsize.x / 2,
                   aPos.y + drawsize.y / 2);

    DPOINT dev_start = userToDeviceCoordinates( start );

    /* PDF has an, uhm... simplified coordinate system handling. There is
       *one* operator to do everything (the PS concat equivalent). At least
       they kept the matrix stack to save/restore environments. Also, images
       are always emitted at the origin with a size of 1x1 user units.
       What we need to do is:
       1) save the CTM and establish the new one
       2) plot the image
       3) restore the CTM
       4) profit
     */
    fprintf( workFile, "q %g 0 0 %g %g %g cm\n", // Step 1
            userToDeviceSize( drawsize.x ),
            userToDeviceSize( drawsize.y ),
            dev_start.x, dev_start.y );

    /* An inline image is a cross between a dictionary and a stream.
       A really ugly construct (compared with the elegance of the rest of
       the PDF format). It also accepts some 'abbreviations', which is
       pointless since the content stream is usually compressed anyway... */
    fprintf( workFile,
             "BI\n"
             "  /BPC 8\n"
             "  /CS %s\n"
             "  /W %d\n"
             "  /H %d\n"
             "ID\n", colorMode ? "/RGB" : "/G", pix_size.x, pix_size.y );

    /* Here comes the stream (in binary!). It *could* have been hex or
       ASCII85 encoded, but it goes through zlib anyway, so why bother? */
    for( int y = 0; y < pix_size.y; y++ )
    {
        for( int x = 0; x < pix_size.x; x++ )
        {
            unsigned char r = aImage.GetRed( x, y ) & 0xFF;
            unsigned char g = aImage.GetGreen( x, y ) & 0xFF;
            unsigned char b = aImage.GetBlue( x, y ) & 0xFF;
            // As usual, stdio buffering has to suffer the byte-by-byte writes
            if( colorMode )
            {
                putc( r, workFile );
                putc( g, workFile );
                putc( b, workFile );
            }
            else
            {
                // Grayscale conversion
                putc( (r + g + b) / 3, workFile );
            }
        }
    }

    fputs( "EI Q\n", workFile ); // Finish step 2 and do step 3
}
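
For orientation (an illustration, not output captured from the plotter), the content-stream fragment this method emits for an RGB image looks roughly like the following; the cm matrix scales the 1x1 unit image up to the requested device size, and the raw samples sit between ID and EI, as in the fprintf/putc calls above. The numbers are illustrative:

q 160 0 0 120 200 300 cm
BI
  /BPC 8
  /CS /RGB
  /W 320
  /H 240
ID
<320 * 240 * 3 bytes of raw RGB samples>
EI Q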