Example #1
bool
test_exact()
{
	int i, Bpp, channels;
	float *tmp_float;
	GLubyte *data, *observed;
	GLint tex_width, tex_height;
	bool pass = true;

	if (format->data_type == GL_NONE) {
		piglit_report_subtest_result(PIGLIT_SKIP,
					     "Exact upload-download of %s",
					     piglit_get_gl_enum_name(format->internal_format));
		return true;
	}

	channels = num_channels(format->format);
	Bpp = bytes_per_pixel(format->format, format->data_type);

	if (format->data_type == GL_FLOAT) {
		/* Sanitize so we don't get invalid floating point values */
		tmp_float = malloc(texture_size * texture_size *
				   channels * sizeof(float));
		for (i = 0; i < texture_size * texture_size * channels; ++i)
			tmp_float[i] = sn_to_float(32, ((GLint *)rand_data)[i]);
		data = (GLubyte *)tmp_float;
	} else {
		tmp_float = NULL;
		data = rand_data;
	}

	glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
	glTexImage2D(GL_TEXTURE_2D, 0, format->internal_format,
		     texture_size, texture_size, 0, format->format,
		     format->data_type, data);
	pass &= piglit_check_gl_error(GL_NO_ERROR);

	glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &tex_width);
	glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &tex_height);
	glPixelStorei(GL_PACK_ALIGNMENT, 1);
	observed = malloc(tex_width * tex_height * Bpp);

	glGetTexImage(GL_TEXTURE_2D, 0, format->format, format->data_type,
		      observed);
	pass &= piglit_check_gl_error(GL_NO_ERROR);

	for (i = 0; i < texture_size; ++i)
		pass &= memcmp(&data[i * texture_size * Bpp],
			       &observed[i * tex_width * Bpp],
			       texture_size * Bpp) == 0;

	free(observed);
	free(tmp_float);

	piglit_report_subtest_result(pass ? PIGLIT_PASS : PIGLIT_FAIL,
				     "Exact upload-download of %s",
				     piglit_get_gl_enum_name(format->internal_format));

	return pass;
}
Example #2
/*
 * This function is called to initialize a buffer for logical IPU channel.
 *
 * @param       channel         Input parameter for the logical channel ID.
 *
 * @param       type            Input parameter which buffer to initialize.
 *
 * @param       pixel_fmt       Input parameter for pixel format of buffer.
 *                              Pixel format is a FOURCC ASCII code.
 *
 * @param       width           Input parameter for width of buffer in pixels.
 *
 * @param       height          Input parameter for height of buffer in pixels.
 *
 * @param       stride          Input parameter for stride length of buffer
 *                              in bytes.
 *
 * @param       phyaddr_0       Input parameter for buffer 0 physical address.
 *
 * @param       phyaddr_1       Input parameter for buffer 1 physical address.
 *                              Setting this to a non-zero address enables
 *                              double buffering mode.
 *
 * @param       u               Private U offset for additional cropping,
 *                              zero if not used.
 *
 * @param       v               Private V offset for additional cropping,
 *                              zero if not used.
 *
 * @return      Returns 0 on success or a negative error code on failure.
 */
int32_t ipu_init_channel_buffer(ipu_channel_t channel, ipu_buffer_t type,
				uint32_t pixel_fmt,
				uint16_t width, uint16_t height,
				uint32_t stride,
				dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
				uint32_t u, uint32_t v)
{
	uint32_t reg;
	uint32_t dma_chan;

	dma_chan = channel_2_dma(channel, type);
	if (!idma_is_valid(dma_chan))
		return -EINVAL;

	if (stride < width * bytes_per_pixel(pixel_fmt))
		stride = width * bytes_per_pixel(pixel_fmt);

	if (stride % 4) {
		printf("Stride not 32-bit aligned, stride = %u\n", stride);
		return -EINVAL;
	}
	/* Build parameter memory data for DMA channel */
	ipu_ch_param_init(dma_chan, pixel_fmt, width, height, stride, u, v, 0,
			   phyaddr_0, phyaddr_1);

	if (ipu_is_dmfc_chan(dma_chan)) {
		ipu_dmfc_set_wait4eot(dma_chan, width);
	}

	if (idma_is_set(IDMAC_CHA_PRI, dma_chan))
		ipu_ch_param_set_high_priority(dma_chan);

	ipu_ch_param_dump(dma_chan);

	reg = __raw_readl(IPU_CHA_DB_MODE_SEL(dma_chan));
	if (phyaddr_1)
		reg |= idma_mask(dma_chan);
	else
		reg &= ~idma_mask(dma_chan);
	__raw_writel(reg, IPU_CHA_DB_MODE_SEL(dma_chan));

	/* Reset to buffer 0 */
	__raw_writel(idma_mask(dma_chan), IPU_CHA_CUR_BUF(dma_chan));

	return 0;
}
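
A minimal caller sketch for the function above, assuming the surrounding driver headers and a caller-supplied channel, buffer type and physical address. The 640x480 UYVY frame is purely illustrative; the stride is passed in bytes, matching the clamp and 32-bit alignment checks in the implementation.

/* Hypothetical helper: set up a single 640x480 UYVY frame for the given
 * channel, with double buffering disabled (second address is 0). */
static int example_init_output_buffer(ipu_channel_t chan, ipu_buffer_t buf_type,
				      dma_addr_t frame_phys)
{
	uint16_t width = 640, height = 480;   /* assumed frame size */
	uint32_t stride = width * 2;          /* UYVY: 2 bytes per pixel */

	return ipu_init_channel_buffer(chan, buf_type, IPU_PIX_FMT_UYVY,
				       width, height, stride,
				       frame_phys, 0 /* no double buffering */,
				       0, 0 /* no extra U/V cropping */);
}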
Example #3
static int get_fbinfo(l4re_video_view_info_t *vinfo)
{
    vinfo->width               = width();
    vinfo->height              = height();
    vinfo->bytes_per_line      = bytes_per_pixel() * vinfo->width;

    vinfo->pixel_info.bytes_per_pixel = bytes_per_pixel();
    vinfo->pixel_info.r.shift         = 11;
    vinfo->pixel_info.r.size          = 5;
    vinfo->pixel_info.g.shift         = 5;
    vinfo->pixel_info.g.size          = 6;
    vinfo->pixel_info.b.shift         = 0;
    vinfo->pixel_info.b.size          = 5;
    vinfo->pixel_info.a.shift         = 0;
    vinfo->pixel_info.a.size          = 0;
    return 0;
}
/*
 * Set clipping. Adjust the requested values to fit; if they still cannot
 * fit, return -EINVAL.
 */
static int set_clipping(struct vino_device *v, int x, int y, int w, int h,
			int d)
{
	int maxwidth, maxheight, lsize;

	if (d < 1)
		d = 1;
	if (d > 8)
		d = 8;
	if (w / d < VINO_MIN_WIDTH || h / d < VINO_MIN_HEIGHT)
		return -EINVAL;
	if (get_capture_norm(v) == VIDEO_MODE_NTSC) {
		maxwidth = VINO_NTSC_WIDTH;
		maxheight = VINO_NTSC_HEIGHT;
	} else {
		maxwidth = VINO_PAL_WIDTH;
		maxheight = VINO_PAL_HEIGHT;
	}
	if (x < 0)
		x = 0;
	if (y < 0)
		y = 0;
	y &= ~1;	/* odd/even fields */
	if (x + w > maxwidth) {
		w = maxwidth - x;
		if (w / d < VINO_MIN_WIDTH)
			x = maxwidth - VINO_MIN_WIDTH * d;
	}
	if (y + h > maxheight) {
		h = maxheight - y;
		if (h / d < VINO_MIN_HEIGHT)
			y = maxheight - VINO_MIN_HEIGHT * d;
	}
	/* line size must be multiple of 8 bytes */
	lsize = (bytes_per_pixel(v) * w / d) & ~7;
	w = lsize * d / bytes_per_pixel(v);
	v->left = x;
	v->top = y;
	v->right = x + w;
	v->bottom = y + h;
	v->decimation = d;
	v->line_size = lsize;
	DEBUG("VINO: clipping %d, %d, %d, %d / %d - %d\n", v->left, v->top,
	      v->right, v->bottom, v->decimation, v->line_size);
	return 0;
}
Image::Image(const Image* rhs) {
	initialize(
		rhs->color_encoding(), rhs->width(), rhs->height(), rhs->depth()
		) ;
	::memcpy(
		base_mem(), rhs->base_mem(),
		bytes_per_pixel() * width() * height() * depth() 
		) ;
}
	void TGAReader::read_run_length_encoded(std::vector<std::uint8_t> &dest)
	{
		std::size_t pos = 0;

		while (pos < dest.size())
		{
			if (rle_state.bytes_left == 0)
			{
				std::uint8_t repetition_count;
				read(&repetition_count, 1);

				rle_state.raw = repetition_count < 0x80;

				if (rle_state.raw)
				{
					rle_state.bytes_left = (repetition_count + 1u) * bytes_per_pixel();
				}
				else
				{
					rle_state.bytes_left = (repetition_count - 127u) * bytes_per_pixel();
					rle_state.pixel_pos = 0;
					read(rle_state.pixel, bytes_per_pixel());
				}
			}

			if (rle_state.raw)
			{
				auto size = std::min(dest.size() - pos, static_cast<std::size_t>(rle_state.bytes_left));
				read(&dest[pos], size);
				rle_state.bytes_left -= size;
				pos += size;
			}
			else
			{
				while (pos < dest.size() && rle_state.bytes_left > 0)
				{
					dest[pos++] = rle_state.pixel[rle_state.pixel_pos++];
					rle_state.bytes_left--;
					if (rle_state.pixel_pos >= bytes_per_pixel())
						rle_state.pixel_pos = 0;
				}
			}
		}
	}
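
As a side note on the packet rule used above: the top bit of the TGA count byte selects between a raw packet and a run packet, and the remaining seven bits encode one less than the pixel count. A small self-contained sketch (with an assumed 3-byte pixel size) works that arithmetic through:

#include <stdio.h>

int main(void)
{
	const unsigned bpp = 3;          /* assumed BGR24 pixels */
	unsigned char raw_hdr = 0x05;    /* raw packet: 6 literal pixels follow */
	unsigned char run_hdr = 0x85;    /* run packet: one pixel repeated 6 times */

	unsigned raw_bytes = (raw_hdr + 1u) * bpp;    /* bytes copied straight through */
	unsigned run_bytes = (run_hdr - 127u) * bpp;  /* bytes expanded from one stored pixel */

	printf("raw packet: %u source bytes copied verbatim\n", raw_bytes);
	printf("run packet: %u destination bytes from a single stored pixel\n", run_bytes);
	return 0;
}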
Example #7
bool ImageSurface::allocate(const IntSize& size, SurfaceFormat f)
{
    printf("ImageSurface::allocate\n");
    uint32_t bpp = bytes_per_pixel(f);
    _size = size;
    _stride = bpp * size.width;
    uint32_t buf_size = size.width * size.height * bpp;
    if (buf_size) {
        _data = new byte[buf_size];
    }
    return buf_size != 0;
}
Example #8
File: _curve.c Project: 5um1th/thumbor
static PyObject*
_curve_apply(PyObject *self, PyObject *args)
{
    char *image_mode;
    PyObject *buffer = NULL, *curve_a = NULL, *curve_r = NULL, *curve_g = NULL, *curve_b = NULL;

    if (!PyArg_ParseTuple(args, "sOOOOO:apply", &image_mode, &buffer, &curve_a, &curve_r, &curve_g, &curve_b)) {
        return NULL;
    }

    unsigned char *points_a = cubic_spline_interpolation(get_curve(curve_a), PyTuple_Size(curve_a), 256),
                  *points_r = cubic_spline_interpolation(get_curve(curve_r), PyTuple_Size(curve_r), 256),
                  *points_g = cubic_spline_interpolation(get_curve(curve_g), PyTuple_Size(curve_g), 256),
                  *points_b = cubic_spline_interpolation(get_curve(curve_b), PyTuple_Size(curve_b), 256);

    Py_ssize_t size = PyString_Size(buffer);
    unsigned char *ptr = (unsigned char *) PyString_AsString(buffer);
    int num_bytes = bytes_per_pixel(image_mode);

    int r_idx = rgb_order(image_mode, 'R'),
        g_idx = rgb_order(image_mode, 'G'),
        b_idx = rgb_order(image_mode, 'B'),
        i = 0, r, g, b;

    size -= num_bytes;

    for (; i <= size; i += num_bytes) {
        r = ptr[i + r_idx];
        g = ptr[i + g_idx];
        b = ptr[i + b_idx];

        r = points_r[r];
        g = points_g[g];
        b = points_b[b];

        r = points_a[r];
        g = points_a[g];
        b = points_a[b];

        ptr[i + r_idx] = ADJUST_COLOR(r);
        ptr[i + g_idx] = ADJUST_COLOR(g);
        ptr[i + b_idx] = ADJUST_COLOR(b);
    }

    free(points_a);
    free(points_r);
    free(points_g);
    free(points_b);

    Py_INCREF(buffer);
    return buffer;
}
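
The heart of the filter above is two chained 256-entry lookups per channel: the channel's own curve first, then the shared curve points_a. A tiny standalone sketch, with an identity table and an inverting table standing in for the cubic_spline_interpolation() output, illustrates the chaining:

#include <stdio.h>

int main(void)
{
	unsigned char points_r[256], points_a[256];
	int i;

	for (i = 0; i < 256; ++i) {
		points_r[i] = (unsigned char)i;           /* identity red curve */
		points_a[i] = (unsigned char)(255 - i);   /* inverting "all" curve */
	}

	unsigned char r = 100;
	unsigned char out = points_a[points_r[r]];        /* chained LUT lookups, as in the loop above */
	printf("r = %u -> %u\n", r, out);
	return 0;
}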
Example #9
SkPngCodec::SkPngCodec(const SkEncodedInfo& encodedInfo, const SkImageInfo& imageInfo,
                       SkStream* stream, SkPngChunkReader* chunkReader, png_structp png_ptr,
                       png_infop info_ptr, int bitDepth, int numberPasses)
    : INHERITED(encodedInfo, imageInfo, stream)
    , fPngChunkReader(SkSafeRef(chunkReader))
    , fPng_ptr(png_ptr)
    , fInfo_ptr(info_ptr)
    , fSwizzlerSrcRow(nullptr)
    , fColorXformSrcRow(nullptr)
    , fSrcRowBytes(imageInfo.width() * (bytes_per_pixel(this->getEncodedInfo().bitsPerPixel())))
    , fNumberPasses(numberPasses)
    , fBitDepth(bitDepth)
{}
Example #10
static int set_scaling(struct vino_device *v, int w, int h)
{
	int maxwidth, maxheight, lsize, d;

	if (w < VINO_MIN_WIDTH || h < VINO_MIN_HEIGHT)
		return -EINVAL;
	if (get_capture_norm(v) == VIDEO_MODE_NTSC) {
		maxwidth = VINO_NTSC_WIDTH;
		maxheight = VINO_NTSC_HEIGHT;
	} else {
		maxwidth = VINO_PAL_WIDTH;
		maxheight = VINO_PAL_HEIGHT;
	}
	if (w > maxwidth)
		w = maxwidth;
	if (h > maxheight)
		h = maxheight;
	d = max(maxwidth / w, maxheight / h);
	if (d > 8)
		d = 8;
	/* line size must be multiple of 8 bytes */
	lsize = (bytes_per_pixel(v) * w) & ~7;
	w = lsize * d / bytes_per_pixel(v);
	h *= d;
	if (v->left + w > maxwidth)
		v->left = maxwidth - w;
	if (v->top + h > maxheight)
		v->top = (maxheight - h) & ~1;	/* odd/even fields */
	/* FIXME: -1 bug... Verify clipping with video signal generator */
	v->right = v->left + w;
	v->bottom = v->top + h;
	v->decimation = d;
	v->line_size = lsize;
	DEBUG("VINO: scaling %d, %d, %d, %d / %d - %d\n", v->left, v->top,
	      v->right, v->bottom, v->decimation, v->line_size);

	return 0;
}
	Image TGAReader::read_image_data()
	{
		Image image(header.image.width, header.image.height);
		std::vector<std::uint8_t> row(bytes_per_pixel() * header.image.width);

		for (unsigned int y = 0; y < header.image.height; y++)
		{
			if (header.run_length_encoded)
				read_run_length_encoded(row);
			else
				read(row);

			for (unsigned int x = 0; x < header.image.width; x++)
				image.write_pixel(x_to_image_x(x), y_to_image_y(y), pixel_to_color(row, x));
		}

		return image;
	}
Example #12
    Result onStartScanlineDecode(const SkImageInfo& dstInfo, const Options& options,
            SkPMColor ctable[], int* ctableCount) override {
        if (!conversion_possible(dstInfo, this->getInfo())) {
            return kInvalidConversion;
        }

        const Result result = this->initializeSwizzler(dstInfo, options, ctable,
                                                       ctableCount);
        if (result != kSuccess) {
            return result;
        }

        fStorage.reset(this->getInfo().width() *
                (bytes_per_pixel(this->getEncodedInfo().bitsPerPixel())));
        fSrcRow = fStorage.get();

        return kSuccess;
    }
Example #13
void NQD_bitblt(uint32 p)
{
	D(bug("accl_bitblt %08x\n", p));

	// Get blitting parameters
	int16 src_X  = (int16)ReadMacInt16(p + acclSrcRect + 2) - (int16)ReadMacInt16(p + acclSrcBoundsRect + 2);
	int16 src_Y  = (int16)ReadMacInt16(p + acclSrcRect + 0) - (int16)ReadMacInt16(p + acclSrcBoundsRect + 0);
	int16 dest_X = (int16)ReadMacInt16(p + acclDestRect + 2) - (int16)ReadMacInt16(p + acclDestBoundsRect + 2);
	int16 dest_Y = (int16)ReadMacInt16(p + acclDestRect + 0) - (int16)ReadMacInt16(p + acclDestBoundsRect + 0);
	int16 width  = (int16)ReadMacInt16(p + acclDestRect + 6) - (int16)ReadMacInt16(p + acclDestRect + 2);
	int16 height = (int16)ReadMacInt16(p + acclDestRect + 4) - (int16)ReadMacInt16(p + acclDestRect + 0);
	D(bug(" src addr %08x, dest addr %08x\n", ReadMacInt32(p + acclSrcBaseAddr), ReadMacInt32(p + acclDestBaseAddr)));
	D(bug(" src X %d, src Y %d, dest X %d, dest Y %d\n", src_X, src_Y, dest_X, dest_Y));
	D(bug(" width %d, height %d\n", width, height));

	// And perform the blit
	const int bpp = bytes_per_pixel(ReadMacInt32(p + acclSrcPixelSize));
	width *= bpp;
	if ((int32)ReadMacInt32(p + acclSrcRowBytes) > 0) {
		const int src_row_bytes = (int32)ReadMacInt32(p + acclSrcRowBytes);
		const int dst_row_bytes = (int32)ReadMacInt32(p + acclDestRowBytes);
		uint8 *src = Mac2HostAddr(ReadMacInt32(p + acclSrcBaseAddr) + (src_Y * src_row_bytes) + (src_X * bpp));
		uint8 *dst = Mac2HostAddr(ReadMacInt32(p + acclDestBaseAddr) + (dest_Y * dst_row_bytes) + (dest_X * bpp));
		for (int i = 0; i < height; i++) {
			memmove(dst, src, width);
			src += src_row_bytes;
			dst += dst_row_bytes;
		}
	}
	else {
		const int src_row_bytes = -(int32)ReadMacInt32(p + acclSrcRowBytes);
		const int dst_row_bytes = -(int32)ReadMacInt32(p + acclDestRowBytes);
		uint8 *src = Mac2HostAddr(ReadMacInt32(p + acclSrcBaseAddr) + ((src_Y + height - 1) * src_row_bytes) + (src_X * bpp));
		uint8 *dst = Mac2HostAddr(ReadMacInt32(p + acclDestBaseAddr) + ((dest_Y + height - 1) * dst_row_bytes) + (dest_X * bpp));
		for (int i = height - 1; i >= 0; i--) {
			memmove(dst, src, width);
			src -= src_row_bytes;
			dst -= dst_row_bytes;
		}
	}
}
Example #14
void NQD_fillrect(uint32 p)
{
	D(bug("accl_fillrect %08x\n", p));

	// Get filling parameters
	int16 dest_X = (int16)ReadMacInt16(p + acclDestRect + 2) - (int16)ReadMacInt16(p + acclDestBoundsRect + 2);
	int16 dest_Y = (int16)ReadMacInt16(p + acclDestRect + 0) - (int16)ReadMacInt16(p + acclDestBoundsRect + 0);
	int16 width  = (int16)ReadMacInt16(p + acclDestRect + 6) - (int16)ReadMacInt16(p + acclDestRect + 2);
	int16 height = (int16)ReadMacInt16(p + acclDestRect + 4) - (int16)ReadMacInt16(p + acclDestRect + 0);
	uint32 color = htonl(ReadMacInt32(p + acclPenMode) == 8 ? ReadMacInt32(p + acclForePen) : ReadMacInt32(p + acclBackPen));
	D(bug(" dest X %d, dest Y %d\n", dest_X, dest_Y));
	D(bug(" width %d, height %d\n", width, height));
	D(bug(" bytes_per_row %d color %08x\n", (int32)ReadMacInt32(p + acclDestRowBytes), color));

	// And perform the fill
	const int bpp = bytes_per_pixel(ReadMacInt32(p + acclDestPixelSize));
	const int dest_row_bytes = (int32)ReadMacInt32(p + acclDestRowBytes);
	uint8 *dest = Mac2HostAddr(ReadMacInt32(p + acclDestBaseAddr) + (dest_Y * dest_row_bytes) + (dest_X * bpp));
	width *= bpp;
	switch (bpp) {
	case 1:
		for (int i = 0; i < height; i++) {
			memset(dest, color, width);
			dest += dest_row_bytes;
		}
		break;
	case 2:
		for (int i = 0; i < height; i++) {
			do_fillrect<16>(dest, color, width);
			dest += dest_row_bytes;
		}
		break;
	case 4:
		for (int i = 0; i < height; i++) {
			do_fillrect<32>(dest, color, width);
			dest += dest_row_bytes;
		}
		break;
	}
}
Example #15
File: _sharpen.c Project: 5um1th/thumbor
static PyObject*
_sharpen_apply(PyObject *self, PyObject *args)
{
    PyObject *buffer_py = NULL, *image_mode = NULL, *amount = NULL, *radius = NULL,
             *luminance_only = NULL, *width_py = NULL, *height_py = NULL;

    if (!PyArg_UnpackTuple(args, "apply", 7, 7, &image_mode, &width_py, &height_py, &amount, &radius, &luminance_only, &buffer_py)) {
        return NULL;
    }

    char *image_mode_str = PyString_AsString(image_mode);
    unsigned char *buffer = (unsigned char *) PyString_AsString(buffer_py);
    double amount_double = PyFloat_AsDouble(amount),
           radius_double = PyFloat_AsDouble(radius);

    char luminance_only_bool = (char) PyObject_IsTrue(luminance_only);

    int width = (int) PyInt_AsLong(width_py),
        height = (int) PyInt_AsLong(height_py);

    int num_bytes = bytes_per_pixel(image_mode_str);
    int r_idx = rgb_order(image_mode_str, 'R'),
        g_idx = rgb_order(image_mode_str, 'G'),
        b_idx = rgb_order(image_mode_str, 'B');

    sharpen_info info = {
      amount_double,
      radius_double,
      luminance_only_bool,
      width, height,
      buffer,
      {r_idx, g_idx, b_idx},
      num_bytes
    };

    run_sharpen(&info);

    Py_INCREF(buffer_py);
    return buffer_py;
}
Example #16
void NQD_invrect(uint32 p)
{
	D(bug("accl_invrect %08x\n", p));

	// Get inversion parameters
	int16 dest_X = (int16)ReadMacInt16(p + acclDestRect + 2) - (int16)ReadMacInt16(p + acclDestBoundsRect + 2);
	int16 dest_Y = (int16)ReadMacInt16(p + acclDestRect + 0) - (int16)ReadMacInt16(p + acclDestBoundsRect + 0);
	int16 width  = (int16)ReadMacInt16(p + acclDestRect + 6) - (int16)ReadMacInt16(p + acclDestRect + 2);
	int16 height = (int16)ReadMacInt16(p + acclDestRect + 4) - (int16)ReadMacInt16(p + acclDestRect + 0);
	D(bug(" dest X %d, dest Y %d\n", dest_X, dest_Y));
	D(bug(" width %d, height %d, bytes_per_row %d\n", width, height, (int32)ReadMacInt32(p + acclDestRowBytes)));

	//!!?? pen_mode == 14

	// And perform the inversion
	const int bpp = bytes_per_pixel(ReadMacInt32(p + acclDestPixelSize));
	const int dest_row_bytes = (int32)ReadMacInt32(p + acclDestRowBytes);
	uint8 *dest = Mac2HostAddr(ReadMacInt32(p + acclDestBaseAddr) + (dest_Y * dest_row_bytes) + (dest_X * bpp));
	width *= bpp;
	switch (bpp) {
	case 1:
		for (int i = 0; i < height; i++) {
			do_invrect<8>(dest, width);
			dest += dest_row_bytes;
		}
		break;
	case 2:
		for (int i = 0; i < height; i++) {
			do_invrect<16>(dest, width);
			dest += dest_row_bytes;
		}
		break;
	case 4:
		for (int i = 0; i < height; i++) {
			do_invrect<32>(dest, width);
			dest += dest_row_bytes;
		}
		break;
	}
}
Example #17
File: display.c Project: qioixiy/notes
int setup_display_params(Sourceparams_t * sourceparams,
			 Displaydata_t * displaydata)
{
  displaydata->window_width = sourceparams->image_width;
  displaydata->window_height = sourceparams->image_height;

  displaydata->texture_width =
    compute_texture_dimension(displaydata->window_width);
  
  displaydata->texture_height =
    compute_texture_dimension(displaydata->window_height);

  displaydata->bytes_per_pixel = bytes_per_pixel(sourceparams->encoding);
 
  displaydata->internal_format =
    texture_internal_format(sourceparams->encoding);

  displaydata->pixelformat = texture_pixel_format(sourceparams->encoding);
  /* assign texture coordinates  */
  displaydata->t0[0] = 0.0;
  displaydata->t0[1] = 0.0;

  displaydata->t1[0] = (float)displaydata->window_width/
    (float)displaydata->texture_width;
  displaydata->t1[1] = 0.0;

  displaydata->t2[0] = (float)displaydata->window_width /
    (float)displaydata->texture_width;
  displaydata->t2[1] = (float)displaydata->window_height /
    (float)displaydata->texture_height;

  displaydata->t3[0] = 0.0;
  displaydata->t3[1] = (float)displaydata->window_height /
    (float)displaydata->texture_height;

  return(0);
}
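
The texture coordinates assigned above are simply the ratio of the window size to the (typically power-of-two) texture size, so only the used portion of the texture gets sampled. A worked example with assumed sizes:

#include <stdio.h>

int main(void)
{
	int window_width = 640, window_height = 480;     /* assumed source size */
	int texture_width = 1024, texture_height = 512;  /* assumed power-of-two texture */

	float s_max = (float)window_width / (float)texture_width;    /* 0.625 */
	float t_max = (float)window_height / (float)texture_height;  /* 0.9375 */

	printf("t1 = (%.4f, 0.0)  t2 = (%.4f, %.4f)  t3 = (0.0, %.4f)\n",
	       s_max, s_max, t_max, t_max);
	return 0;
}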
Example #18
    Result onStartScanlineDecode(const SkImageInfo& dstInfo, const Options& options,
            SkPMColor ctable[], int* ctableCount) override {
        if (!conversion_possible(dstInfo, this->getInfo())) {
            return kInvalidConversion;
        }

        const Result result = this->initializeSwizzler(dstInfo, options, ctable,
                                                       ctableCount);
        if (result != kSuccess) {
            return result;
        }

        fHeight = dstInfo.height();
        // FIXME: This need not be called on a second call to onStartScanlineDecode.
        fSrcRowBytes = this->getInfo().width() *
                (bytes_per_pixel(this->getEncodedInfo().bitsPerPixel()));
        fGarbageRow.reset(fSrcRowBytes);
        fGarbageRowPtr = static_cast<uint8_t*>(fGarbageRow.get());
        fCanSkipRewind = true;

        return SkCodec::kSuccess;
    }
Example #19
static PyObject*
_brightness_apply(PyObject *self, PyObject *args)
{
    PyObject *buffer = NULL, *delta = NULL, *image_mode = NULL;

    if (!PyArg_UnpackTuple(args, "apply", 3, 3, &image_mode, &delta, &buffer)) {
        return NULL;
    }

    char *image_mode_str = PyString_AsString(image_mode);
    Py_ssize_t size = PyString_Size(buffer);
    unsigned char *ptr = (unsigned char *) PyString_AsString(buffer);
    int delta_int = (int) PyInt_AsLong(delta);

    int num_bytes = bytes_per_pixel(image_mode_str);

    delta_int = (255 * delta_int) / 100;

    int i = 0, r, g, b;
    size -= num_bytes;
    for (; i <= size; i += num_bytes) {
        r = ptr[i];
        g = ptr[i + 1];
        b = ptr[i + 2];

        r += delta_int;
        g += delta_int;
        b += delta_int;

        ptr[i] = ADJUST_COLOR(r);
        ptr[i + 1] = ADJUST_COLOR(g);
        ptr[i + 2] = ADJUST_COLOR(b);
    }

    Py_INCREF(buffer);
    return buffer;
}
	Color TGAReader::pixel_to_color(const std::vector<std::uint8_t> &bytes, unsigned int pixel_offset) const
	{
		unsigned int byte_offset = pixel_offset * bytes_per_pixel();
		assert(byte_offset < bytes.size());
		const std::uint8_t *pixel = &bytes[byte_offset];

		if (header.image_type == ImageType::RGB)
		{
			if (header.image.pixel_size == 24)
				return Color(real_t(pixel[2]) / 255, real_t(pixel[1]) / 255, real_t(pixel[0]) / 255);

			if (header.image.pixel_size == 32)
				return Color(real_t(pixel[2] * pixel[3]) / (255 * 255),
				             real_t(pixel[1] * pixel[3]) / (255 * 255),
				             real_t(pixel[0] * pixel[3]) / (255 * 255));
		}
		else if (header.image_type == ImageType::MONO)
		{
			if (header.image.pixel_size == 8)
				return Color(real_t(pixel[0]) / 255, real_t(pixel[0]) / 255, real_t(pixel[0]) / 255);
		}

		throw Exception(error_prefix, "unsupported TGA image type");
	}
Example #21
static unsigned int fbmem_size(void)
{
    return height() * width() * bytes_per_pixel();
}
Example #22
File: display.c Project: qioixiy/notes
int setup_texture(Displaydata_t * displaydata, Sourceparams_t * sourceparams)
{
  int status;
  int texture_size, luma_size, chroma_size, chroma_width, chroma_height;
  GLint internal_format;
  GLenum pixelformat;
  
  internal_format = (GLint)displaydata->internal_format;
  
  pixelformat = (GLenum)displaydata->pixelformat;
      

  displaydata->bytes_per_pixel = bytes_per_pixel(sourceparams->encoding);

  texture_size = displaydata->texture_width * displaydata->texture_height *
	displaydata->bytes_per_pixel;
  printf("displaydata->texture_width=%d,displaydata->texture_height=%d,displaydata->bytes_per_pixel=%d\n",
         displaydata->texture_width, displaydata->texture_height, displaydata->bytes_per_pixel);

  /* if we have a planar encoding, add extra memory for the other  */
  /* planes. if we do all planes in one malloc the memory will be  */
  /* contiguous and we can copy in the data source with one copy   */
  /* instead of copying each plane separately  */
  
  if (YUV420 == sourceparams->encoding)
    {
      luma_size = texture_size;
      chroma_width = displaydata->texture_width / 2;
      chroma_height  = displaydata->texture_height / 2;
      chroma_size = texture_size / 4;
      texture_size = luma_size + 2 * chroma_size;
    }
  else
    {
      /* all in one texture: no extras required  */
      chroma_width = 0;
      chroma_height = 0;
      chroma_size = 0;
      luma_size = 0;
    }
 
  displaydata->texture = malloc(texture_size);

 
  if (NULL == displaydata->texture)
    {
      status = -1; /* error  */
      perror("Error: can't allocate texture memory");
    }
  else
    {
      if (YUV420 == sourceparams->encoding)
	{
	  /* need three textures: do U, V here; get Y from the  */
	  /* one we set up outside this if statement.   */
	  displaydata->u_texture = (char *)displaydata->texture + luma_size;
	  displaydata->v_texture = (char *)displaydata->u_texture +
	    chroma_size;


	  glGenTextures(1, &(displaydata->u_texturename));
	  check_error("after glGenTextures");
	  glGenTextures(1, &(displaydata->v_texturename));
	  check_error("after glGenTextures");
	  
	    
	  displaydata->v_texture_unit = 2; /* GL_TEXTURE2 */
	  displaydata->u_texture_unit = 1; /* GL_TEXTURE1; */

	  setup_texture_unit(GL_TEXTURE2, chroma_width,
			     chroma_height, displaydata->v_texturename,
			     displaydata->v_texture, internal_format,
			     pixelformat);
	  
	  setup_texture_unit(GL_TEXTURE1, chroma_width,
			     chroma_height, displaydata->u_texturename,
			     displaydata->u_texture, internal_format,
			     pixelformat);
	  
			     
			     
	}
      else
	{
	  displaydata->u_texture = NULL;
	  displaydata->v_texture = NULL;
	  displaydata->u_texturename = 0;
	  displaydata->v_texturename = 0;
	  displaydata->v_texture_unit = 0;
	  displaydata->u_texture_unit = 0;
	  
	}

      /* set up either the last texture for YUV420 or the only texture  */
      /* for the other formats.   */
      
      /* do this one last so we leave it as default  */
      displaydata->primary_texture_unit = 0; /* GL_TEXTURE0  */

      glGenTextures(1, &(displaydata->texturename));
      check_error("after glGenTextures");

      setup_texture_unit(GL_TEXTURE0,
			 displaydata->texture_width,
			 displaydata->texture_height,
			 displaydata->texturename, displaydata->texture,
			 internal_format, pixelformat);

      /* turn it black. this works because all the formats i'm  */
      /* dealing with right now are RGB or YUV. in the RGB case  */
      /* setting each component to zero -> black. in the YUV case  */
      /* setting the intensities (Y component) to zero -> black  */
      
      memset(displaydata->texture, 0, texture_size);
      
      status = 0; /* okay  */
    }
  return(status);
}
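
For the planar YUV420 case above, each chroma plane is half the luma resolution in both dimensions, so the single contiguous allocation is the luma plane plus two quarter-size chroma planes. A small sketch of that sizing, with assumed texture dimensions and one byte per luma sample:

#include <stdio.h>

int main(void)
{
	int texture_width = 1024, texture_height = 512;  /* assumed texture size */
	int bytes_per_pixel = 1;                         /* assumed 1 byte per luma sample */

	int luma_size = texture_width * texture_height * bytes_per_pixel;
	int chroma_size = luma_size / 4;                 /* (w/2) * (h/2) per chroma plane */
	int texture_size = luma_size + 2 * chroma_size;  /* one contiguous allocation */

	printf("luma %d bytes, chroma %d bytes each, total %d bytes\n",
	       luma_size, chroma_size, texture_size);
	return 0;
}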
Example #23
GLint GLAPIENTRY
gluBuild2DMipmaps(GLenum target, GLint components,
		  GLsizei width, GLsizei height, GLenum format,
		  GLenum type, const void *data)
{
   GLint w, h, maxsize;
   void *image, *newimage;
   GLint neww, newh, level, bpp;
   int error;
   GLboolean done;
   GLint retval = 0;
   GLint unpackrowlength, unpackalignment, unpackskiprows, unpackskippixels;
   GLint packrowlength, packalignment, packskiprows, packskippixels;

   if (width < 1 || height < 1)
      return GLU_INVALID_VALUE;

   glGetIntegerv(GL_MAX_TEXTURE_SIZE, &maxsize);

   w = round2(width);
   if (w > maxsize) {
      w = maxsize;
   }
   h = round2(height);
   if (h > maxsize) {
      h = maxsize;
   }

   bpp = bytes_per_pixel(format, type);
   if (bpp == 0) {
      /* probably a bad format or type enum */
      return GLU_INVALID_ENUM;
   }

   /* Get current glPixelStore values */
   glGetIntegerv(GL_UNPACK_ROW_LENGTH, &unpackrowlength);
   glGetIntegerv(GL_UNPACK_ALIGNMENT, &unpackalignment);
   glGetIntegerv(GL_UNPACK_SKIP_ROWS, &unpackskiprows);
   glGetIntegerv(GL_UNPACK_SKIP_PIXELS, &unpackskippixels);
   glGetIntegerv(GL_PACK_ROW_LENGTH, &packrowlength);
   glGetIntegerv(GL_PACK_ALIGNMENT, &packalignment);
   glGetIntegerv(GL_PACK_SKIP_ROWS, &packskiprows);
   glGetIntegerv(GL_PACK_SKIP_PIXELS, &packskippixels);

   /* set pixel packing */
   glPixelStorei(GL_PACK_ROW_LENGTH, 0);
   glPixelStorei(GL_PACK_ALIGNMENT, 1);
   glPixelStorei(GL_PACK_SKIP_ROWS, 0);
   glPixelStorei(GL_PACK_SKIP_PIXELS, 0);

   done = GL_FALSE;

   if (w != width || h != height) {
      /* must rescale image to get "top" mipmap texture image */
      image = malloc((w + 4) * h * bpp);
      if (!image) {
	 return GLU_OUT_OF_MEMORY;
      }
      error = gluScaleImage(format, width, height, type, data,
			    w, h, type, image);
      if (error) {
	 retval = error;
	 done = GL_TRUE;
      }
   }
   else {
      image = (void *) data;
   }

   level = 0;
   while (!done) {
      if (image != data) {
	 /* set pixel unpacking */
	 glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	 glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
	 glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
	 glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
      }

      glTexImage2D(target, level, components, w, h, 0, format, type, image);

      if (w == 1 && h == 1)
	 break;

      neww = (w < 2) ? 1 : w / 2;
      newh = (h < 2) ? 1 : h / 2;
      newimage = malloc((neww + 4) * newh * bpp);
      if (!newimage) {
	 return GLU_OUT_OF_MEMORY;
      }

      error = gluScaleImage(format, w, h, type, image,
			    neww, newh, type, newimage);
      if (error) {
	 retval = error;
	 done = GL_TRUE;
      }

      if (image != data) {
	 free(image);
      }
      image = newimage;

      w = neww;
      h = newh;
      level++;
   }

   if (image != data) {
      free(image);
   }

   /* Restore original glPixelStore state */
   glPixelStorei(GL_UNPACK_ROW_LENGTH, unpackrowlength);
   glPixelStorei(GL_UNPACK_ALIGNMENT, unpackalignment);
   glPixelStorei(GL_UNPACK_SKIP_ROWS, unpackskiprows);
   glPixelStorei(GL_UNPACK_SKIP_PIXELS, unpackskippixels);
   glPixelStorei(GL_PACK_ROW_LENGTH, packrowlength);
   glPixelStorei(GL_PACK_ALIGNMENT, packalignment);
   glPixelStorei(GL_PACK_SKIP_ROWS, packskiprows);
   glPixelStorei(GL_PACK_SKIP_PIXELS, packskippixels);

   return retval;
}
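
The mipmap loop above halves each dimension (clamping at 1) until both reach 1x1. The following standalone sketch prints that chain, assuming round2() has already snapped the level-0 size to 512x128:

#include <stdio.h>

int main(void)
{
	int w = 512, h = 128, level = 0;   /* assumed level-0 size after round2() */

	for (;;) {
		printf("level %d: %dx%d\n", level, w, h);
		if (w == 1 && h == 1)
			break;
		w = (w < 2) ? 1 : w / 2;
		h = (h < 2) ? 1 : h / 2;
		level++;
	}
	return 0;
}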
Example #24
/* process */
void process(IplImage *source, IplImage *destination) {
  uchar *pS, *pD, *pP0, *pP1;
  uchar *dataS, *dataD, *dataP0, *dataP1;
  int bpp;
  int step;
  CvSize size;
  int x, y;
  double intensityS;
  int intensityS_d;
  double intensityP0;
  int intensityP0_d;
  double intensityP1;
  int intensityP1_d;
  int intensity_diff0;
  int intensity_diff1;

  /* for outer region */
  cvCopy( source, destination, NULL );

  /* bpp 3 */
  bpp = bytes_per_pixel(source);

  /* step 1920, size 640,480 */
  cvGetRawData(source, &dataS, &step, &size);    
  cvGetRawData(destination, &dataD, NULL, NULL);
  cvGetRawData(previous0, &dataP0, NULL, NULL);
  cvGetRawData(previous1, &dataP1, NULL, NULL);

  /* inner region */
  for(y=size.height/8; y<(size.height*7/8); y++) {
    pS = dataS+step*size.height/8+bpp*size.width/8;
    pD = dataD+step*size.height/8+bpp*size.width/8;
    pP0 = dataP0+step*size.height/8+bpp*size.width/8;
    pP1 = dataP1+step*size.height/8+bpp*size.width/8;
    
    for(x=size.width/8; x<(size.width*7/8); x++) {
      intensityS = (0.114 * pS[0] + 0.587 * pS[1] + 0.299 * pS[2]) / 255.0;
      intensityS_d = (int)(219.0 * intensityS) + 16;
      intensityP0 = (0.114 * pP0[0] + 0.587 * pP0[1] + 0.299 * pP0[2]) / 255.0;
      intensityP0_d = (int)(219.0 * intensityP0) + 16;
      intensityP1 = (0.114 * pP1[0] + 0.587 * pP1[1] + 0.299 * pP1[2]) / 255.0;
      intensityP1_d = (int)(219.0 * intensityP1) + 16;

      intensity_diff0 = intensityS_d - intensityP0_d;
      intensity_diff0 = ((intensity_diff0<0) ? -intensity_diff0 : intensity_diff0);
      intensity_diff1 = intensityP0_d - intensityP1_d;
      intensity_diff1 = ((intensity_diff1<0) ? -intensity_diff1 : intensity_diff1);

      if( (intensity_diff0 > DIFFERENCE_THRESHOLD) 
	  && (intensity_diff1 > DIFFERENCE_THRESHOLD) ) {
	*pD = pP0[0];
	*(pD+1) = pP0[1];
	*(pD+2) = pP0[2];
      }
      else {
	*pD = 0;
	*(pD+1) = 0;
	*(pD+2) = 0;
      }
	
      pS += bpp; // next pixel of the source
      pD += bpp; // next pixel of the destination
      pP0 += bpp; // next pixel of the source
      pP1 += bpp; // next pixel of the source
    }
    dataS += step; // next line of the source
    dataD += step; // next line of the destination
    dataP0 += step; // next line of the source
    dataP1 += step; // next line of the source
  }    

  /* for next */
  cvCopy( previous0, previous1, NULL );
  cvCopy( source, previous0, NULL );
}
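
The per-pixel intensity computation above weights the BGR bytes into a 0..1 luminance and then rescales it to the 16..235 video range (219 * Y + 16). A worked example with a made-up pixel:

#include <stdio.h>

int main(void)
{
	unsigned char b = 64, g = 128, r = 200;   /* made-up pixel; OpenCV stores BGR */

	double intensity = (0.114 * b + 0.587 * g + 0.299 * r) / 255.0;
	int intensity_d = (int)(219.0 * intensity) + 16;

	printf("Y = %.4f -> video-range value %d\n", intensity, intensity_d);
	return 0;
}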
Example #25
/*!
 * PrpENC enable channel setup function
 *
 * @param cam       struct cam_data * mxc capture instance
 *
 * @return  status
 */
static int prp_enc_setup(cam_data *cam)
{
	ipu_channel_params_t enc;
	int err = 0;
	dma_addr_t dummy = 0xdeadbeaf;

	CAMERA_TRACE("In prp_enc_setup\n");
	if (!cam) {
		printk(KERN_ERR "cam private is NULL\n");
		return -ENXIO;
	}
	memset(&enc, 0, sizeof(ipu_channel_params_t));

	ipu_csi_get_window_size(&enc.csi_prp_enc_mem.in_width,
				&enc.csi_prp_enc_mem.in_height, cam->csi);

	enc.csi_prp_enc_mem.in_pixel_fmt = IPU_PIX_FMT_UYVY;
	enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.width;
	enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.height;
	enc.csi_prp_enc_mem.csi = cam->csi;
	if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
		enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.height;
		enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.width;
	}

	if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV420P;
		pr_info("YUV420\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV422P;
		pr_info("YUV422P\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUYV;
		pr_info("YUYV\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_UYVY;
		pr_info("UYVY\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_NV12;
		pr_info("NV12\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR24;
		pr_info("BGR24\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB24;
		pr_info("RGB24\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB565;
		pr_info("RGB565\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR32;
		pr_info("BGR32\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB32;
		pr_info("RGB32\n");
	} else {
		printk(KERN_ERR "format not supported\n");
		return -EINVAL;
	}

	err = ipu_init_channel(CSI_PRP_ENC_MEM, &enc);
	if (err != 0) {
		printk(KERN_ERR "ipu_init_channel %d\n", err);
		return err;
	}

	ipu_csi_enable_mclk_if(CSI_MCLK_ENC, cam->csi, true, true);

	grotation = cam->rotation;
	if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
		if (cam->rot_enc_bufs_vaddr[0]) {
			dma_free_coherent(0, cam->rot_enc_buf_size[0],
					  cam->rot_enc_bufs_vaddr[0],
					  cam->rot_enc_bufs[0]);
		}
		if (cam->rot_enc_bufs_vaddr[1]) {
			dma_free_coherent(0, cam->rot_enc_buf_size[1],
					  cam->rot_enc_bufs_vaddr[1],
					  cam->rot_enc_bufs[1]);
		}
		cam->rot_enc_buf_size[0] =
		    PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
		cam->rot_enc_bufs_vaddr[0] =
		    (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[0],
					       &cam->rot_enc_bufs[0],
					       GFP_DMA | GFP_KERNEL);
		if (!cam->rot_enc_bufs_vaddr[0]) {
			printk(KERN_ERR "alloc enc_bufs0\n");
			return -ENOMEM;
		}
		cam->rot_enc_buf_size[1] =
		    PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
		cam->rot_enc_bufs_vaddr[1] =
		    (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[1],
					       &cam->rot_enc_bufs[1],
					       GFP_DMA | GFP_KERNEL);
		if (!cam->rot_enc_bufs_vaddr[1]) {
			dma_free_coherent(0, cam->rot_enc_buf_size[0],
					  cam->rot_enc_bufs_vaddr[0],
					  cam->rot_enc_bufs[0]);
			cam->rot_enc_bufs_vaddr[0] = NULL;
			cam->rot_enc_bufs[0] = 0;
			printk(KERN_ERR "alloc enc_bufs1\n");
			return -ENOMEM;
		}

		err = ipu_init_channel_buffer(CSI_PRP_ENC_MEM,
					      IPU_OUTPUT_BUFFER,
					      enc.csi_prp_enc_mem.out_pixel_fmt,
					      enc.csi_prp_enc_mem.out_width,
					      enc.csi_prp_enc_mem.out_height,
					      enc.csi_prp_enc_mem.out_width,
					      IPU_ROTATE_NONE,
					      cam->rot_enc_bufs[0],
					      cam->rot_enc_bufs[1], 0, 0, 0);
		if (err != 0) {
			printk(KERN_ERR "CSI_PRP_ENC_MEM err\n");
			return err;
		}

		err = ipu_init_channel(MEM_ROT_ENC_MEM, NULL);
		if (err != 0) {
			printk(KERN_ERR "MEM_ROT_ENC_MEM channel err\n");
			return err;
		}

		err = ipu_init_channel_buffer(MEM_ROT_ENC_MEM, IPU_INPUT_BUFFER,
					      enc.csi_prp_enc_mem.out_pixel_fmt,
					      enc.csi_prp_enc_mem.out_width,
					      enc.csi_prp_enc_mem.out_height,
					      enc.csi_prp_enc_mem.out_width,
					      cam->rotation,
					      cam->rot_enc_bufs[0],
					      cam->rot_enc_bufs[1], 0, 0, 0);
		if (err != 0) {
			printk(KERN_ERR "MEM_ROT_ENC_MEM input buffer\n");
			return err;
		}

		err =
		    ipu_init_channel_buffer(MEM_ROT_ENC_MEM, IPU_OUTPUT_BUFFER,
					    enc.csi_prp_enc_mem.out_pixel_fmt,
					    enc.csi_prp_enc_mem.out_height,
					    enc.csi_prp_enc_mem.out_width,
					    cam->v2f.fmt.pix.bytesperline /
					    bytes_per_pixel(enc.csi_prp_enc_mem.
							    out_pixel_fmt),
					    IPU_ROTATE_NONE,
					    dummy, dummy, 0,
					    cam->offset.u_offset,
					    cam->offset.v_offset);
		if (err != 0) {
			printk(KERN_ERR "MEM_ROT_ENC_MEM output buffer\n");
			return err;
		}

		err = ipu_link_channels(CSI_PRP_ENC_MEM, MEM_ROT_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR
			       "link CSI_PRP_ENC_MEM-MEM_ROT_ENC_MEM\n");
			return err;
		}

		err = ipu_enable_channel(CSI_PRP_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
			return err;
		}
		err = ipu_enable_channel(MEM_ROT_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR "ipu_enable_channel MEM_ROT_ENC_MEM\n");
			return err;
		}

		ipu_select_buffer(CSI_PRP_ENC_MEM, IPU_OUTPUT_BUFFER, 0);
		ipu_select_buffer(CSI_PRP_ENC_MEM, IPU_OUTPUT_BUFFER, 1);
	} else {
		err =
		    ipu_init_channel_buffer(CSI_PRP_ENC_MEM, IPU_OUTPUT_BUFFER,
					    enc.csi_prp_enc_mem.out_pixel_fmt,
					    enc.csi_prp_enc_mem.out_width,
					    enc.csi_prp_enc_mem.out_height,
					    cam->v2f.fmt.pix.bytesperline /
					    bytes_per_pixel(enc.csi_prp_enc_mem.
							    out_pixel_fmt),
					    cam->rotation,
					    dummy, dummy, 0,
					    cam->offset.u_offset,
					    cam->offset.v_offset);
		if (err != 0) {
			printk(KERN_ERR "CSI_PRP_ENC_MEM output buffer\n");
			return err;
		}
		err = ipu_enable_channel(CSI_PRP_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
			return err;
		}
	}

	return err;
}
Example #26
SkCodec::Result SkPngCodec::onGetPixels(const SkImageInfo& requestedInfo, void* dst,
                                        size_t dstRowBytes, const Options& options,
                                        SkPMColor ctable[], int* ctableCount,
                                        int* rowsDecoded) {
    if (!conversion_possible(requestedInfo, this->getInfo())) {
        return kInvalidConversion;
    }
    if (options.fSubset) {
        // Subsets are not supported.
        return kUnimplemented;
    }

    // Note that ctable and ctableCount may be modified if there is a color table
    const Result result = this->initializeSwizzler(requestedInfo, options, ctable, ctableCount);
    if (result != kSuccess) {
        return result;
    }

    const int width = requestedInfo.width();
    const int height = requestedInfo.height();
    const int bpp = bytes_per_pixel(this->getEncodedInfo().bitsPerPixel());
    const size_t srcRowBytes = width * bpp;

    // FIXME: Could we use the return value of setjmp to specify the type of
    // error?
    int row = 0;
    // This must be declared above the call to setjmp to avoid memory leaks on incomplete images.
    SkAutoTMalloc<uint8_t> storage;
    if (setjmp(png_jmpbuf(fPng_ptr))) {
        // Assume that any error that occurs while reading rows is caused by an incomplete input.
        if (fNumberPasses > 1) {
            // FIXME (msarett): Handle incomplete interlaced pngs.
            return (row == height) ? kSuccess : kInvalidInput;
        }
        // FIXME: We do a poor job on incomplete pngs compared to other decoders (ex: Chromium,
        // Ubuntu Image Viewer).  This is because we use the default buffer size in libpng (8192
        // bytes), and if we can't fill the buffer, we immediately fail.
        // For example, if we try to read 8192 bytes, and the image (incorrectly) only contains
        // half that, which may have been enough to contain a non-zero number of lines, we fail
        // when we could have decoded a few more lines and then failed.
        // The read function that we provide for libpng has no way of indicating that we have
        // made a partial read.
        // Making our buffer size smaller improves our incomplete decodes, but what impact does
        // it have on regular decode performance?  Should we investigate using a different API
        // instead of png_read_row?  Chromium uses png_process_data.
        *rowsDecoded = row;
        return (row == height) ? kSuccess : kIncompleteInput;
    }

    // FIXME: We could split these out based on subclass.
    void* dstRow = dst;
    if (fNumberPasses > 1) {
        storage.reset(height * srcRowBytes);
        uint8_t* const base = storage.get();

        for (int i = 0; i < fNumberPasses; i++) {
            uint8_t* srcRow = base;
            for (int y = 0; y < height; y++) {
                png_read_row(fPng_ptr, srcRow, nullptr);
                srcRow += srcRowBytes;
            }
        }

        // Now swizzle it.
        uint8_t* srcRow = base;
        for (; row < height; row++) {
            fSwizzler->swizzle(dstRow, srcRow);
            dstRow = SkTAddOffset<void>(dstRow, dstRowBytes);
            srcRow += srcRowBytes;
        }
    } else {
        storage.reset(srcRowBytes);
        uint8_t* srcRow = storage.get();
        for (; row < height; row++) {
            png_read_row(fPng_ptr, srcRow, nullptr);
            fSwizzler->swizzle(dstRow, srcRow);
            dstRow = SkTAddOffset<void>(dstRow, dstRowBytes);
        }
    }

    // read rest of file, and get additional comment and time chunks in info_ptr
    png_read_end(fPng_ptr, fInfo_ptr);

    return kSuccess;
}
/*!
 * PrpENC enable channel setup function
 *
 * @param cam       struct cam_data * mxc capture instance
 *
 * @return  status
 */
static int prp_enc_setup(cam_data *cam)
{
	ipu_channel_params_t enc;
	int err = 0;
	dma_addr_t dummy = cam->dummy_frame.buffer.m.offset;
#ifdef CONFIG_MXC_MIPI_CSI2
	void *mipi_csi2_info;
	int ipu_id;
	int csi_id;
#endif

	CAMERA_TRACE("In prp_enc_setup\n");
	if (!cam) {
		printk(KERN_ERR "cam private is NULL\n");
		return -ENXIO;
	}
	memset(&enc, 0, sizeof(ipu_channel_params_t));

	ipu_csi_get_window_size(cam->ipu, &enc.csi_prp_enc_mem.in_width,
				&enc.csi_prp_enc_mem.in_height, cam->csi);

	enc.csi_prp_enc_mem.in_pixel_fmt = IPU_PIX_FMT_UYVY;
	enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.width;
	enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.height;
	enc.csi_prp_enc_mem.csi = cam->csi;
	if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
		enc.csi_prp_enc_mem.out_width = cam->v2f.fmt.pix.height;
		enc.csi_prp_enc_mem.out_height = cam->v2f.fmt.pix.width;
	}

	if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV420P;
		pr_info("YUV420\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YVU420) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YVU420P;
		pr_info("YVU420\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUV422P;
		pr_info("YUV422P\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_YUYV;
		pr_info("YUYV\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_UYVY) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_UYVY;
		pr_info("UYVY\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_NV12;
		pr_info("NV12\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR24) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR24;
		pr_info("BGR24\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB24;
		pr_info("RGB24\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB565) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB565;
		pr_info("RGB565\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_BGR32) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_BGR32;
		pr_info("BGR32\n");
	} else if (cam->v2f.fmt.pix.pixelformat == V4L2_PIX_FMT_RGB32) {
		enc.csi_prp_enc_mem.out_pixel_fmt = IPU_PIX_FMT_RGB32;
		pr_info("RGB32\n");
	} else {
		printk(KERN_ERR "format not supported\n");
		return -EINVAL;
	}

#ifdef CONFIG_MXC_MIPI_CSI2
	mipi_csi2_info = mipi_csi2_get_info();

	if (mipi_csi2_info) {
		if (mipi_csi2_get_status(mipi_csi2_info)) {
			ipu_id = mipi_csi2_get_bind_ipu(mipi_csi2_info);
			csi_id = mipi_csi2_get_bind_csi(mipi_csi2_info);

			if (cam->ipu == ipu_get_soc(ipu_id)
				&& cam->csi == csi_id) {
				enc.csi_prp_enc_mem.mipi_en = true;
				enc.csi_prp_enc_mem.mipi_vc =
				mipi_csi2_get_virtual_channel(mipi_csi2_info);
				enc.csi_prp_enc_mem.mipi_id =
				mipi_csi2_get_datatype(mipi_csi2_info);

				mipi_csi2_pixelclk_enable(mipi_csi2_info);
			} else {
				enc.csi_prp_enc_mem.mipi_en = false;
				enc.csi_prp_enc_mem.mipi_vc = 0;
				enc.csi_prp_enc_mem.mipi_id = 0;
			}
		} else {
			enc.csi_prp_enc_mem.mipi_en = false;
			enc.csi_prp_enc_mem.mipi_vc = 0;
			enc.csi_prp_enc_mem.mipi_id = 0;
		}
	}
#endif

	err = ipu_init_channel(cam->ipu, CSI_PRP_ENC_MEM, &enc);
	if (err != 0) {
		printk(KERN_ERR "ipu_init_channel %d\n", err);
		return err;
	}

	grotation = cam->rotation;
	if (cam->rotation >= IPU_ROTATE_90_RIGHT) {
		if (cam->rot_enc_bufs_vaddr[0]) {
			dma_free_coherent(0, cam->rot_enc_buf_size[0],
					  cam->rot_enc_bufs_vaddr[0],
					  cam->rot_enc_bufs[0]);
		}
		if (cam->rot_enc_bufs_vaddr[1]) {
			dma_free_coherent(0, cam->rot_enc_buf_size[1],
					  cam->rot_enc_bufs_vaddr[1],
					  cam->rot_enc_bufs[1]);
		}
		cam->rot_enc_buf_size[0] =
		    PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
		cam->rot_enc_bufs_vaddr[0] =
		    (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[0],
					       &cam->rot_enc_bufs[0],
					       GFP_DMA | GFP_KERNEL);
		if (!cam->rot_enc_bufs_vaddr[0]) {
			printk(KERN_ERR "alloc enc_bufs0\n");
			return -ENOMEM;
		}
		cam->rot_enc_buf_size[1] =
		    PAGE_ALIGN(cam->v2f.fmt.pix.sizeimage);
		cam->rot_enc_bufs_vaddr[1] =
		    (void *)dma_alloc_coherent(0, cam->rot_enc_buf_size[1],
					       &cam->rot_enc_bufs[1],
					       GFP_DMA | GFP_KERNEL);
		if (!cam->rot_enc_bufs_vaddr[1]) {
			dma_free_coherent(0, cam->rot_enc_buf_size[0],
					  cam->rot_enc_bufs_vaddr[0],
					  cam->rot_enc_bufs[0]);
			cam->rot_enc_bufs_vaddr[0] = NULL;
			cam->rot_enc_bufs[0] = 0;
			printk(KERN_ERR "alloc enc_bufs1\n");
			return -ENOMEM;
		}

		err = ipu_init_channel_buffer(cam->ipu, CSI_PRP_ENC_MEM,
					      IPU_OUTPUT_BUFFER,
					      enc.csi_prp_enc_mem.out_pixel_fmt,
					      enc.csi_prp_enc_mem.out_width,
					      enc.csi_prp_enc_mem.out_height,
					      enc.csi_prp_enc_mem.out_width,
					      IPU_ROTATE_NONE,
					      cam->rot_enc_bufs[0],
					      cam->rot_enc_bufs[1], 0, 0, 0);
		if (err != 0) {
			printk(KERN_ERR "CSI_PRP_ENC_MEM err\n");
			return err;
		}

		err = ipu_init_channel(cam->ipu, MEM_ROT_ENC_MEM, NULL);
		if (err != 0) {
			printk(KERN_ERR "MEM_ROT_ENC_MEM channel err\n");
			return err;
		}

		err = ipu_init_channel_buffer(cam->ipu, MEM_ROT_ENC_MEM,
					      IPU_INPUT_BUFFER,
					      enc.csi_prp_enc_mem.out_pixel_fmt,
					      enc.csi_prp_enc_mem.out_width,
					      enc.csi_prp_enc_mem.out_height,
					      enc.csi_prp_enc_mem.out_width,
					      cam->rotation,
					      cam->rot_enc_bufs[0],
					      cam->rot_enc_bufs[1], 0, 0, 0);
		if (err != 0) {
			printk(KERN_ERR "MEM_ROT_ENC_MEM input buffer\n");
			return err;
		}

		err =
		    ipu_init_channel_buffer(cam->ipu, MEM_ROT_ENC_MEM,
					    IPU_OUTPUT_BUFFER,
					    enc.csi_prp_enc_mem.out_pixel_fmt,
					    enc.csi_prp_enc_mem.out_height,
					    enc.csi_prp_enc_mem.out_width,
					    cam->v2f.fmt.pix.bytesperline /
					    bytes_per_pixel(enc.csi_prp_enc_mem.
							    out_pixel_fmt),
					    IPU_ROTATE_NONE,
					    dummy, dummy, 0,
					    cam->offset.u_offset,
					    cam->offset.v_offset);
		if (err != 0) {
			printk(KERN_ERR "MEM_ROT_ENC_MEM output buffer\n");
			return err;
		}

		err = ipu_link_channels(cam->ipu,
					CSI_PRP_ENC_MEM, MEM_ROT_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR
			       "link CSI_PRP_ENC_MEM-MEM_ROT_ENC_MEM\n");
			return err;
		}

		err = ipu_enable_channel(cam->ipu, CSI_PRP_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
			return err;
		}
		err = ipu_enable_channel(cam->ipu, MEM_ROT_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR "ipu_enable_channel MEM_ROT_ENC_MEM\n");
			return err;
		}

		ipu_select_buffer(cam->ipu, CSI_PRP_ENC_MEM,
				  IPU_OUTPUT_BUFFER, 0);
		ipu_select_buffer(cam->ipu, CSI_PRP_ENC_MEM,
				  IPU_OUTPUT_BUFFER, 1);
	} else {
		err =
		    ipu_init_channel_buffer(cam->ipu, CSI_PRP_ENC_MEM,
					    IPU_OUTPUT_BUFFER,
					    enc.csi_prp_enc_mem.out_pixel_fmt,
					    enc.csi_prp_enc_mem.out_width,
					    enc.csi_prp_enc_mem.out_height,
					    cam->v2f.fmt.pix.bytesperline /
					    bytes_per_pixel(enc.csi_prp_enc_mem.
							    out_pixel_fmt),
					    cam->rotation,
					    dummy, dummy, 0,
					    cam->offset.u_offset,
					    cam->offset.v_offset);
		if (err != 0) {
			printk(KERN_ERR "CSI_PRP_ENC_MEM output buffer\n");
			return err;
		}
		err = ipu_enable_channel(cam->ipu, CSI_PRP_ENC_MEM);
		if (err < 0) {
			printk(KERN_ERR "ipu_enable_channel CSI_PRP_ENC_MEM\n");
			return err;
		}
	}

	return err;
}
Example #28
enum piglit_result
run_test(GLenum test_format, GLenum test_type, float *time_out)
{
	bool pass = true;
	int64_t time;
	GLuint tex;
	int i, Bpp, channels;
	float *tmp, *expected, *observed;
	void *data;

	glGenTextures(1, &tex);
	glBindTexture(GL_TEXTURE_2D, tex);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);

	glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

	channels = num_channels(test_format);
	Bpp = bytes_per_pixel(test_format, test_type);

	if (test_type == GL_FLOAT) {
		/* Sanitize so we don't get invalid floating point values */
		tmp = malloc(texture_size * texture_size * channels * sizeof(float));
		for (i = 0; i < texture_size * texture_size * channels; ++i)
			tmp[i] = sn_to_float(32, ((GLint *)rand_data)[i]);
		data = tmp;
	} else {
		tmp = NULL;
		data = rand_data;
	}

	expected = malloc(texture_size * texture_size * 4 * sizeof(float));
	for (i = 0; i < texture_size * texture_size; ++i)
		to_expected(test_format, test_type, (GLubyte *)data + (i * Bpp),
			    expected + 4 * i);

	if (benchmark) {
		time = piglit_get_microseconds();
		for (i = 0; i < BENCHMARK_ITERATIONS; ++i)
			glTexImage2D(GL_TEXTURE_2D, 0, format->internal_format,
				     texture_size, texture_size, 0,
				     test_format, test_type, data);
		time = piglit_get_microseconds() - time;
		*time_out = (double)time / (double)BENCHMARK_ITERATIONS;
	} else {
		glTexImage2D(GL_TEXTURE_2D, 0, format->internal_format,
			     texture_size, texture_size, 0,
			     test_format, test_type, data);
	}
	pass &= piglit_check_gl_error(GL_NO_ERROR);

	if (is_format_signed(format->internal_format)) {
		glUseProgram(signed_prog);

		for (i = 0; i < texture_size * texture_size * 4; ++i)
			expected[i] = 0.5 + 0.5 * expected[i];
	} else {
		glUseProgram(unsigned_prog);
	}

	piglit_draw_rect_tex(0, 0, texture_size, texture_size, 0, 0, 1, 1);

	observed = malloc(texture_size * texture_size * 4 * sizeof(float));
	glReadPixels(0, 0, texture_size, texture_size,
		     GL_RGBA, GL_FLOAT, observed);
	pass &= piglit_check_gl_error(GL_NO_ERROR);

	pass &= piglit_compare_images_color(0, 0, texture_size, texture_size, 4,
					    tolerance, expected, observed);

	free(observed);
	free(expected);
	free(tmp);

	piglit_report_subtest_result(pass ? PIGLIT_PASS : PIGLIT_FAIL,
				     "%s texture with %s and %s",
				     piglit_get_gl_enum_name(format->internal_format),
				     piglit_get_gl_enum_name(test_format),
				     piglit_get_gl_enum_name(test_type));

	glDeleteTextures(1, &tex);

	return pass ? PIGLIT_PASS : PIGLIT_FAIL;
}
Example #29
GLint gluBuild2DMipmaps( GLenum target, GLint components,
                         GLint width, GLint height, GLenum format,
                         GLenum type, const void *data )
{
   GLint w, h, maxsize;
   void *image, *newimage;
   GLint neww, newh, level, bpp;
   int error;

   glGetIntegerv( GL_MAX_TEXTURE_SIZE, &maxsize );

   w = round2( width );
   if (w>maxsize) {
      w = maxsize;
   }
   h = round2( height );
   if (h>maxsize) {
      h = maxsize;
   }

   bpp = bytes_per_pixel( format, type );
   if (bpp==0) {
      /* probably a bad format or type enum */
      return GLU_INVALID_ENUM;
   }

   if (w!=width || h!=height) {
      /* must rescale image to get "top" mipmap texture image */
      image = malloc( (w+4) * h * bpp );
      if (!image) {
	 return GLU_OUT_OF_MEMORY;
      }
      error = gluScaleImage( format, width, height, type, data,
			     w, h, type, image );
      if (error) {
	 return error;
      }
   }
   else {
      image = (void *) data;
   }

   level = 0;
   while (1) {
      glTexImage2D( target, level, components, w, h, 0, format, type, image );

      if (w==1 && h==1)  break;

      neww = (w<2) ? 1 : w/2;
      newh = (h<2) ? 1 : h/2;
      newimage = malloc( (neww+4) * newh * bpp );
      if (!newimage) {
	 return GLU_OUT_OF_MEMORY;
      }

      error =  gluScaleImage( format, w, h, type, image,
			      neww, newh, type, newimage );
      if (error) {
	 return error;
      }

      if (image!=data) {
	 free( image );
      }
      image = newimage;

      w = neww;
      h = newh;
      level++;
   }

   if (image!=data) {
      free( image );
   }

   return 0;
}
Example #30
static PyObject*
_composite_apply(PyObject *self, PyObject *args)
{
    PyObject *py_image1 = NULL, *py_image2 = NULL, *image_mode = NULL,
             *w1, *h1, *w2, *h2, *py_x, *py_y, *py_merge = NULL;

    if (!PyArg_UnpackTuple(args, "apply", 9, 10, &image_mode, &py_image1, &w1, &h1, &py_image2, &w2, &h2, &py_x, &py_y, &py_merge)) {
        return NULL;
    }

    char *image_mode_str = PyString_AsString(image_mode);

    unsigned char *ptr1 = (unsigned char *) PyString_AsString(py_image1), *aux1 = NULL;
    unsigned char *ptr2 = (unsigned char *) PyString_AsString(py_image2), *aux2 = NULL;

    int width1 = (int) PyInt_AsLong(w1),
        width2 = (int) PyInt_AsLong(w2),
        height1 = (int) PyInt_AsLong(h1),
        height2 = (int) PyInt_AsLong(h2),
        x_pos = (int) PyInt_AsLong(py_x),
        y_pos = (int) PyInt_AsLong(py_y),
        merge = 1;

    if (py_merge) {
        merge = (int) PyInt_AsLong(py_merge);
    }

    int num_bytes = bytes_per_pixel(image_mode_str);
    int r_idx = rgb_order(image_mode_str, 'R'),
        g_idx = rgb_order(image_mode_str, 'G'),
        b_idx = rgb_order(image_mode_str, 'B'),
        a_idx = rgb_order(image_mode_str, 'A');


    int r1, g1, b1, a1, r2, g2, b2, a2, x, y, start_x = 0, start_y = 0;

    double delta, r, g, b, a;

    if (x_pos < 0) {
        start_x = -x_pos;
        x_pos = 0;
    }
    if (y_pos < 0) {
        start_y = -y_pos;
        y_pos = 0;
    }

    for (y = start_y; y < height2; ++y) {
        if (y_pos - start_y + y >= height1) {
            break;
        }
        int line_offset1 = ((y_pos + y - start_y) * width1 * num_bytes),
            line_offset2 = (y * width2 * num_bytes);

        aux1 = ptr1 + line_offset1 + (x_pos * num_bytes);
        aux2 = ptr2 + line_offset2 + (start_x * num_bytes);

        for (x = start_x; x < width2; ++x, aux1 += num_bytes, aux2 += num_bytes) {
            if (x_pos - start_x + x >= width1) {
                break;
            }

            r1 = aux1[r_idx];
            g1 = aux1[g_idx];
            b1 = aux1[b_idx];
            a1 = aux1[a_idx];

            r2 = aux2[r_idx];
            g2 = aux2[g_idx];
            b2 = aux2[b_idx];
            a2 = aux2[a_idx];

            a1 = 255 - a1;
            a2 = 255 - a2;

            if (merge) {
                delta = (a2 / MAX_RGB_DOUBLE) * (a1 / MAX_RGB_DOUBLE);

                a = MAX_RGB_DOUBLE * delta;

                delta = 1.0 - delta;
                delta = (delta <= SMALL_DOUBLE) ? 1.0 : (1.0 / delta);

                r = delta * ALPHA_COMPOSITE_COLOR_CHANNEL(r2, a2, r1, a1);
                g = delta * ALPHA_COMPOSITE_COLOR_CHANNEL(g2, a2, g1, a1);
                b = delta * ALPHA_COMPOSITE_COLOR_CHANNEL(b2, a2, b1, a1);
            } else {
                if (a1 == 0) {
                    r = r2;
                    g = g2;
                    b = b2;
                    a = a2;
                } else {
                    r = r1;
                    g = g1;
                    b = b1;
                    a = a1;
                }
            }

            a = 255.0 - a;

            aux1[r_idx] = ADJUST_COLOR_DOUBLE(r);
            aux1[g_idx] = ADJUST_COLOR_DOUBLE(g);
            aux1[b_idx] = ADJUST_COLOR_DOUBLE(b);
            aux1[a_idx] = ADJUST_COLOR_DOUBLE(a);
        }

    }

    Py_INCREF(py_image1);
    return py_image1;
}