Example #1
static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
		struct vivid_buffer *vid_cap_buf)
{
	bool blank = dev->must_blank[vid_cap_buf->vb.v4l2_buf.index];
	struct tpg_data *tpg = &dev->tpg;
	struct vivid_buffer *vid_out_buf = NULL;
	unsigned pixsize = tpg_g_twopixelsize(tpg, p) / 2;
	unsigned img_width = dev->compose_cap.width;
	unsigned img_height = dev->compose_cap.height;
	unsigned stride_cap = tpg->bytesperline[p];
	unsigned stride_out = dev->bytesperline_out[p];
	unsigned stride_osd = dev->display_byte_stride;
	unsigned hmax = (img_height * tpg->perc_fill) / 100;
	u8 *voutbuf;
	u8 *vosdbuf = NULL;
	unsigned y;
	bool blend = dev->bitmap_out || dev->clipcount_out || dev->fbuf_out_flags;
	/* Coarse scaling with Bresenham */
	unsigned vid_out_int_part;
	unsigned vid_out_fract_part;
	unsigned vid_out_y = 0;
	unsigned vid_out_error = 0;
	unsigned vid_overlay_int_part = 0;
	unsigned vid_overlay_fract_part = 0;
	unsigned vid_overlay_y = 0;
	unsigned vid_overlay_error = 0;
	unsigned vid_cap_right;
	bool quick;

	vid_out_int_part = dev->loop_vid_out.height / dev->loop_vid_cap.height;
	vid_out_fract_part = dev->loop_vid_out.height % dev->loop_vid_cap.height;

	if (!list_empty(&dev->vid_out_active))
		vid_out_buf = list_entry(dev->vid_out_active.next,
					 struct vivid_buffer, list);
	if (vid_out_buf == NULL)
		return -ENODATA;

	vid_cap_buf->vb.v4l2_buf.field = vid_out_buf->vb.v4l2_buf.field;

	voutbuf = vb2_plane_vaddr(&vid_out_buf->vb, p) +
				  vid_out_buf->vb.v4l2_planes[p].data_offset;
	voutbuf += dev->loop_vid_out.left * pixsize + dev->loop_vid_out.top * stride_out;
	vcapbuf += dev->compose_cap.left * pixsize + dev->compose_cap.top * stride_cap;

	if (dev->loop_vid_copy.width == 0 || dev->loop_vid_copy.height == 0) {
		/*
		 * If there is nothing to copy, then just fill the capture window
		 * with black.
		 */
		for (y = 0; y < hmax; y++, vcapbuf += stride_cap)
			memcpy(vcapbuf, tpg->black_line[p], img_width * pixsize);
		return 0;
	}

	if (dev->overlay_out_enabled &&
	    dev->loop_vid_overlay.width && dev->loop_vid_overlay.height) {
		vosdbuf = dev->video_vbase;
		vosdbuf += dev->loop_fb_copy.left * pixsize +
			   dev->loop_fb_copy.top * stride_osd;
		vid_overlay_int_part = dev->loop_vid_overlay.height /
				       dev->loop_vid_overlay_cap.height;
		vid_overlay_fract_part = dev->loop_vid_overlay.height %
					 dev->loop_vid_overlay_cap.height;
	}

	vid_cap_right = dev->loop_vid_cap.left + dev->loop_vid_cap.width;
	/* quick is true if no video scaling is needed */
	quick = dev->loop_vid_out.width == dev->loop_vid_cap.width;

	dev->cur_scaled_line = dev->loop_vid_out.height;
	for (y = 0; y < hmax; y++, vcapbuf += stride_cap) {
		/* osdline is true if this line requires overlay blending */
		bool osdline = vosdbuf && y >= dev->loop_vid_overlay_cap.top &&
			  y < dev->loop_vid_overlay_cap.top + dev->loop_vid_overlay_cap.height;

		/*
		 * If this line of the capture buffer doesn't get any video, then
		 * just fill with black.
		 */
		if (y < dev->loop_vid_cap.top ||
		    y >= dev->loop_vid_cap.top + dev->loop_vid_cap.height) {
			memcpy(vcapbuf, tpg->black_line[p], img_width * pixsize);
			continue;
		}

		/* fill the left border with black */
		if (dev->loop_vid_cap.left)
			memcpy(vcapbuf, tpg->black_line[p], dev->loop_vid_cap.left * pixsize);

		/* fill the right border with black */
		if (vid_cap_right < img_width)
			memcpy(vcapbuf + vid_cap_right * pixsize,
				tpg->black_line[p], (img_width - vid_cap_right) * pixsize);

		if (quick && !osdline) {
			memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
			       voutbuf + vid_out_y * stride_out,
			       dev->loop_vid_cap.width * pixsize);
			goto update_vid_out_y;
		}
		if (dev->cur_scaled_line == vid_out_y) {
			memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
			       dev->scaled_line,
			       dev->loop_vid_cap.width * pixsize);
			goto update_vid_out_y;
		}
		if (!osdline) {
			scale_line(voutbuf + vid_out_y * stride_out, dev->scaled_line,
				dev->loop_vid_out.width, dev->loop_vid_cap.width,
				tpg_g_twopixelsize(tpg, p));
		} else {
			/*
			 * Offset in bytes within loop_vid_copy to the start of the
			 * loop_vid_overlay rectangle.
			 */
			unsigned offset =
				(dev->loop_vid_overlay.left - dev->loop_vid_copy.left) * pixsize;
			u8 *osd = vosdbuf + vid_overlay_y * stride_osd;

			scale_line(voutbuf + vid_out_y * stride_out, dev->blended_line,
				dev->loop_vid_out.width, dev->loop_vid_copy.width,
				tpg_g_twopixelsize(tpg, p));
			if (blend)
				blend_line(dev, vid_overlay_y + dev->loop_vid_overlay.top,
					   dev->loop_vid_overlay.left,
					   dev->blended_line + offset, osd,
					   dev->loop_vid_overlay.width, pixsize);
			else
				memcpy(dev->blended_line + offset,
				       osd, dev->loop_vid_overlay.width * pixsize);
			scale_line(dev->blended_line, dev->scaled_line,
					dev->loop_vid_copy.width, dev->loop_vid_cap.width,
					tpg_g_twopixelsize(tpg, p));
		}
		dev->cur_scaled_line = vid_out_y;
		memcpy(vcapbuf + dev->loop_vid_cap.left * pixsize,
		       dev->scaled_line,
		       dev->loop_vid_cap.width * pixsize);

update_vid_out_y:
		if (osdline) {
			vid_overlay_y += vid_overlay_int_part;
			vid_overlay_error += vid_overlay_fract_part;
			if (vid_overlay_error >= dev->loop_vid_overlay_cap.height) {
				vid_overlay_error -= dev->loop_vid_overlay_cap.height;
				vid_overlay_y++;
			}
		}
		vid_out_y += vid_out_int_part;
		vid_out_error += vid_out_fract_part;
		if (vid_out_error >= dev->loop_vid_cap.height) {
			vid_out_error -= dev->loop_vid_cap.height;
			vid_out_y++;
		}
	}

	if (!blank)
		return 0;
	for (; y < img_height; y++, vcapbuf += stride_cap)
		memcpy(vcapbuf, tpg->contrast_line[p], img_width * pixsize);
	return 0;
}
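
The capture loop in this example (from the Linux vivid test driver) walks the capture image line by line and picks the matching output line with an integer step (vid_out_int_part) plus an error accumulator (vid_out_fract_part / vid_out_error), as announced by the "Coarse scaling with Bresenham" comment. A minimal standalone sketch of just that line mapping, with hypothetical names and sizes, could look like this:

#include <stdio.h>

/* Map cap_height destination lines onto out_height source lines using the
 * same integer-part / remainder accumulation as vivid_copy_buffer(). */
static void map_lines(unsigned out_height, unsigned cap_height)
{
	unsigned int_part = out_height / cap_height;
	unsigned fract_part = out_height % cap_height;
	unsigned src_y = 0, error = 0, y;

	for (y = 0; y < cap_height; y++) {
		printf("capture line %u <- output line %u\n", y, src_y);
		src_y += int_part;		/* whole-line step */
		error += fract_part;		/* accumulate the remainder */
		if (error >= cap_height) {	/* carry: step one extra line */
			error -= cap_height;
			src_y++;
		}
	}
}

int main(void)
{
	map_lines(720, 480);	/* e.g. 720 output lines squeezed onto 480 capture lines */
	return 0;
}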
Example #2
int blend(YUV_frame* in_frame1, YUV_frame* in_frame2, YUV_frame* out_frame,
          const double alpha, const int invert)
{
    int     ssx1, ssy1, ssx2, ssy2;
    BYTE*   Y_in1;
    BYTE*   U_in1;
    BYTE*   V_in1;
    BYTE*   Y_in2;
    BYTE*   U_in2;
    BYTE*   V_in2;
    BYTE*   Y_out;
    BYTE*   U_out;
    BYTE*   V_out;
    int     w, h, j;
    int     i_alpha;

    // check input formats
    ssx1 = in_frame1->Y.w / in_frame1->U.w;
    ssy1 = in_frame1->Y.h / in_frame1->U.h;
    ssx2 = in_frame2->Y.w / in_frame2->U.w;
    ssy2 = in_frame2->Y.h / in_frame2->U.h;
    if ((ssx1 != ssx2) || (ssy1 != ssy2))
        return YUV_Fail;
    // get working dimensions
    w = min(in_frame1->Y.w, in_frame2->Y.w);
    h = min(in_frame1->Y.h, in_frame2->Y.h);
    // blend the luma (Y) plane, with alpha scaled to an integer in 0..256
    i_alpha = (int)((alpha * 256.0) + 0.5);
    Y_in1 = in_frame1->Y.buff;
    Y_in2 = in_frame2->Y.buff;
    Y_out = out_frame->Y.buff;
    for (j = h; j != 0; j--)
    {
        blend_line(Y_in1, in_frame1->Y.pixelStride,
                   Y_in2, in_frame2->Y.pixelStride,
                   Y_out, out_frame->Y.pixelStride,
                   i_alpha, invert, w);
        Y_in1 += in_frame1->Y.lineStride;
        Y_in2 += in_frame2->Y.lineStride;
        Y_out += out_frame->Y.lineStride;
    }
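    // blend the chroma (U, V) planes at their subsampled dimensions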
    w = w / ssx1;
    h = h / ssy1;
    U_in1 = in_frame1->U.buff;
    V_in1 = in_frame1->V.buff;
    U_in2 = in_frame2->U.buff;
    V_in2 = in_frame2->V.buff;
    U_out = out_frame->U.buff;
    V_out = out_frame->V.buff;
    for (j = h; j != 0; j--)
    {
        blend_line(U_in1, in_frame1->U.pixelStride,
                   U_in2, in_frame2->U.pixelStride,
                   U_out, out_frame->U.pixelStride,
                   i_alpha, invert, w);
        blend_line(V_in1, in_frame1->V.pixelStride,
                   V_in2, in_frame2->V.pixelStride,
                   V_out, out_frame->V.pixelStride,
                   i_alpha, invert, w);
        U_in1 += in_frame1->U.lineStride;
        V_in1 += in_frame1->V.lineStride;
        U_in2 += in_frame2->U.lineStride;
        V_in2 += in_frame2->V.lineStride;
        U_out += out_frame->U.lineStride;
        V_out += out_frame->V.lineStride;
    }
    return YUV_OK;
}
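
blend_line() itself is not shown in this example; from the call sites it takes two strided source rows, a destination row, an integer alpha in the 0..256 range, an invert flag, and a sample count. A minimal sketch of such a per-line blend, assuming 8-bit samples and that hypothetical signature (not the library's actual implementation), would be:

typedef unsigned char BYTE;

/* Hypothetical per-sample blend: out = (a*in1 + (256-a)*in2 + 128) >> 8,
 * with the weighting inverted when invert is non-zero. */
static void blend_line_sketch(const BYTE* in1, const int stride1,
                              const BYTE* in2, const int stride2,
                              BYTE* out, const int strideOut,
                              const int alpha, const int invert, const int w)
{
    int i;
    const int a = invert ? 256 - alpha : alpha;

    for (i = 0; i < w; i++)
        out[i * strideOut] = (BYTE)((a * in1[i * stride1] +
                                     (256 - a) * in2[i * stride2] + 128) >> 8);
}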