static void inner_flush_clip_range(struct fimg2d_bltcmd *cmd)
{
	struct fimg2d_blit *blt = &cmd->blt;
	struct fimg2d_image *img;
	struct fimg2d_clip *clp;
	struct fimg2d_rect *r;
	struct fimg2d_dma *c;
	int i, y, clp_x, clp_w, clp_h, dir;
	int x1, y1, x2, y2, bpp, stride;
	unsigned long start;

	clp = &blt->param.clipping;
	dir = DMA_TO_DEVICE;

	for (i = 0; i < MAX_IMAGES; i++) {
		if (i == IMAGE_DST)
			dir = DMA_BIDIRECTIONAL;

		img = &cmd->image[i];

		r = &img->rect;

		/* 1st plane */
		c = &cmd->dma[i].base;
		if (!c->cached)
			continue;

		if (i == IMAGE_DST && clp->enable) {
			x1 = clp->x1;
			y1 = clp->y1;
			x2 = clp->x2;
			y2 = clp->y2;
		} else {
			x1 = r->x1;
			y1 = r->y1;
			x2 = r->x2;
			y2 = r->y2;
		}

		bpp = bit_per_pixel(img, 0);
		stride = width2bytes(img->width, bpp);

		clp_x = pixel2offset(x1, bpp);
		clp_w = width2bytes(x2 - x1, bpp);
		clp_h = y2 - y1;

		if (is_inner_flushrange(stride - clp_w))
			fimg2d_dma_sync_inner(c->addr, c->cached, dir);
		else {
			for (y = 0; y < clp_h; y++) {
				start = c->addr + (stride * y) + clp_x;
				fimg2d_dma_sync_inner(start, clp_w, dir);
			}
		}

		/* 2nd plane */
		if (!is_yuvfmt(img->fmt))
			continue;

		if (img->order != P2_CRCB && img->order != P2_CBCR)
			continue;

		c = &cmd->dma[i].plane2;
		if (!c->cached)
			continue;

		bpp = bit_per_pixel(img, 1);
		stride = width2bytes(img->width, bpp);

		clp_x = pixel2offset(x1, bpp);
		clp_w = width2bytes(x2 - x1, bpp);
		if (img->fmt == CF_YCBCR_420)
			clp_h = (y2 - y1)/2;
		else
			clp_h = y2 - y1;

		if (is_inner_flushrange(stride - clp_w))
			fimg2d_dma_sync_inner(c->addr, c->cached, dir);
		else {
			for (y = 0; y < clp_h; y++) {
				start = c->addr + (stride * y) + clp_x;
				fimg2d_dma_sync_inner(start, clp_w, dir);
			}
		}
	}
}
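
/*
 * Illustrative sketch (not part of the driver): the per-row arithmetic used
 * above when only the clipped band of a cached image is maintained.  Each
 * row y in [0, clip_h) is synced from base + stride * y + clip_x for
 * clip_w bytes.  The helper, the example geometry and the base address are
 * all hypothetical.
 */
#include <stdio.h>

static void print_clip_flush_ranges(unsigned long base, int stride,
				    int clip_x, int clip_w, int clip_h)
{
	int y;

	for (y = 0; y < clip_h; y++) {
		unsigned long start = base + (unsigned long)stride * y + clip_x;

		printf("row %d: sync [0x%lx, 0x%lx)\n",
		       y, start, start + clip_w);
	}
}

int main(void)
{
	/* hypothetical 640x480 ARGB8888 surface, clip rect (16,8)-(144,24) */
	int bpp = 32;
	int stride = 640 * bpp / 8;		/* 2560 bytes per row */
	int clip_x = 16 * bpp / 8;		/* byte offset of clip x1 */
	int clip_w = (144 - 16) * bpp / 8;	/* clipped row width in bytes */
	int clip_h = 24 - 8;			/* clipped height in rows */

	print_clip_flush_ranges(0x40000000UL, stride, clip_x, clip_w, clip_h);
	return 0;
}
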
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_cache *csrc, *cdst, *cmsk;
	enum pt_status pt;

	csrc = &cmd->src_cache;
	cdst = &cmd->dst_cache;
	cmsk = &cmd->msk_cache;

	if (cmd->srcen) {
		csrc->addr = cmd->src.addr.start +
				(cmd->src.stride * cmd->src_rect.y1);
		csrc->size = cmd->src.stride *
				(cmd->src_rect.y2 - cmd->src_rect.y1);

		if (cmd->src.addr.cacheable)
			cmd->size_all += csrc->size;

		if (cmd->src.addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, csrc->addr, csrc->size);
			if (pt == PT_FAULT)
				return -1;
		}
	}

	if (cmd->msken) {
		cmsk->addr = cmd->msk.addr.start +
				(cmd->msk.stride * cmd->msk_rect.y1);
		cmsk->size = cmd->msk.stride *
				(cmd->msk_rect.y2 - cmd->msk_rect.y1);

		if (cmd->msk.addr.cacheable)
			cmd->size_all += cmsk->size;

		if (cmd->msk.addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, cmsk->addr, cmsk->size);
			if (pt == PT_FAULT)
				return -1;
		}
	}

	/* calculate horizontally clipped region */
	if (cmd->dsten) {
		if (cmd->clipping.enable) {
			cdst->addr = cmd->dst.addr.start +
					(cmd->dst.stride * cmd->clipping.y1);
			cdst->size = cmd->dst.stride *
					(cmd->clipping.y2 - cmd->clipping.y1);
		} else {
			cdst->addr = cmd->dst.addr.start +
					(cmd->dst.stride * cmd->dst_rect.y1);
			cdst->size = cmd->dst.stride *
					(cmd->dst_rect.y2 - cmd->dst_rect.y1);
		}

		if (cmd->dst.addr.cacheable)
			cmd->size_all += cdst->size;

		if (cmd->dst.addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, cdst->addr, cdst->size);
			if (pt == PT_FAULT)
				return -1;
		}
	}

	fimg2d_debug("cached size all = %d\n", cmd->size_all);

#ifdef PERF_PROFILE
	perf_start(cmd->ctx, PERF_L1CC_FLUSH);
#endif
	if (cmd->size_all >= L1_CACHE_SIZE) {
		fimg2d_debug("innercache all\n");
		flush_all_cpu_caches();
	} else {
		fimg2d_debug("innercache range\n");
		if (cmd->srcen && cmd->src.addr.cacheable)
			fimg2d_dma_sync_inner(csrc->addr, csrc->size, DMA_TO_DEVICE);

		if (cmd->msken && cmd->msk.addr.cacheable)
			fimg2d_dma_sync_inner(cmsk->addr, cmsk->size, DMA_TO_DEVICE);

		if (cmd->dsten && cmd->dst.addr.cacheable)
			fimg2d_dma_sync_inner(cdst->addr, cdst->size, DMA_BIDIRECTIONAL);
	}
#ifdef PERF_PROFILE
	perf_end(cmd->ctx, PERF_L1CC_FLUSH);
#endif

	return 0;
}
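
/*
 * Illustrative sketch (not part of the driver): the version above sums the
 * cacheable bytes of the source, mask and destination bands and flushes the
 * whole CPU cache once the total reaches L1_CACHE_SIZE; otherwise it syncs
 * each band on its own.  The struct, helpers and the 64 KiB threshold below
 * are assumptions made only for this example.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct band {
	unsigned long start;	/* buffer base address */
	int stride;		/* bytes per row */
	int y1, y2;		/* vertical extent of the blit (or clip) rect */
};

/* byte range covering rows y1..y2-1 of the image */
static void band_range(const struct band *b, unsigned long *addr, size_t *size)
{
	*addr = b->start + (unsigned long)b->stride * b->y1;
	*size = (size_t)b->stride * (b->y2 - b->y1);
}

/* same policy as above: flush everything once the cached total hits L1 size */
static bool flush_all(size_t total_cached, size_t l1_cache_size)
{
	return total_cached >= l1_cache_size;
}

int main(void)
{
	/* hypothetical destination: rows 10..90 of a 1280-byte-stride surface */
	struct band dst = { .start = 0x50000000UL, .stride = 1280,
			    .y1 = 10, .y2 = 90 };
	unsigned long addr;
	size_t size;

	band_range(&dst, &addr, &size);
	printf("band at 0x%lx, %zu bytes, flush all caches: %d\n",
	       addr, size, flush_all(size, 64 * 1024));
	return 0;
}
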
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_param *p = &cmd->param;
	struct fimg2d_image *img;
	struct fimg2d_clip *clp;
	struct fimg2d_rect *r;
	struct fimg2d_dma *c;
	enum pt_status pt;
	int clip_x, clip_w, clip_h, y, dir, i;
	unsigned long clip_start;

	clp = &p->clipping;

	for (i = 0; i < MAX_IMAGES; i++) {
		img = &cmd->image[i];
		c = &cmd->dma[i];
		r = &img->rect;

		if (!img->addr.type)
			continue;

		/* calculate horizontally clipped region */
		if (i == IMAGE_DST && clp->enable) {
			c->addr = img->addr.start + (img->stride * clp->y1);
			c->size = img->stride * (clp->y2 - clp->y1);
		} else {
			c->addr = img->addr.start + (img->stride * r->y1);
			c->size = img->stride * (r->y2 - r->y1);
		}

		/* check pagetable */
		if (img->addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, c->addr, c->size);
			if (pt == PT_FAULT)
				return -1;
		}

		if (img->need_cacheopr && i != IMAGE_TMP) {
			c->cached = c->size;
			cmd->dma_all += c->cached;
		}
	}

#ifdef PERF_PROFILE
	perf_start(cmd->ctx, PERF_INNERCACHE);
#endif

	if (is_inner_flushall(cmd->dma_all))
		flush_all_cpu_caches();
	else {
		for (i = 0; i < MAX_IMAGES; i++) {
			img = &cmd->image[i];
			c = &cmd->dma[i];
			r = &img->rect;

			if (!img->addr.type || !c->cached)
				continue;

			if (i == IMAGE_DST)
				dir = DMA_BIDIRECTIONAL;
			else
				dir = DMA_TO_DEVICE;

			if (i == IMAGE_DST && clp->enable) {
				clip_w = width2bytes(clp->x2 - clp->x1,
							img->fmt);
				clip_x = pixel2offset(clp->x1, img->fmt);
				clip_h = clp->y2 - clp->y1;
			} else {
				clip_w = width2bytes(r->x2 - r->x1, img->fmt);
				clip_x = pixel2offset(r->x1, img->fmt);
				clip_h = r->y2 - r->y1;
			}

			if (is_inner_flushrange(img->stride - clip_w))
				fimg2d_dma_sync_inner(c->addr, c->cached, dir);
			else {
				for (y = 0; y < clip_h; y++) {
					clip_start = c->addr +
						(img->stride * y) + clip_x;
					fimg2d_dma_sync_inner(clip_start,
								clip_w, dir);
				}
			}
		}
	}
#ifdef PERF_PROFILE
	perf_end(cmd->ctx, PERF_INNERCACHE);
#endif

#ifdef CONFIG_OUTER_CACHE
#ifdef PERF_PROFILE
	perf_start(cmd->ctx, PERF_OUTERCACHE);
#endif
	if (is_outer_flushall(cmd->dma_all))
		outer_flush_all();
	else {
		for (i = 0; i < MAX_IMAGES; i++) {
			img = &cmd->image[i];
			c = &cmd->dma[i];
			r = &img->rect;

			if (!img->addr.type)
				continue;

			/* clean pagetable */
			if (img->addr.type == ADDR_USER)
				fimg2d_clean_outer_pagetable(mm, c->addr, c->size);

			if (!c->cached)
				continue;

			if (i == IMAGE_DST)
				dir = CACHE_FLUSH;
			else
				dir = CACHE_CLEAN;

			if (i == IMAGE_DST && clp->enable) {
				clip_w = width2bytes(clp->x2 - clp->x1,
							img->fmt);
				clip_x = pixel2offset(clp->x1, img->fmt);
				clip_h = clp->y2 - clp->y1;
			} else {
				clip_w = width2bytes(r->x2 - r->x1, img->fmt);
				clip_x = pixel2offset(r->x1, img->fmt);
				clip_h = r->y2 - r->y1;
			}

			if (is_outer_flushrange(img->stride - clip_w))
				fimg2d_dma_sync_outer(mm, c->addr,
							c->cached, dir);
			else {
				for (y = 0; y < clip_h; y++) {
					clip_start = c->addr +
						(img->stride * y) + clip_x;
					fimg2d_dma_sync_outer(mm, clip_start,
								clip_w, dir);
				}
			}
		}
	}
#ifdef PERF_PROFILE
	perf_end(cmd->ctx, PERF_OUTERCACHE);
#endif
#endif

	return 0;
}
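
/*
 * Illustrative sketch (not part of the driver): how the loops above pick the
 * maintenance operation per image.  The blitter writes the destination, so
 * its lines are flushed (cleaned, then invalidated; DMA_BIDIRECTIONAL /
 * CACHE_FLUSH above), while source and mask buffers are only read by the
 * device and just need a clean (DMA_TO_DEVICE / CACHE_CLEAN).  The enum
 * values and the helper are hypothetical stand-ins, not the kernel's own.
 */
enum sketch_image { SKETCH_SRC, SKETCH_MSK, SKETCH_TMP, SKETCH_DST };
enum sketch_cache_op { SKETCH_CLEAN, SKETCH_FLUSH };

static enum sketch_cache_op cache_op_for(enum sketch_image i)
{
	/* destination: clean dirty lines, then invalidate stale ones */
	return (i == SKETCH_DST) ? SKETCH_FLUSH : SKETCH_CLEAN;
}
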