/*
 * store_user_dst - copy blit parameters from user space and compute the
 * byte range of the destination buffer the blit will touch.
 *
 * @buf:     user-space pointer to the blit request
 * @dst_buf: out: start address and size of the destination DMA range
 *
 * Mirrors the clipping logic of fimg2d_calc_dma_size(): the clip window
 * bounds the range only when clipping is enabled; otherwise the
 * destination rect is used.
 *
 * Returns 0 on success, -EFAULT if the user copy fails.
 */
static int store_user_dst(struct fimg2d_blit __user *buf,
		struct fimg2d_dma *dst_buf)
{
	struct fimg2d_blit blt;
	struct fimg2d_clip *clp;
	struct fimg2d_image dst_img;
	int y1, y2, bpp, stride;
	int len = sizeof(struct fimg2d_image);

	memset(&dst_img, 0, len);

	if (copy_from_user(&blt, buf, sizeof(blt)))
		return -EFAULT;

	/* blt.dst may be NULL; dst_img then stays zeroed */
	if (blt.dst && copy_from_user(&dst_img, blt.dst, len))
		return -EFAULT;

	clp = &blt.param.clipping;

	/*
	 * Consistent with fimg2d_calc_dma_size(): honor the clip window
	 * only when clipping is actually enabled, and fall back to the
	 * destination rect otherwise.  (The old code read clp->y1/y2
	 * unconditionally, yielding a stale/bogus range when clipping
	 * was disabled.)
	 */
	if (clp->enable) {
		y1 = clp->y1;
		y2 = clp->y2;
	} else {
		y1 = dst_img.rect.y1;
		y2 = dst_img.rect.y2;
	}

	bpp = bit_per_pixel(&dst_img, 0);
	stride = width2bytes(dst_img.width, bpp);

	dst_buf->addr = dst_img.addr.start + (stride * y1);
	dst_buf->size = stride * (y2 - y1);

	return 0;
}
static void inner_flush_clip_range(struct fimg2d_bltcmd *cmd) { struct fimg2d_blit *blt = &cmd->blt; struct fimg2d_image *img; struct fimg2d_clip *clp; struct fimg2d_rect *r; struct fimg2d_dma *c; int i, y, clp_x, clp_w, clp_h, dir; int x1, y1, x2, y2, bpp, stride; unsigned long start; clp = &blt->param.clipping; dir = DMA_TO_DEVICE; for (i = 0; i < MAX_IMAGES; i++) { if (i == IMAGE_DST) dir = DMA_BIDIRECTIONAL; img = &cmd->image[i]; r = &img->rect; /* 1st plane */ c = &cmd->dma[i].base; if (!c->cached) continue; if (i == IMAGE_DST && clp->enable) { x1 = clp->x1; y1 = clp->y1; x2 = clp->x2; y2 = clp->y2; } else { x1 = r->x1; y1 = r->y1; x2 = r->x2; y2 = r->y2; } bpp = bit_per_pixel(img, 0); stride = width2bytes(img->width, bpp); clp_x = pixel2offset(x1, bpp); clp_w = width2bytes(x2 - x1, bpp); clp_h = y2 - y1; if (is_inner_flushrange(stride - clp_w)) fimg2d_dma_sync_inner(c->addr, c->cached, dir); else { for (y = 0; y < clp_h; y++) { start = c->addr + (stride * y) + clp_x; fimg2d_dma_sync_inner(start, clp_w, dir); } } /* 2nd plane */ if (!is_yuvfmt(img->fmt)) continue; if (img->order != P2_CRCB && img->order != P2_CBCR) continue; c = &cmd->dma[i].plane2; if (!c->cached) continue; bpp = bit_per_pixel(img, 1); stride = width2bytes(img->width, bpp); clp_x = pixel2offset(x1, bpp); clp_w = width2bytes(x2 - x1, bpp); if (img->fmt == CF_YCBCR_420) clp_h = (y2 - y1)/2; else clp_h = y2 - y1; if (is_inner_flushrange(stride - clp_w)) fimg2d_dma_sync_inner(c->addr, c->cached, dir); else { for (y = 0; y < clp_h; y++) { start = c->addr + (stride * y) + clp_x; fimg2d_dma_sync_inner(c->addr, c->cached, dir); } } } }
static void outer_flush_clip_range(struct fimg2d_bltcmd *cmd) { struct mm_struct *mm; struct fimg2d_blit *blt = &cmd->blt; struct fimg2d_image *img; struct fimg2d_clip *clp; struct fimg2d_rect *r; struct fimg2d_dma *c; int clp_x, clp_w, clp_h, y, i, dir; int x1, y1, x2, y2, bpp, stride; unsigned long start; if (WARN_ON(!cmd->ctx)) return; mm = cmd->ctx->mm; clp = &blt->param.clipping; dir = CACHE_CLEAN; for (i = 0; i < MAX_IMAGES; i++) { img = &cmd->image[i]; /* clean pagetable on outercache */ c = &cmd->dma[i].base; if (c->size) fimg2d_clean_outer_pagetable(mm, c->addr, c->size); c = &cmd->dma[i].plane2; if (c->size) fimg2d_clean_outer_pagetable(mm, c->addr, c->size); if (i == IMAGE_DST) dir = CACHE_FLUSH; /* 1st plane */ c = &cmd->dma[i].base; if (!c->cached) continue; r = &img->rect; if (i == IMAGE_DST && clp->enable) { x1 = clp->x1; y1 = clp->y1; x2 = clp->x2; y2 = clp->y2; } else { x1 = r->x1; y1 = r->y1; x2 = r->x2; y2 = r->y2; } bpp = bit_per_pixel(img, 0); stride = width2bytes(img->width, bpp); clp_x = pixel2offset(x1, bpp); clp_w = width2bytes(x2 - x1, bpp); clp_h = y2 - y1; if (is_outer_flushrange(stride - clp_w)) fimg2d_dma_sync_outer(mm, c->addr, c->cached, dir); else { for (y = 0; y < clp_h; y++) { start = c->addr + (stride * y) + clp_x; fimg2d_dma_sync_outer(mm, start, clp_w, dir); } } /* 2nd plane */ if (!is_yuvfmt(img->fmt)) continue; if (img->order != P2_CRCB && img->order != P2_CBCR) continue; c = &cmd->dma[i].plane2; if (!c->cached) continue; bpp = bit_per_pixel(img, 1); stride = width2bytes(img->width, bpp); clp_x = pixel2offset(x1, bpp); clp_w = width2bytes(x2 - x1, bpp); if (img->fmt == CF_YCBCR_420) clp_h = (y2 - y1)/2; else clp_h = y2 - y1; if (is_outer_flushrange(stride - clp_w)) fimg2d_dma_sync_outer(mm, c->addr, c->cached, dir); else { for (y = 0; y < clp_h; y++) { start = c->addr + (stride * y) + clp_x; fimg2d_dma_sync_outer(mm, start, clp_w, dir); } } } }
static int fimg2d_calc_dma_size(struct fimg2d_bltcmd *cmd) { struct fimg2d_blit *blt = &cmd->blt; struct fimg2d_image *img; struct fimg2d_clip *clp; struct fimg2d_rect *r; struct fimg2d_dma *c; enum addr_space addr_type; int i, y1, y2, stride, clp_h, bpp; addr_type = blt->dst->addr.type; if (addr_type != ADDR_USER && addr_type != ADDR_USER_CONTIG) return -1; clp = &blt->param.clipping; for (i = 0; i < MAX_IMAGES; i++) { img = &cmd->image[i]; r = &img->rect; if (i == IMAGE_DST && clp->enable) { y1 = clp->y1; y2 = clp->y2; } else { y1 = r->y1; y2 = r->y2; } /* 1st plane */ bpp = bit_per_pixel(img, 0); stride = width2bytes(img->width, bpp); clp_h = y2 - y1; c = &cmd->dma[i].base; c->addr = img->addr.start + (stride * y1); c->size = stride * clp_h; if (img->need_cacheopr) { c->cached = c->size; cmd->dma_all += c->cached; } if (!is_yuvfmt(img->fmt)) continue; /* 2nd plane */ if (img->order == P2_CRCB || img->order == P2_CBCR) { bpp = bit_per_pixel(img, 1); stride = width2bytes(img->width, bpp); if (img->fmt == CF_YCBCR_420) clp_h = (y2 - y1)/2; else clp_h = y2 - y1; c = &cmd->dma[i].plane2; c->addr = img->plane2.start + (stride * y1); c->size = stride * clp_h; if (img->need_cacheopr) { c->cached = c->size; cmd->dma_all += c->cached; } } } return 0; }