int fimg2d_check_pgd(struct mm_struct *mm, struct fimg2d_bltcmd *cmd)
{
	struct fimg2d_dma *c;
	struct fimg2d_image *img;
	enum pt_status pt;
	int i, ret;

	for (i = 0; i < MAX_IMAGES; i++) {
		img = &cmd->image[i];
		if (!img->addr.type)
			continue;

		c = &cmd->dma[i].base;
		if (!c->size)
			continue;

		pt = fimg2d_check_pagetable(mm, c->addr, c->size, i == IDST);
		if (pt == PT_FAULT) {
			ret = -EFAULT;
			goto err_pgtable;
		}

		/* 2nd plane */
		if (!is_yuvfmt(img->fmt))
			continue;

		if (img->order != P2_CRCB && img->order != P2_CBCR)
			continue;

		c = &cmd->dma[i].plane2;
		if (!c->size)
			continue;

		pt = fimg2d_check_pagetable(mm, c->addr, c->size, i == IDST);
		if (pt == PT_FAULT) {
			ret = -EFAULT;
			goto err_pgtable;
		}
	}
	return 0;

err_pgtable:
	return ret;
}
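/*
 * A minimal sketch of how fimg2d_check_pagetable() could be implemented,
 * assuming the usual <asm/pgtable.h> walkers: step through the process
 * page table one page at a time and report PT_FAULT at the first hole.
 * The 'write' flag (the "i == IDST" argument above) is an assumption
 * here: the destination must also be writable. PT_NORMAL as the success
 * value is likewise assumed; the real driver may differ.
 */
static enum pt_status fimg2d_check_pagetable(struct mm_struct *mm,
		unsigned long vaddr, size_t size, bool write)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* cover every page touched by [vaddr, vaddr + size) */
	size = PAGE_ALIGN(size + (vaddr & ~PAGE_MASK));
	vaddr &= PAGE_MASK;

	while ((long)size > 0) {
		pgd = pgd_offset(mm, vaddr);
		if (pgd_none_or_clear_bad(pgd))
			return PT_FAULT;

		pud = pud_offset(pgd, vaddr);
		if (pud_none_or_clear_bad(pud))
			return PT_FAULT;

		pmd = pmd_offset(pud, vaddr);
		if (pmd_none_or_clear_bad(pmd))
			return PT_FAULT;

		pte = pte_offset_map(pmd, vaddr);
		if (!pte_present(*pte) || (write && !pte_write(*pte))) {
			pte_unmap(pte);
			return PT_FAULT;
		}
		pte_unmap(pte);

		vaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return PT_NORMAL;
}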
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_dma *c;
	enum pt_status pt;
	int i;

	fimg2d_calc_dma_size(cmd);

	if (fimg2d_check_address(cmd))
		return -EINVAL;

	for (i = 0; i < MAX_IMAGES; i++) {
		c = &cmd->dma[i].base;
		if (!c->size)
			continue;

		pt = fimg2d_check_pagetable(mm, c->addr, c->size);
		if (pt == PT_FAULT)
			return -EFAULT;
	}

#ifndef CCI_SNOOP
	/* no CCI snooping: CPU caches must be maintained by hand */
	fimg2d_debug("cache flush\n");
	perf_start(cmd, PERF_CACHE);
	if (is_inner_flushall(cmd->dma_all)) {
		inner_touch_range(cmd);
		flush_all_cpu_caches();
	} else {
		inner_flush_clip_range(cmd);
	}

#ifdef CONFIG_OUTER_CACHE
	if (is_outer_flushall(cmd->dma_all))
		outer_flush_all();
	else
		outer_flush_clip_range(cmd);
#endif
	perf_end(cmd, PERF_CACHE);
#endif
	return 0;
}
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_dma *c;
	enum pt_status pt;
	int i, ret;

	fimg2d_calc_dma_size(cmd);

	for (i = 0; i < MAX_IMAGES; i++) {
		c = &cmd->dma[i].base;
		if (!c->size)
			continue;

		pt = fimg2d_check_pagetable(mm, c->addr, c->size);
		if (pt == PT_FAULT) {
			ret = -EFAULT;
			goto err_pgtable;
		}
	}

	perf_start(cmd, PERF_CACHE);
	if (is_inner_flushall(cmd->dma_all))
		flush_cache_all();
	else
		inner_flush_clip_range(cmd);

#ifdef CONFIG_OUTER_CACHE
	if (is_outer_flushall(cmd->dma_all))
		outer_flush_all();
	else
		outer_flush_clip_range(cmd);
#endif
	perf_end(cmd, PERF_CACHE);

	return 0;

err_pgtable:
	return ret;
}
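/*
 * The flush-all vs. flush-range decision above hinges on helpers like
 * these. A plausible sketch, assuming simple byte-count thresholds (the
 * SZ_1M / SZ_2M / SZ_1K values are illustrative, not the driver's actual
 * tuning): once the cacheable working set exceeds the cache's break-even
 * point, flushing the whole cache is cheaper than walking the range.
 */
static inline bool is_inner_flushall(size_t size)
{
	return size >= SZ_1M;	/* assumed inner-cache break-even */
}

static inline bool is_outer_flushall(size_t size)
{
	return size >= SZ_2M;	/* assumed outer (L2) break-even */
}

static inline bool is_inner_flushrange(size_t hole)
{
	/* sync one contiguous run when the per-row gap is small */
	return hole < SZ_1K;	/* assumed threshold */
}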
int fimg2d_check_pgd(struct mm_struct *mm, struct fimg2d_bltcmd *cmd)
{
	struct fimg2d_dma *c;
	enum pt_status pt;
	int i, ret;

	for (i = 0; i < MAX_IMAGES; i++) {
		c = &cmd->dma[i].base;
		if (!c->size)
			continue;

		pt = fimg2d_check_pagetable(mm, c->addr, c->size, i == IDST);
		if (pt == PT_FAULT) {
			ret = -EFAULT;
			goto err_pgtable;
		}
	}
	return 0;

err_pgtable:
	return ret;
}
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_cache *csrc, *cdst, *cmsk;
	enum pt_status pt;

	csrc = &cmd->src_cache;
	cdst = &cmd->dst_cache;
	cmsk = &cmd->msk_cache;

	if (cmd->srcen) {
		csrc->addr = cmd->src.addr.start +
				(cmd->src.stride * cmd->src_rect.y1);
		csrc->size = cmd->src.stride *
				(cmd->src_rect.y2 - cmd->src_rect.y1);

		if (cmd->src.addr.cacheable)
			cmd->size_all += csrc->size;

		if (cmd->src.addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, csrc->addr, csrc->size);
			if (pt == PT_FAULT)
				return -1;
		}
	}

	if (cmd->msken) {
		cmsk->addr = cmd->msk.addr.start +
				(cmd->msk.stride * cmd->msk_rect.y1);
		cmsk->size = cmd->msk.stride *
				(cmd->msk_rect.y2 - cmd->msk_rect.y1);

		if (cmd->msk.addr.cacheable)
			cmd->size_all += cmsk->size;

		if (cmd->msk.addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, cmsk->addr, cmsk->size);
			if (pt == PT_FAULT)
				return -1;
		}
	}

	/* calculate horizontally clipped region */
	if (cmd->dsten) {
		if (cmd->clipping.enable) {
			cdst->addr = cmd->dst.addr.start +
					(cmd->dst.stride * cmd->clipping.y1);
			cdst->size = cmd->dst.stride *
					(cmd->clipping.y2 - cmd->clipping.y1);
		} else {
			cdst->addr = cmd->dst.addr.start +
					(cmd->dst.stride * cmd->dst_rect.y1);
			cdst->size = cmd->dst.stride *
					(cmd->dst_rect.y2 - cmd->dst_rect.y1);
		}

		if (cmd->dst.addr.cacheable)
			cmd->size_all += cdst->size;

		if (cmd->dst.addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, cdst->addr, cdst->size);
			if (pt == PT_FAULT)
				return -1;
		}
	}

	fimg2d_debug("cached size all = %d\n", cmd->size_all);

#ifdef PERF_PROFILE
	perf_start(cmd->ctx, PERF_L1CC_FLUSH);
#endif
	if (cmd->size_all >= L1_CACHE_SIZE) {
		fimg2d_debug("innercache all\n");
		flush_all_cpu_caches();
	} else {
		fimg2d_debug("innercache range\n");
		if (cmd->srcen && cmd->src.addr.cacheable)
			fimg2d_dma_sync_inner(csrc->addr, csrc->size,
						DMA_TO_DEVICE);
		if (cmd->msken && cmd->msk.addr.cacheable)
			fimg2d_dma_sync_inner(cmsk->addr, cmsk->size,
						DMA_TO_DEVICE);
		if (cmd->dsten && cmd->dst.addr.cacheable)
			fimg2d_dma_sync_inner(cdst->addr, cdst->size,
						DMA_BIDIRECTIONAL);
	}
#ifdef PERF_PROFILE
	perf_end(cmd->ctx, PERF_L1CC_FLUSH);
#endif
	return 0;
}
static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
{
	struct mm_struct *mm = cmd->ctx->mm;
	struct fimg2d_param *p = &cmd->param;
	struct fimg2d_image *img;
	struct fimg2d_clip *clp;
	struct fimg2d_rect *r;
	struct fimg2d_dma *c;
	enum pt_status pt;
	int clip_x, clip_w, clip_h, y, dir, i;
	unsigned long clip_start;

	clp = &p->clipping;

	for (i = 0; i < MAX_IMAGES; i++) {
		img = &cmd->image[i];
		c = &cmd->dma[i];
		r = &img->rect;

		if (!img->addr.type)
			continue;

		/* calculate horizontally clipped region */
		if (i == IMAGE_DST && clp->enable) {
			c->addr = img->addr.start + (img->stride * clp->y1);
			c->size = img->stride * (clp->y2 - clp->y1);
		} else {
			c->addr = img->addr.start + (img->stride * r->y1);
			c->size = img->stride * (r->y2 - r->y1);
		}

		/* check pagetable */
		if (img->addr.type == ADDR_USER) {
			pt = fimg2d_check_pagetable(mm, c->addr, c->size);
			if (pt == PT_FAULT)
				return -1;
		}

		if (img->need_cacheopr && i != IMAGE_TMP) {
			c->cached = c->size;
			cmd->dma_all += c->cached;
		}
	}

#ifdef PERF_PROFILE
	perf_start(cmd->ctx, PERF_INNERCACHE);
#endif
	if (is_inner_flushall(cmd->dma_all))
		flush_all_cpu_caches();
	else {
		for (i = 0; i < MAX_IMAGES; i++) {
			img = &cmd->image[i];
			c = &cmd->dma[i];
			r = &img->rect;

			if (!img->addr.type || !c->cached)
				continue;

			if (i == IMAGE_DST)
				dir = DMA_BIDIRECTIONAL;
			else
				dir = DMA_TO_DEVICE;

			if (i == IDST && clp->enable) {
				clip_w = width2bytes(clp->x2 - clp->x1,
							img->fmt);
				clip_x = pixel2offset(clp->x1, img->fmt);
				clip_h = clp->y2 - clp->y1;
			} else {
				clip_w = width2bytes(r->x2 - r->x1, img->fmt);
				clip_x = pixel2offset(r->x1, img->fmt);
				clip_h = r->y2 - r->y1;
			}

			if (is_inner_flushrange(img->stride - clip_w))
				fimg2d_dma_sync_inner(c->addr, c->cached, dir);
			else {
				for (y = 0; y < clip_h; y++) {
					clip_start = c->addr +
						(img->stride * y) + clip_x;
					fimg2d_dma_sync_inner(clip_start,
								clip_w, dir);
				}
			}
		}
	}
#ifdef PERF_PROFILE
	perf_end(cmd->ctx, PERF_INNERCACHE);
#endif

#ifdef CONFIG_OUTER_CACHE
#ifdef PERF_PROFILE
	perf_start(cmd->ctx, PERF_OUTERCACHE);
#endif
	if (is_outer_flushall(cmd->dma_all))
		outer_flush_all();
	else {
		for (i = 0; i < MAX_IMAGES; i++) {
			img = &cmd->image[i];
			c = &cmd->dma[i];
			r = &img->rect;

			if (!img->addr.type)
				continue;

			/* clean pagetable */
			if (img->addr.type == ADDR_USER)
				fimg2d_clean_outer_pagetable(mm, c->addr,
								c->size);

			if (!c->cached)
				continue;

			if (i == IMAGE_DST)
				dir = CACHE_FLUSH;
			else
				dir = CACHE_CLEAN;

			if (i == IDST && clp->enable) {
				clip_w = width2bytes(clp->x2 - clp->x1,
							img->fmt);
				clip_x = pixel2offset(clp->x1, img->fmt);
				clip_h = clp->y2 - clp->y1;
			} else {
				clip_w = width2bytes(r->x2 - r->x1, img->fmt);
				clip_x = pixel2offset(r->x1, img->fmt);
				clip_h = r->y2 - r->y1;
			}

			if (is_outer_flushrange(img->stride - clip_w))
				fimg2d_dma_sync_outer(mm, c->addr,
							c->cached, dir);
			else {
				for (y = 0; y < clip_h; y++) {
					clip_start = c->addr +
						(img->stride * y) + clip_x;
					fimg2d_dma_sync_outer(mm, clip_start,
								clip_w, dir);
				}
			}
		}
	}
#ifdef PERF_PROFILE
	perf_end(cmd->ctx, PERF_OUTERCACHE);
#endif
#endif
	return 0;
}
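/*
 * A minimal sketch of the inner-cache sync used above, assuming the ARM
 * dmac_map_area()/dmac_flush_range() maintenance hooks; the real helper
 * may differ. Source and mask buffers (DMA_TO_DEVICE) only need a clean,
 * while the destination (DMA_BIDIRECTIONAL) must be cleaned and
 * invalidated because the G2D engine writes back to it.
 */
static void fimg2d_dma_sync_inner(unsigned long addr, size_t size, int dir)
{
	if (dir == DMA_TO_DEVICE)
		dmac_map_area((void *)addr, size, dir);
	else if (dir == DMA_BIDIRECTIONAL)
		dmac_flush_range((void *)addr, (void *)(addr + size));
}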