int fimg2d4x_bitblt(struct fimg2d_control *ctrl) { int ret = 0; enum addr_space addr_type; struct fimg2d_context *ctx; struct fimg2d_bltcmd *cmd; unsigned long *pgd; fimg2d_debug("enter blitter\n"); while (1) { cmd = fimg2d_get_command(ctrl); if (!cmd) break; ctx = cmd->ctx; atomic_set(&ctrl->busy, 1); perf_start(cmd, PERF_SFR); ctrl->configure(ctrl, cmd); perf_end(cmd, PERF_SFR); addr_type = cmd->image[IDST].addr.type; if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) { if(!ctx->mm || !ctx->mm->pgd) { atomic_set(&ctrl->busy, 0); goto fail_n_del; } pgd = (unsigned long *)ctx->mm->pgd; exynos_sysmmu_enable(ctrl->dev, (unsigned long)virt_to_phys(pgd)); fimg2d_debug("sysmmu enable: pgd %p ctx %p seq_no(%u)\n", pgd, ctx, cmd->blt.seq_no); exynos_sysmmu_set_pbuf(ctrl->dev, nbufs, prefbuf); fimg2d_debug("set smmu prefbuf\n"); } fimg2d4x_pre_bitblt(ctrl, cmd); perf_start(cmd, PERF_BLIT); /* start blit */ ctrl->run(ctrl); ret = fimg2d4x_blit_wait(ctrl, cmd); perf_end(cmd, PERF_BLIT); if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) { exynos_sysmmu_disable(ctrl->dev); fimg2d_debug("sysmmu disable\n"); } fail_n_del: fimg2d_del_command(ctrl, cmd); } fimg2d_debug("exit blitter\n"); return ret; }
/*
 * g2d_sysmmu_off - turn off the System MMU attached to the G2D device.
 * @g2d_dev: G2D global state holding the device handle
 */
void g2d_sysmmu_off(struct g2d_global *g2d_dev)
{
	struct device *dev = g2d_dev->dev;

	exynos_sysmmu_disable(dev);
}
void fimg2d4x_bitblt(struct fimg2d_control *info) { struct fimg2d_context *ctx; struct fimg2d_bltcmd *cmd; unsigned long *pgd; int ret; fimg2d_debug("enter blitter\n"); #ifdef CONFIG_PM_RUNTIME pm_runtime_get_sync(info->dev); fimg2d_debug("pm_runtime_get_sync\n"); #endif fimg2d_clk_on(info); while ((cmd = fimg2d_get_first_command(info))) { ctx = cmd->ctx; if (info->err) { printk(KERN_ERR "[%s] device error\n", __func__); goto blitend; } atomic_set(&info->busy, 1); ret = info->configure(info, cmd); if (ret) goto blitend; if (cmd->image[IDST].addr.type != ADDR_PHYS) { pgd = (unsigned long *)ctx->mm->pgd; exynos_sysmmu_enable(info->dev, (unsigned long)virt_to_phys(pgd)); fimg2d_debug("sysmmu enable: pgd %p ctx %p seq_no(%u)\n", pgd, ctx, cmd->seq_no); } fimg2d4x_pre_bitblt(info, cmd); #ifdef PERF_PROFILE perf_start(cmd->ctx, PERF_BLIT); #endif /* start blit */ info->run(info); fimg2d4x_blit_wait(info, cmd); #ifdef PERF_PROFILE perf_end(cmd->ctx, PERF_BLIT); #endif if (cmd->image[IDST].addr.type != ADDR_PHYS) { exynos_sysmmu_disable(info->dev); fimg2d_debug("sysmmu disable\n"); } blitend: fimg2d_del_command(info, cmd); /* wake up context */ if (!atomic_read(&ctx->ncmd)) wake_up(&ctx->wait_q); } atomic_set(&info->active, 0); fimg2d_clk_off(info); #ifdef CONFIG_PM_RUNTIME pm_runtime_put_sync(info->dev); fimg2d_debug("pm_runtime_put_sync\n"); #endif fimg2d_debug("exit blitter\n"); }
/*
 * g2d_do_blit - validate source/destination buffers, prepare the MMU and
 * caches, then kick one bitblt on the G2D hardware.
 * @g2d_dev: G2D global state
 * @params:  user-supplied blit parameters (rects, clip, flags)
 *
 * Kernel-memory blits translate the physical addresses to kernel virtual
 * ones and use init_mm's page table; user-memory blits use the current
 * task's page table and are additionally validated against it.
 *
 * Returns true on success, false on any validation/setup failure.
 */
int g2d_do_blit(struct g2d_global *g2d_dev, g2d_params *params)
{
	unsigned long pgd;
	int need_dst_clean = true;

	if ((params->src_rect.addr == NULL)
			|| (params->dst_rect.addr == NULL)) {
		FIMG2D_ERROR("error : addr Null\n");
		return false;
	}

	if (params->flag.memory_type == G2D_MEMORY_KERNEL) {
		/* kernel buffers arrive as physical addresses */
		params->src_rect.addr = (unsigned char *)phys_to_virt(
				(unsigned long)params->src_rect.addr);
		params->dst_rect.addr = (unsigned char *)phys_to_virt(
				(unsigned long)params->dst_rect.addr);
		pgd = (unsigned long)init_mm.pgd;
	} else {
		pgd = (unsigned long)current->mm->pgd;
	}

	if (params->flag.memory_type == G2D_MEMORY_USER) {
		g2d_clip clip_src;
		/* BUGFIX: '&params' was corrupted to a pilcrow ('&para;'
		 * mojibake) in all three arguments below; restored. */
		g2d_clip_for_src(&params->src_rect, &params->dst_rect,
				&params->clip, &clip_src);

		/* HW cannot handle overlapping user src/dst regions */
		if (g2d_check_overlap(params->src_rect, params->dst_rect,
					params->clip))
			return false;

		g2d_dev->src_attribute = g2d_check_pagetable(
				(unsigned char *)GET_START_ADDR(params->src_rect),
				(unsigned int)GET_RECT_SIZE(params->src_rect) + 8,
				(u32)virt_to_phys((void *)pgd));
		if (g2d_dev->src_attribute == G2D_PT_NOTVALID) {
			FIMG2D_DEBUG("Src is not in valid pagetable\n");
			return false;
		}

		g2d_dev->dst_attribute = g2d_check_pagetable(
				(unsigned char *)GET_START_ADDR_C(params->dst_rect,
					params->clip),
				(unsigned int)GET_RECT_SIZE_C(params->dst_rect,
					params->clip),
				(u32)virt_to_phys((void *)pgd));
		if (g2d_dev->dst_attribute == G2D_PT_NOTVALID) {
			FIMG2D_DEBUG("Dst is not in valid pagetable\n");
			return false;
		}

		g2d_pagetable_clean(
				(unsigned char *)GET_START_ADDR(params->src_rect),
				(u32)GET_RECT_SIZE(params->src_rect) + 8,
				(u32)virt_to_phys((void *)pgd));
		g2d_pagetable_clean(
				(unsigned char *)GET_START_ADDR_C(params->dst_rect,
					params->clip),
				(u32)GET_RECT_SIZE_C(params->dst_rect, params->clip),
				(u32)virt_to_phys((void *)pgd));

		if (params->flag.render_mode & G2D_CACHE_OP) {
			/*g2d_mem_cache_oneshot((void *)GET_START_ADDR(params->src_rect),
				(void *)GET_START_ADDR(params->dst_rect),
				(unsigned int)GET_REAL_SIZE(params->src_rect),
				(unsigned int)GET_REAL_SIZE(params->dst_rect));*/
			/* NOTE(review): the assignment below was commented out
			 * upstream ("// need_dst_clean ="), leaving the call's
			 * result discarded so need_dst_clean stays true —
			 * confirm this is intentional before re-enabling. */
			g2d_check_need_dst_cache_clean(params);
			g2d_mem_inner_cache(params);
			g2d_mem_outer_cache(g2d_dev, params, &need_dst_clean);
		}
	}

	/* restart the SysMMU with the page table chosen above */
	exynos_sysmmu_disable(g2d_dev->dev);
	exynos_sysmmu_enable(g2d_dev->dev, (u32)virt_to_phys((void *)pgd));

	if (g2d_init_regs(g2d_dev, params) < 0) {
		return false;
	}

	/* Do bitblit */
	g2d_start_bitblt(g2d_dev, params);

	if (!need_dst_clean)
		g2d_mem_outer_cache_inv(params);

	return true;
}
/*
 * fimg2d4x_bitblt - drain queued blit commands and execute them on the
 * FIMG2D 4.x hardware (IOVMM-capable variant).
 * @ctrl: blitter control block
 *
 * Per command: program the SFRs, lock the VMAs backing the images,
 * attach the caller's address space to the IOMMU/SysMMU for user
 * buffers, run the blit, wait for completion, then unmap everything.
 *
 * Returns 0 on success or the negative error code of the last command
 * that failed; the queue is fully drained either way.
 */
int fimg2d4x_bitblt(struct fimg2d_control *ctrl)
{
	int ret = 0;
	enum addr_space addr_type;
	struct fimg2d_context *ctx;
	struct fimg2d_bltcmd *cmd;
	unsigned long *pgd;

	fimg2d_debug("%s : enter blitter\n", __func__);

	while (1) {
		cmd = fimg2d_get_command(ctrl);
		if (!cmd)
			break;

		ctx = cmd->ctx;
		ctx->state = CTX_READY;

#ifdef CONFIG_PM_RUNTIME
		/* blitting with the gate clock off would hang the HW */
		if (fimg2d4x_get_clk_cnt(ctrl->clock) == false)
			fimg2d_err("2D clock is not set\n");
#endif

		atomic_set(&ctrl->busy, 1);

		perf_start(cmd, PERF_SFR);
		ret = ctrl->configure(ctrl, cmd);
		perf_end(cmd, PERF_SFR);

		if (IS_ERR_VALUE(ret)) {
			fimg2d_err("failed to configure\n");
			ctx->state = CTX_ERROR;
			/* NOTE(review): this jumps to fail_n_del BEFORE
			 * ctx->vma_lock is assigned below, so
			 * vma_unlock_mapping() may see a stale or unset
			 * pointer — confirm vma_lock_mapping()/context init
			 * guarantee it is NULL-safe here. */
			goto fail_n_del;
		}

		addr_type = cmd->image[IDST].addr.type;

		/* pin the VMAs backing the images so they cannot be torn
		 * down while the hardware is accessing them */
		ctx->vma_lock = vma_lock_mapping(ctx->mm, prefbuf,
				MAX_IMAGES - 1);

		if (fimg2d_check_pgd(ctx->mm, cmd)) {
			ret = -EFAULT;
			goto fail_n_del;
		}

		if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) {
			/* the owning task may be exiting: mm/pgd can be gone */
			if (!ctx->mm || !ctx->mm->pgd) {
				atomic_set(&ctrl->busy, 0);
				fimg2d_err("ctx->mm:0x%p or ctx->mm->pgd:0x%p\n",
					ctx->mm,
					(ctx->mm) ? ctx->mm->pgd : NULL);
				ret = -EPERM;
				goto fail_n_del;
			}
			pgd = (unsigned long *)ctx->mm->pgd;
#ifdef CONFIG_EXYNOS7_IOMMU
			if (iovmm_activate(ctrl->dev)) {
				fimg2d_err("failed to iovmm activate\n");
				ret = -EPERM;
				goto fail_n_del;
			}
#else
			if (exynos_sysmmu_enable(ctrl->dev,
					(unsigned long)virt_to_phys(pgd))) {
				fimg2d_err("failed to sysmme enable\n");
				ret = -EPERM;
				goto fail_n_del;
			}
#endif
			fimg2d_debug("%s : sysmmu enable: pgd %p ctx %p seq_no(%u)\n",
					__func__, pgd, ctx, cmd->blt.seq_no);

			//exynos_sysmmu_set_pbuf(ctrl->dev, nbufs, prefbuf);
			fimg2d_debug("%s : set smmu prefbuf\n", __func__);
		}

		fimg2d4x_pre_bitblt(ctrl, cmd);

		perf_start(cmd, PERF_BLIT);
		/* start blit */
		fimg2d_debug("%s : start blit\n", __func__);
		ctrl->run(ctrl);
		ret = fimg2d4x_blit_wait(ctrl, cmd);
		perf_end(cmd, PERF_BLIT);

		perf_start(cmd, PERF_UNMAP);
		if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) {
#ifdef CONFIG_EXYNOS7_IOMMU
			iovmm_deactivate(ctrl->dev);

			/* unmap every user-page range that was mapped for the
			 * blit: src/mask/dst base planes plus chroma planes */
			if (cmd->dma[ISRC].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[ISRC].base.addr,
						cmd->dma[ISRC].base.size);
			}

			if (cmd->dma[ISRC].plane2.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[ISRC].plane2.addr,
						cmd->dma[ISRC].plane2.size);
			}

			if (cmd->dma[IMSK].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[IMSK].base.addr,
						cmd->dma[IMSK].base.size);
			}

			if (cmd->dma[IDST].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[IDST].base.addr,
						cmd->dma[IDST].base.size);
			}

			if (cmd->dma[IDST].plane2.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[IDST].plane2.addr,
						cmd->dma[IDST].plane2.size);
			}
#else
			exynos_sysmmu_disable(ctrl->dev);
#endif
			fimg2d_debug("sysmmu disable\n");
		}
		perf_end(cmd, PERF_UNMAP);
fail_n_del:
		vma_unlock_mapping(ctx->vma_lock);
		fimg2d_del_command(ctrl, cmd);
	}

	fimg2d_debug("%s : exit blitter\n", __func__);

	return ret;
}