static void __display_block_clock_on(struct display_driver *dispdrv) { /* DSIM -> MIC -> DECON -> SMMU */ call_pm_ops(dispdrv, dsi_driver, clk_on, dispdrv); #ifdef CONFIG_DECON_MIC call_pm_ops(dispdrv, mic_driver, clk_on, dispdrv); #endif call_pm_ops(dispdrv, decon_driver, clk_on, dispdrv); #ifdef CONFIG_ION_EXYNOS if (dispdrv->platform_status > DISP_STATUS_PM0) { if (iovmm_activate(dispdrv->decon_driver.sfb->dev) < 0) pr_err("%s: failed to reactivate vmm\n", __func__); } #endif }
/*
 * fimg2d4x_bitblt - drain and execute every queued blit command.
 *
 * Pops commands from the controller queue until it is empty.  For each
 * command: programs the hardware (ctrl->configure), maps user pages into
 * the System MMU when the destination uses user-space addresses, runs the
 * blit, waits for completion, then tears the mappings back down.
 *
 * Returns 0 on success or a negative errno from the last failing step.
 *
 * NOTE(review): the first two "goto fail_n_del" paths run before
 * ctx->vma_lock is assigned by vma_lock_mapping(), so vma_unlock_mapping()
 * may see a stale/uninitialized value there — verify vma_unlock_mapping()
 * tolerates that.
 * NOTE(review): IS_ERR_VALUE() on a plain int is unreliable on 64-bit
 * builds; confirm ctrl->configure() only returns small negative errnos.
 * NOTE(review): "prefbuf" is not declared in this function — presumably a
 * file-scope buffer descriptor array; confirm at its definition site.
 */
int fimg2d4x_bitblt(struct fimg2d_control *ctrl)
{
	int ret = 0;
	enum addr_space addr_type;
	struct fimg2d_context *ctx;
	struct fimg2d_bltcmd *cmd;
	unsigned long *pgd;

	fimg2d_debug("%s : enter blitter\n", __func__);

	while (1) {
		/* Fetch the next queued command; an empty queue ends the loop. */
		cmd = fimg2d_get_command(ctrl);
		if (!cmd)
			break;

		ctx = cmd->ctx;
		ctx->state = CTX_READY;
#ifdef CONFIG_PM_RUNTIME
		/* Warn (only) if runtime PM has not gated the 2D clock on. */
		if (fimg2d4x_get_clk_cnt(ctrl->clock) == false)
			fimg2d_err("2D clock is not set\n");
#endif
		atomic_set(&ctrl->busy, 1);

		/* Program SFRs for this command (timed for perf accounting). */
		perf_start(cmd, PERF_SFR);
		ret = ctrl->configure(ctrl, cmd);
		perf_end(cmd, PERF_SFR);

		if (IS_ERR_VALUE(ret)) {
			fimg2d_err("failed to configure\n");
			ctx->state = CTX_ERROR;
			goto fail_n_del;
		}

		/* The destination image decides whether user mappings are needed. */
		addr_type = cmd->image[IDST].addr.type;

		/* Pin the client VMAs so user pages cannot move mid-blit. */
		ctx->vma_lock = vma_lock_mapping(ctx->mm, prefbuf, MAX_IMAGES - 1);

		if (fimg2d_check_pgd(ctx->mm, cmd)) {
			ret = -EFAULT;
			goto fail_n_del;
		}

		if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) {
			/* A user-address blit needs a live mm with a page table. */
			if (!ctx->mm || !ctx->mm->pgd) {
				atomic_set(&ctrl->busy, 0);
				fimg2d_err("ctx->mm:0x%p or ctx->mm->pgd:0x%p\n",
						ctx->mm,
						(ctx->mm) ? ctx->mm->pgd : NULL);
				ret = -EPERM;
				goto fail_n_del;
			}
			pgd = (unsigned long *)ctx->mm->pgd;
#ifdef CONFIG_EXYNOS7_IOMMU
			/* Exynos7: attach via the generic IOVMM layer. */
			if (iovmm_activate(ctrl->dev)) {
				fimg2d_err("failed to iovmm activate\n");
				ret = -EPERM;
				goto fail_n_del;
			}
#else
			/* Older SoCs: point the sysmmu at the client's pgd. */
			if (exynos_sysmmu_enable(ctrl->dev,
					(unsigned long)virt_to_phys(pgd))) {
				fimg2d_err("failed to sysmme enable\n");
				ret = -EPERM;
				goto fail_n_del;
			}
#endif
			fimg2d_debug("%s : sysmmu enable: pgd %p ctx %p seq_no(%u)\n",
					__func__, pgd, ctx, cmd->blt.seq_no);
			//exynos_sysmmu_set_pbuf(ctrl->dev, nbufs, prefbuf);
			fimg2d_debug("%s : set smmu prefbuf\n", __func__);
		}

		fimg2d4x_pre_bitblt(ctrl, cmd);

		perf_start(cmd, PERF_BLIT);
		/* start blit */
		fimg2d_debug("%s : start blit\n", __func__);
		ctrl->run(ctrl);
		/* Block until the hardware signals completion (or times out). */
		ret = fimg2d4x_blit_wait(ctrl, cmd);
		perf_end(cmd, PERF_BLIT);

		perf_start(cmd, PERF_UNMAP);
		if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) {
#ifdef CONFIG_EXYNOS7_IOMMU
			iovmm_deactivate(ctrl->dev);
			/* Unmap every plane that was actually mapped (size > 0). */
			if (cmd->dma[ISRC].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[ISRC].base.addr,
						cmd->dma[ISRC].base.size);
			}
			if (cmd->dma[ISRC].plane2.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[ISRC].plane2.addr,
						cmd->dma[ISRC].plane2.size);
			}
			if (cmd->dma[IMSK].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[IMSK].base.addr,
						cmd->dma[IMSK].base.size);
			}
			if (cmd->dma[IDST].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[IDST].base.addr,
						cmd->dma[IDST].base.size);
			}
			if (cmd->dma[IDST].plane2.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
						ctx->mm,
						cmd->dma[IDST].plane2.addr,
						cmd->dma[IDST].plane2.size);
			}
#else
			exynos_sysmmu_disable(ctrl->dev);
#endif
			fimg2d_debug("sysmmu disable\n");
		}
		perf_end(cmd, PERF_UNMAP);
fail_n_del:
		/* Common cleanup: release the VMA pins and retire the command. */
		vma_unlock_mapping(ctx->vma_lock);
		fimg2d_del_command(ctrl, cmd);
	}

	fimg2d_debug("%s : exit blitter\n", __func__);

	return ret;
}
static int jpeg_probe(struct platform_device *pdev) { struct jpeg_dev *jpeg; struct resource *res; int i, ret; jpeg = devm_kzalloc(&pdev->dev, sizeof(struct jpeg_dev), GFP_KERNEL); if (!jpeg) { dev_err(&pdev->dev, "%s: not enough memory\n", __func__); return -ENOMEM; } ret = of_property_read_u32(pdev->dev.of_node, "ip_ver", &jpeg->ver); if (ret) { dev_err(&pdev->dev, "%s: ip_ver doesn't exist\n", __func__); return -EINVAL; } jpeg->dev = &pdev->dev; spin_lock_init(&jpeg->slock); /* Get memory resource and map SFR region. */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); jpeg->regs = devm_request_and_ioremap(&pdev->dev, res); if (jpeg->regs == NULL) { dev_err(&pdev->dev, "failed to claim register region\n"); return -ENOENT; } /* Get IRQ resource and register IRQ handler. */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "failed to get IRQ resource\n"); return -ENXIO; } /* Get memory resource and map SFR region. */ ret = devm_request_irq(&pdev->dev, res->start, (void *)jpeg_irq_handler, 0, pdev->name, jpeg); if (ret) { dev_err(&pdev->dev, "failed to install irq\n"); return ret; } /* clock */ for (i = 0; i < JPEG_CLK_NUM; i++) jpeg->clocks[i] = ERR_PTR(-ENOENT); ret = jpeg_clk_get(jpeg); if (ret) return ret; jpeg->oneshot_dev = m2m1shot_create_device(&pdev->dev, &jpeg_oneshot_ops, "jpeg", pdev->id, -1); if (IS_ERR(jpeg->oneshot_dev)) { pr_err("%s: Failed to create m2m1shot device\n", __func__); ret = PTR_ERR(jpeg->oneshot_dev); goto err_m2m1shot; } platform_set_drvdata(pdev, jpeg); ret = exynos_create_iovmm(&pdev->dev, 3, 3); if (ret) { dev_err(&pdev->dev, "%s: Failed(%d) to create IOVMM\n", __func__, ret); goto err_iovmm; } ret = iovmm_activate(&pdev->dev); if (ret) { dev_err(&pdev->dev, "%s: Failed(%d) to activate IOVMM\n", __func__, ret); /* nothing to do for exynos_create_iovmm() */ goto err_iovmm; } iovmm_set_fault_handler(&pdev->dev, jpeg_sysmmu_fault_handler, jpeg); pm_runtime_enable(&pdev->dev); if 
(!IS_ENABLED(CONFIG_PM_RUNTIME)) { jpeg_clock_gating(jpeg, true); set_bit(DEV_RUNTIME_RESUME, &jpeg->state); } dev_info(&pdev->dev, "JPEG driver register successfully"); return 0; err_iovmm: m2m1shot_destroy_device(jpeg->oneshot_dev); err_m2m1shot: jpeg_clk_put(jpeg); return ret; }