static int fimg2d_check_address_range(unsigned long addr, size_t size)
{
	struct vm_area_struct *vma;
	int ret = 0;

	if (addr + size <= addr) {
		fimg2d_err("address overflow. addr:0x%lx, size:%d\n",
				addr, size);
		return -EINVAL;
	}

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);

	if ((vma == NULL) || (vma->vm_end < (addr + size))) {
		if (vma)
			fimg2d_err("%#lx, %#x = vma[%#lx, %#lx]\n",
				addr, size, vma->vm_start, vma->vm_end);
		ret = -EINVAL;
	}

	up_read(&current->mm->mmap_sem);

	return ret;
}
Example #2
static int fimg2d_sysmmu_fault_handler(struct device *dev, const char *mmuname,
		enum exynos_sysmmu_inttype itype,
		unsigned long pgtable_base, unsigned long fault_addr)
{
	struct fimg2d_bltcmd *cmd;

	if (itype == SYSMMU_PAGEFAULT) {
		fimg2d_err("sysmmu page fault(0x%lx), pgd(0x%lx)\n",
				fault_addr, pgtable_base);
	} else {
		fimg2d_err("sysmmu fault type(%d) pgd(0x%lx) addr(0x%lx)\n",
				itype, pgtable_base, fault_addr);
	}

	cmd = fimg2d_get_command(ctrl);
	if (WARN_ON(!cmd))
		goto next;

	if (cmd->ctx->mm->pgd != phys_to_virt(pgtable_base)) {
		fimg2d_err("pgtable base invalid\n");
		goto next;
	}

	fimg2d_dump_command(cmd);

next:
	ctrl->dump(ctrl);

	BUG();
	return 0;
}
Example #3
static int fimg2d_check_address_range(unsigned long addr, size_t size)
{
	struct vm_area_struct *vma;
	struct vm_area_struct *nvma;
	int ret = 0;

	if (addr + size <= addr) {
		fimg2d_err("address overflow. addr:0x%lx, size:%d\n",
				addr, size);
		return -EINVAL;
	}

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);

	if (vma == NULL) {
		fimg2d_err("vma is NULL\n");
		ret = -EINVAL;
	} else {
		nvma = vma->vm_next;

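		/*
		 * Walk forward while the requested range spills past this VMA
		 * and the next VMA is virtually contiguous with it.
		 */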
		while ((vma != NULL) && (nvma != NULL) &&
				(vma->vm_end < (addr + size)) &&
				(vma->vm_end == nvma->vm_start)) {
			vma = vma->vm_next;
			nvma = nvma->vm_next;
		}

		if (vma->vm_end < (addr + size)) {
			fimg2d_err("addr : %#lx, size : %#x - out of vma[%#lx, %#lx] range\n",
					addr, size, vma->vm_start, vma->vm_end);
			ret =  -EFAULT;
		}
	}

	up_read(&current->mm->mmap_sem);

	if (!ret) {
		/*
		 * Invoke COW against the first and last pages if they can be
		 * accessed by the CPU and G2D concurrently.
		 * Checking the return value of get_user_pages_fast() is not
		 * required: these calls exist only to trigger COW now, so that
		 * COW is not triggered against the first and the last pages
		 * while G2D is working.
		 */
		if (!IS_ALIGNED(addr, PAGE_SIZE))
			get_user_pages_fast(addr, 1, 1, NULL);

		if (!IS_ALIGNED(addr + size, PAGE_SIZE))
			get_user_pages_fast(addr + size, 1, 1, NULL);
	}

	return ret;
}
static int fimg2d_open(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx;
	unsigned long flags, count;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		fimg2d_err("not enough memory for ctx\n");
		return -ENOMEM;
	}
	file->private_data = (void *)ctx;

	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_add_context(ctrl, ctx);
	count = atomic_read(&ctrl->nctx);
	g2d_spin_unlock(&ctrl->bltlock, flags);

	if (count == 1) {
		fimg2d_pm_qos_update(ctrl, FIMG2D_QOS_ON);
	} else {
#ifdef CONFIG_FIMG2D_USE_BUS_DEVFREQ
		fimg2d_debug("count:%lu, fimg2d_pm_qos_update(ON,mif,int) is already called\n", count);
#endif
#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
		fimg2d_debug("count:%lu, fimg2d_pm_qos_update(ON,cpu) is already called\n", count);
#endif
	}
	return 0;
}
static void fimg2d4x_pre_bitblt(struct fimg2d_control *ctrl,
		struct fimg2d_bltcmd *cmd)
{
	switch (ctrl->pdata->ip_ver) {
	case IP_VER_G2D_5AR2:
		/* disable cci path */
		g2d_cci_snoop_control(ctrl->pdata->ip_ver,
			       NON_SHAREABLE_PATH, SHARED_G2D_SEL);
		break;

	case IP_VER_G2D_5H:
	case IP_VER_G2D_5HP:
#ifndef CCI_SNOOP
		/* disable cci path */
		g2d_cci_snoop_control(ctrl->pdata->ip_ver,
				NON_SHAREABLE_PATH, SHARED_FROM_SYSMMU);
		fimg2d_debug("disable cci\n");
#endif
#ifdef CCI_SNOOP
		/* enable cci path */
		g2d_cci_snoop_control(ctrl->pdata->ip_ver,
				SHAREABLE_PATH, SHARED_G2D_SEL);
		fimg2d_debug("enable cci\n");
#endif
		break;

	default:
		fimg2d_err("g2d_cci_snoop_control is not called\n");
		break;
	}
}
Example #6
static int fimg2d_open(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx;
	unsigned long flags, count;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		fimg2d_err("not enough memory for ctx\n");
		return -ENOMEM;
	}
	file->private_data = (void *)ctx;

	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_add_context(ctrl, ctx);
	count = atomic_read(&ctrl->nctx);
	g2d_spin_unlock(&ctrl->bltlock, flags);

#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
	if (count == 1) {
		/* mif lock : 800MHz */
		pm_qos_update_request(&exynos5_g2d_mif_qos, 800000);
		pm_qos_update_request(&exynos5_g2d_cpu_qos, 400000);
	} else {
		fimg2d_debug("count:%lu, pm_qos_update_request() is already called\n",
				count);
	}
#endif
	return 0;
}
Example #7
static int fimg2d_open(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx;
	unsigned long flags, qflags, count;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		fimg2d_err("not enough memory for ctx\n");
		return -ENOMEM;
	}
	file->private_data = (void *)ctx;

	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_add_context(ctrl, ctx);
	count = atomic_read(&ctrl->nctx);
	g2d_spin_unlock(&ctrl->bltlock, flags);

	if (count == 1) {
		g2d_spin_lock(&ctrl->qoslock, qflags);
		ctrl->pre_qos_lv = G2D_LV3;
		ctrl->qos_lv = G2D_LV2;
		g2d_spin_unlock(&ctrl->qoslock, qflags);
		fimg2d_pm_qos_update(ctrl, FIMG2D_QOS_ON);
	} else {
		fimg2d_debug("count:%ld, fimg2d_pm_pos_update is "
						"already called\n", count);
	}
	return 0;
}
Example #8
static int fimg2d_context_wait(struct fimg2d_context *ctx)
{
	int ret;

	ret = wait_event_timeout(ctx->wait_q, !atomic_read(&ctx->ncmd),
			CTX_TIMEOUT);
	if (!ret) {
		fimg2d_err("ctx %p wait timeout\n", ctx);
		return -ETIME;
	}

	if (ctx->state == CTX_ERROR) {
		ctx->state = CTX_READY;
		fimg2d_err("ctx %p error before blit\n", ctx);
		return -EINVAL;
	}

	return 0;
}
Example #9
static int fimg2d_sysmmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova, int flags, void *token)
{
	struct fimg2d_bltcmd *cmd;

	cmd = fimg2d_get_command(ctrl);
	if (!cmd) {
		fimg2d_err("no available command\n");
		goto done;
	}

	fimg2d_debug_command(cmd);

	if (atomic_read(&ctrl->busy)) {
		fimg2d_err("dumping g2d registers..\n");
		ctrl->dump(ctrl);
	}
done:
	return 0;
}
Example #10
int fimg2d_clk_setup(struct fimg2d_control *ctrl)
{
	struct fimg2d_platdata *pdata;
	struct clk *parent, *sclk;
	int ret = 0;

	sclk = parent = NULL;
	pdata = to_fimg2d_plat(ctrl->dev);

	if (ip_is_g2d_5g() || ip_is_g2d_5a()) {
		fimg2d_info("aclk_acp(%lu) pclk_acp(%lu)\n",
				clk_get_rate(clk_get(NULL, "aclk_acp")),
				clk_get_rate(clk_get(NULL, "pclk_acp")));
	} else {
		sclk = clk_get(ctrl->dev, pdata->clkname);
		if (IS_ERR(sclk)) {
			fimg2d_err("failed to get fimg2d clk\n");
			ret = -ENOENT;
			goto err_clk1;
		}
		fimg2d_info("fimg2d clk name: %s clkrate: %ld\n",
				pdata->clkname, clk_get_rate(sclk));
	}
	/* clock for gating */
	ctrl->clock = clk_get(ctrl->dev, pdata->gate_clkname);
	if (IS_ERR(ctrl->clock)) {
		fimg2d_err("failed to get gate clk\n");
		ret = -ENOENT;
		goto err_clk2;
	}
	fimg2d_info("gate clk: %s\n", pdata->gate_clkname);

	return ret;

err_clk2:
	if (sclk)
		clk_put(sclk);

err_clk1:
	return ret;
}
Example #11
static int fimg2d_runtime_resume(struct device *dev)
{
	int ret = 0;

	if (ip_is_g2d_5r()) {
		ret = fimg2d_clk_set_gate(ctrl);
		if (ret) {
			fimg2d_err("failed to fimg2d_clk_set_gate()\n");
			ret = -ENOENT;
		}
	} else if (ip_is_g2d_5h() || ip_is_g2d_5hp()) {
		ret = exynos5430_fimg2d_clk_set(ctrl);
		if (ret) {
			fimg2d_err("failed to exynos5430_fimg2d_clk_set()\n");
			ret = -ENOENT;
		}
	}

	fimg2d_debug("runtime resume... done\n");
	return ret;
}
Example #12
static int fimg2d_context_wait(struct fimg2d_context *ctx)
{
	int ret;

	ret = wait_event_timeout(ctx->wait_q, !atomic_read(&ctx->ncmd),
			CTX_TIMEOUT);
	if (!ret) {
		fimg2d_err("ctx %p wait timeout\n", ctx);
		return -ETIME;
	}
	return 0;
}
Example #13
static int fimg2d4x_blit_wait(struct fimg2d_control *ctrl,
		struct fimg2d_bltcmd *cmd)
{
	int ret;

	ret = wait_event_timeout(ctrl->wait_q, !atomic_read(&ctrl->busy),
			BLIT_TIMEOUT);
	if (!ret) {
		fimg2d4x_disable_irq(ctrl);

		fimg2d_err("blit wait timeout\n");
		if (!fimg2d4x_blit_done_status(ctrl))
			fimg2d_err("blit not finished\n");

		fimg2d_dump_command(cmd);
		fimg2d4x_reset(ctrl);

		return -1;
	}
	return 0;
}
Example #14
int fimg2d_clk_set_gate(struct fimg2d_control *ctrl)
{
	/* CPLL:666MHz */
	struct clk *aclk_333_g2d_sw, *aclk_333_g2d;
	struct fimg2d_platdata *pdata;
	int ret = 0;

	pdata = to_fimg2d_plat(ctrl->dev);

	aclk_333_g2d_sw = clk_get(NULL, "aclk_333_g2d_sw");
	if (IS_ERR(aclk_333_g2d_sw)) {
		pr_err("failed to get %s clock\n", "aclk_333_g2d_sw");
		ret = PTR_ERR(aclk_333_g2d_sw);
		goto err_g2d_dout;
	}

	aclk_333_g2d = clk_get(NULL, "aclk_333_g2d"); /* sclk_fimg2d */
	if (IS_ERR(aclk_333_g2d)) {
		pr_err("failed to get %s clock\n", "aclk_333_g2d");
		ret = PTR_ERR(aclk_333_g2d);
		goto err_g2d_sw;
	}

	if (clk_set_parent(aclk_333_g2d, aclk_333_g2d_sw))
		pr_err("Unable to set parent %s of clock %s\n",
			aclk_333_g2d_sw->name, aclk_333_g2d->name);


	/* clock for gating */
	ctrl->clock = clk_get(ctrl->dev, pdata->gate_clkname);
	if (IS_ERR(ctrl->clock)) {
		fimg2d_err("failed to get gate clk\n");
		ret = -ENOENT;
		goto err_aclk_g2d;
	}
	fimg2d_debug("gate clk: %s\n", pdata->gate_clkname);

err_aclk_g2d:
	if (aclk_333_g2d)
		clk_put(aclk_333_g2d);
err_g2d_sw:
	if (aclk_333_g2d_sw)
		clk_put(aclk_333_g2d_sw);
err_g2d_dout:

	return ret;
}
Example #15
static int fimg2d_resume(struct device *dev)
{
	unsigned long flags;
	int ret = 0;

	g2d_spin_lock(&ctrl->bltlock, flags);
	atomic_set(&ctrl->suspended, 0);
	g2d_spin_unlock(&ctrl->bltlock, flags);
	/* G2D clk gating mask */
	if (ip_is_g2d_5ar2()) {
		fimg2d_clk_on(ctrl);
		fimg2d_clk_off(ctrl);
	} else if (ip_is_g2d_5hp()) {
		ret = exynos5430_fimg2d_clk_set(ctrl);
		if (ret) {
			fimg2d_err("failed to exynos5430_fimg2d_clk_set()\n");
			return -ENOENT;
		}
	}
	fimg2d_info("resume... done\n");
	return ret;
}
Example #16
static int fimg2d_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;

	if (!to_fimg2d_plat(&pdev->dev)) {
		fimg2d_err("failed to get platform data\n");
		return -ENOMEM;
	}

	/* global structure */
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		fimg2d_err("failed to allocate memory for controller\n");
		return -ENOMEM;
	}

	/* setup global ctrl */
	ret = fimg2d_setup_controller(ctrl);
	if (ret) {
		fimg2d_err("failed to setup controller\n");
		goto drv_free;
	}
	ctrl->dev = &pdev->dev;

	/* memory region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		fimg2d_err("failed to get resource\n");
		ret = -ENOENT;
		goto drv_free;
	}

	ctrl->mem = request_mem_region(res->start, resource_size(res),
					pdev->name);
	if (!ctrl->mem) {
		fimg2d_err("failed to request memory region\n");
		ret = -ENOMEM;
		goto drv_free;
	}

	/* ioremap */
	ctrl->regs = ioremap(res->start, resource_size(res));
	if (!ctrl->regs) {
		fimg2d_err("failed to ioremap for SFR\n");
		ret = -ENOENT;
		goto mem_free;
	}
	fimg2d_info("base address: 0x%lx\n", (unsigned long)res->start);

	/* irq */
	ctrl->irq = platform_get_irq(pdev, 0);
	if (ctrl->irq < 0) {
		fimg2d_err("failed to get irq resource\n");
		ret = -ENOENT;
		goto reg_unmap;
	}
	fimg2d_info("irq: %d\n", ctrl->irq);

	ret = request_irq(ctrl->irq, fimg2d_irq, IRQF_DISABLED,
			pdev->name, ctrl);
	if (ret) {
		fimg2d_err("failed to request irq\n");
		ret = -ENOENT;
		goto reg_unmap;
	}

	ret = fimg2d_clk_setup(ctrl);
	if (ret) {
		fimg2d_err("failed to setup clk\n");
		ret = -ENOENT;
		goto irq_free;
	}

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_enable(ctrl->dev);
	fimg2d_info("enable runtime pm\n");
	pm_runtime_get_sync(ctrl->dev);
#else
	fimg2d_clk_on(ctrl);
#endif

	exynos_sysmmu_set_fault_handler(ctrl->dev, fimg2d_sysmmu_fault_handler);
	fimg2d_info("register sysmmu page fault handler\n");

	/* misc register */
	ret = misc_register(&fimg2d_dev);
	if (ret) {
		fimg2d_err("failed to register misc driver\n");
		goto clk_release;
	}

#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
	pm_qos_add_request(&exynos5_g2d_cpu_qos,
			PM_QOS_CPU_FREQ_MIN, 0);
	pm_qos_add_request(&exynos5_g2d_mif_qos,
			PM_QOS_BUS_THROUGHPUT, 0);
#endif

	return 0;

clk_release:
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_disable(ctrl->dev);
#else
	fimg2d_clk_off(ctrl);
#endif
	fimg2d_clk_release(ctrl);

irq_free:
	free_irq(ctrl->irq, ctrl);
reg_unmap:
	iounmap(ctrl->regs);
mem_free:
	release_mem_region(res->start, resource_size(res));
drv_free:
#ifdef BLIT_WORKQUE
	if (ctrl->work_q)
		destroy_workqueue(ctrl->work_q);
#endif
	mutex_destroy(&ctrl->drvlock);
	kfree(ctrl);

	return ret;
}
Example #17
static long fimg2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct fimg2d_context *ctx;
	struct mm_struct *mm;

	ctx = file->private_data;

	switch (cmd) {
	case FIMG2D_BITBLT_BLIT:

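		/*
		 * Pin the caller's mm (mm_users reference) so its page tables
		 * stay valid while the blit references user buffers.
		 */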
		mm = get_task_mm(current);
		if (!mm) {
			fimg2d_err("no mm for ctx\n");
			return -ENXIO;
		}

		fimg2d_clk_on(ctrl);

		g2d_lock(&ctrl->drvlock);
		ctx->mm = mm;

		if (atomic_read(&ctrl->drvact) ||
				atomic_read(&ctrl->suspended)) {
			fimg2d_err("driver is unavailable, do sw fallback\n");
			g2d_unlock(&ctrl->drvlock);
			fimg2d_clk_off(ctrl);
			mmput(mm);
			return -EPERM;
		}

		ret = fimg2d_add_command(ctrl, ctx, (struct fimg2d_blit __user *)arg);
		if (ret) {
			fimg2d_err("add command not allowed.\n");
			g2d_unlock(&ctrl->drvlock);
			fimg2d_clk_off(ctrl);
			mmput(mm);
			return ret;
		}

		ret = fimg2d_request_bitblt(ctrl, ctx);
		if (ret) {
			fimg2d_err("request bitblit not allowed.\n");
			g2d_unlock(&ctrl->drvlock);
			fimg2d_clk_off(ctrl);
			mmput(mm);
			return -EBUSY;
		}
		g2d_unlock(&ctrl->drvlock);
		fimg2d_clk_off(ctrl);
		mmput(mm);
		break;

	case FIMG2D_BITBLT_VERSION:
	{
		struct fimg2d_version ver;
		struct fimg2d_platdata *pdata;

		pdata = to_fimg2d_plat(ctrl->dev);
		ver.hw = pdata->hw_ver;
		ver.sw = 0;
		fimg2d_info("version info. hw(0x%x), sw(0x%x)\n",
				ver.hw, ver.sw);
		if (copy_to_user((void *)arg, &ver, sizeof(ver)))
			return -EFAULT;
		break;
	}
	case FIMG2D_BITBLT_ACTIVATE:
	{
		enum driver_act act;

		if (copy_from_user(&act, (void *)arg, sizeof(act)))
			return -EFAULT;

		g2d_lock(&ctrl->drvlock);
		atomic_set(&ctrl->drvact, act);
		if (act == DRV_ACT)
			fimg2d_info("fimg2d driver is activated\n");
		else
			fimg2d_info("fimg2d driver is deactivated\n");
		g2d_unlock(&ctrl->drvlock);
		break;
	}
	default:
		fimg2d_err("unknown ioctl\n");
		ret = -EFAULT;
		break;
	}

	return ret;
}
Example #18
int fimg2d_add_command(struct fimg2d_control *ctrl,
		struct fimg2d_context *ctx, struct fimg2d_blit __user *buf)
{
	unsigned long flags, qflags;
	struct fimg2d_blit *blt;
	struct fimg2d_bltcmd *cmd;
	int len = sizeof(struct fimg2d_image);
	int ret = 0;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	if (copy_from_user(&cmd->blt, buf, sizeof(cmd->blt))) {
		ret = -EFAULT;
		goto err;
	}

	cmd->ctx = ctx;

	blt = &cmd->blt;

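	/*
	 * Copy each referenced image descriptor from user space into the
	 * command and repoint the blit at the kernel-resident copy.
	 */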
	if (blt->src) {
		if (copy_from_user(&cmd->image[ISRC], blt->src, len)) {
			ret = -EFAULT;
			goto err;
		}
		blt->src = &cmd->image[ISRC];
	}

	if (blt->msk) {
		if (copy_from_user(&cmd->image[IMSK], blt->msk, len)) {
			ret = -EFAULT;
			goto err;
		}
		blt->msk = &cmd->image[IMSK];
	}

	if (blt->tmp) {
		if (copy_from_user(&cmd->image[ITMP], blt->tmp, len)) {
			ret = -EFAULT;
			goto err;
		}
		blt->tmp = &cmd->image[ITMP];
	}

	if (blt->dst) {
		if (copy_from_user(&cmd->image[IDST], blt->dst, len)) {
			ret = -EFAULT;
			goto err;
		}
		blt->dst = &cmd->image[IDST];
	}

	fimg2d_dump_command(cmd);

	perf_start(cmd, PERF_TOTAL);

	if (fimg2d_check_params(cmd)) {
		ret = -EINVAL;
		goto err;
	}

	fimg2d_fixup_params(cmd);

	if (fimg2d_check_dma_sync(cmd)) {
		ret = -EFAULT;
		goto err;
	}

	g2d_spin_lock(&ctrl->qoslock, qflags);
	if ((blt->qos_lv >= G2D_LV0) && (blt->qos_lv < G2D_LV_END)) {
		ctrl->pre_qos_lv = ctrl->qos_lv;
		ctrl->qos_lv = blt->qos_lv;
		fimg2d_debug("pre_qos_lv:%d, qos_lv:%d qos_id:%d\n",
				ctrl->pre_qos_lv, ctrl->qos_lv, blt->qos_lv);
	} else {
		fimg2d_err("invalid qos_lv:0x%x\n", blt->qos_lv);
		g2d_spin_unlock(&ctrl->qoslock, qflags);
		ret = -EINVAL;
		goto err;
	}
	g2d_spin_unlock(&ctrl->qoslock, qflags);

	/* add command node and increase ncmd */
	g2d_spin_lock(&ctrl->bltlock, flags);
	if (atomic_read(&ctrl->drvact) || atomic_read(&ctrl->suspended)) {
		fimg2d_debug("driver is unavailable, do sw fallback\n");
		g2d_spin_unlock(&ctrl->bltlock, flags);
		ret = -EPERM;
		goto err;
	}
	atomic_inc(&ctx->ncmd);
	fimg2d_enqueue(&cmd->node, &ctrl->cmd_q);
	fimg2d_debug("ctx %p pgd %p ncmd(%d) seq_no(%u)\n",
			cmd->ctx, (unsigned long *)cmd->ctx->mm->pgd,
			atomic_read(&ctx->ncmd), cmd->blt.seq_no);
	g2d_spin_unlock(&ctrl->bltlock, flags);

	return 0;

err:
	kfree(cmd);
	return ret;
}
Example #19
static int fimg2d_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct fimg2d_platdata *pdata;
#ifdef CONFIG_OF
	struct device *dev = &pdev->dev;
	int id = 0;
#else
	pdata = to_fimg2d_plat(&pdev->dev);
#endif

	dev_info(&pdev->dev, "++%s\n", __func__);

#ifdef CONFIG_OF
	if (dev->of_node) {
		id = of_alias_get_id(pdev->dev.of_node, "fimg2d");
	} else {
		id = pdev->id;
		pdata = dev->platform_data;
		if (!pdata) {
			dev_err(&pdev->dev, "no platform data\n");
			return -EINVAL;
		}
	}
#else
	if (!to_fimg2d_plat(&pdev->dev)) {
		fimg2d_err("failed to get platform data\n");
		return -ENOMEM;
	}
#endif
	/* global structure */
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		fimg2d_err("failed to allocate memory for controller\n");
		return -ENOMEM;
	}

#ifdef CONFIG_OF
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		fimg2d_err("failed to allocate memory for controller\n");
		kfree(ctrl);
		return -ENOMEM;
	}
	ctrl->pdata = pdata;
	g2d_parse_dt(dev->of_node, ctrl->pdata);
#endif

	/* setup global ctrl */
	ret = fimg2d_setup_controller(ctrl);
	if (ret) {
		fimg2d_err("failed to setup controller\n");
		goto drv_free;
	}
	ctrl->dev = &pdev->dev;

	/* memory region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		fimg2d_err("failed to get resource\n");
		ret = -ENOENT;
		goto drv_free;
	}

	ctrl->mem = request_mem_region(res->start, resource_size(res),
					pdev->name);
	if (!ctrl->mem) {
		fimg2d_err("failed to request memory region\n");
		ret = -ENOMEM;
		goto drv_free;
	}

	/* ioremap */
	ctrl->regs = ioremap(res->start, resource_size(res));
	if (!ctrl->regs) {
		fimg2d_err("failed to ioremap for SFR\n");
		ret = -ENOENT;
		goto mem_free;
	}
	fimg2d_debug("base address: 0x%lx\n", (unsigned long)res->start);

	/* irq */
	ctrl->irq = platform_get_irq(pdev, 0);
	if (ctrl->irq < 0) {
		fimg2d_err("failed to get irq resource\n");
		ret = -ENOENT;
		goto reg_unmap;
	}
	fimg2d_debug("irq: %d\n", ctrl->irq);

	ret = request_irq(ctrl->irq, fimg2d_irq, IRQF_DISABLED,
			pdev->name, ctrl);
	if (ret) {
		fimg2d_err("failed to request irq\n");
		ret = -ENOENT;
		goto reg_unmap;
	}

	ret = fimg2d_clk_setup(ctrl);
	if (ret) {
		fimg2d_err("failed to setup clk\n");
		ret = -ENOENT;
		goto irq_free;
	}

	spin_lock_init(&ctrl->qoslock);

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_enable(ctrl->dev);
	fimg2d_info("enable runtime pm\n");
#else
	fimg2d_clk_on(ctrl);
#endif

#ifdef FIMG2D_IOVMM_PAGETABLE
	exynos_create_iovmm(dev, 3, 3);
#endif
	iovmm_set_fault_handler(dev, fimg2d_sysmmu_fault_handler, ctrl);

	fimg2d_debug("register sysmmu page fault handler\n");

	/* misc register */
	ret = misc_register(&fimg2d_dev);
	if (ret) {
		fimg2d_err("failed to register misc driver\n");
		goto clk_release;
	}

	fimg2d_pm_qos_add(ctrl);

	dev_info(&pdev->dev, "fimg2d registered successfully\n");

	return 0;

clk_release:
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_disable(ctrl->dev);
#else
	fimg2d_clk_off(ctrl);
#endif
	fimg2d_clk_release(ctrl);

irq_free:
	free_irq(ctrl->irq, ctrl);
reg_unmap:
	iounmap(ctrl->regs);
mem_free:
	release_mem_region(res->start, resource_size(res));
drv_free:
#ifdef BLIT_WORKQUE
	if (ctrl->work_q)
		destroy_workqueue(ctrl->work_q);
#endif
	mutex_destroy(&ctrl->drvlock);
#ifdef CONFIG_OF
	kfree(pdata);
#endif
	kfree(ctrl);

	return ret;
}
int fimg2d4x_bitblt(struct fimg2d_control *ctrl)
{
	int ret = 0;
	enum addr_space addr_type;
	struct fimg2d_context *ctx;
	struct fimg2d_bltcmd *cmd;
	unsigned long *pgd;

	fimg2d_debug("%s : enter blitter\n", __func__);

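	/*
	 * Drain the command queue: for each command, configure the blitter,
	 * enable the sysmmu for user-space buffers, run the blit and wait,
	 * then unmap the buffers and delete the command.
	 */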
	while (1) {
		cmd = fimg2d_get_command(ctrl);
		if (!cmd)
			break;

		ctx = cmd->ctx;
		ctx->state = CTX_READY;

#ifdef CONFIG_PM_RUNTIME
		if (fimg2d4x_get_clk_cnt(ctrl->clock) == false)
			fimg2d_err("2D clock is not set\n");
#endif

		atomic_set(&ctrl->busy, 1);
		perf_start(cmd, PERF_SFR);
		ret = ctrl->configure(ctrl, cmd);
		perf_end(cmd, PERF_SFR);
		if (IS_ERR_VALUE(ret)) {
			fimg2d_err("failed to configure\n");
			ctx->state = CTX_ERROR;
			goto fail_n_del;
		}

		addr_type = cmd->image[IDST].addr.type;

		ctx->vma_lock = vma_lock_mapping(ctx->mm, prefbuf, MAX_IMAGES - 1);

		if (fimg2d_check_pgd(ctx->mm, cmd)) {
			ret = -EFAULT;
			goto fail_n_del;
		}

		if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) {
			if (!ctx->mm || !ctx->mm->pgd) {
				atomic_set(&ctrl->busy, 0);
				fimg2d_err("ctx->mm:0x%p or ctx->mm->pgd:0x%p\n",
					       ctx->mm,
					       (ctx->mm) ? ctx->mm->pgd : NULL);
				ret = -EPERM;
				goto fail_n_del;
			}
			pgd = (unsigned long *)ctx->mm->pgd;
#ifdef CONFIG_EXYNOS7_IOMMU
			if (iovmm_activate(ctrl->dev)) {
				fimg2d_err("failed to iovmm activate\n");
				ret = -EPERM;
				goto fail_n_del;
			}
#else
			if (exynos_sysmmu_enable(ctrl->dev,
					(unsigned long)virt_to_phys(pgd))) {
				fimg2d_err("failed to sysmme enable\n");
				ret = -EPERM;
				goto fail_n_del;
			}
#endif
			fimg2d_debug("%s : sysmmu enable: pgd %p ctx %p seq_no(%u)\n",
				__func__, pgd, ctx, cmd->blt.seq_no);

			//exynos_sysmmu_set_pbuf(ctrl->dev, nbufs, prefbuf);
			fimg2d_debug("%s : set smmu prefbuf\n", __func__);
		}

		fimg2d4x_pre_bitblt(ctrl, cmd);

		perf_start(cmd, PERF_BLIT);
		/* start blit */
		fimg2d_debug("%s : start blit\n", __func__);
		ctrl->run(ctrl);
		ret = fimg2d4x_blit_wait(ctrl, cmd);
		perf_end(cmd, PERF_BLIT);

		perf_start(cmd, PERF_UNMAP);
		if (addr_type == ADDR_USER || addr_type == ADDR_USER_CONTIG) {
#ifdef CONFIG_EXYNOS7_IOMMU
			iovmm_deactivate(ctrl->dev);

			if (cmd->dma[ISRC].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
					ctx->mm, cmd->dma[ISRC].base.addr,
					cmd->dma[ISRC].base.size);
			}

			if (cmd->dma[ISRC].plane2.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
					ctx->mm, cmd->dma[ISRC].plane2.addr,
					cmd->dma[ISRC].plane2.size);
			}

			if (cmd->dma[IMSK].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
					ctx->mm, cmd->dma[IMSK].base.addr,
					cmd->dma[IMSK].base.size);
			}

			if (cmd->dma[IDST].base.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
					ctx->mm, cmd->dma[IDST].base.addr,
					cmd->dma[IDST].base.size);
			}

			if (cmd->dma[IDST].plane2.size > 0) {
				exynos_sysmmu_unmap_user_pages(ctrl->dev,
					ctx->mm, cmd->dma[IDST].plane2.addr,
					cmd->dma[IDST].plane2.size);
			}
#else
			exynos_sysmmu_disable(ctrl->dev);
#endif
			fimg2d_debug("sysmmu disable\n");
		}
		perf_end(cmd, PERF_UNMAP);
fail_n_del:
		vma_unlock_mapping(ctx->vma_lock);
		fimg2d_del_command(ctrl, cmd);
	}

	fimg2d_debug("%s : exit blitter\n", __func__);

	return ret;
}
Example #21
static long compat_fimg2d_ioctl32(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	switch (cmd) {
	case COMPAT_FIMG2D_BITBLT_BLIT:
	{
		struct compat_fimg2d_blit __user *data32;
		struct fimg2d_blit __user *data;
		struct mm_struct *mm;
		enum blit_op op;
		enum blit_sync sync;
		enum fimg2d_qos_level qos_lv;
		compat_uint_t seq_no;
		unsigned long stack_cursor = 0;
		int err;

		mm = get_task_mm(current);
		if (!mm) {
			fimg2d_err("no mm for ctx\n");
			return -ENXIO;
		}

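		/*
		 * Rebuild a native struct fimg2d_blit in compat user stack
		 * space, copying each field and image pointer from the 32-bit
		 * layout, then forward it to the native ioctl handler.
		 * stack_cursor tracks how much of that space is already used.
		 */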
		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data) {
			fimg2d_err("failed to allocate user compat space\n");
			mmput(mm);
			return -ENOMEM;
		}

		stack_cursor += sizeof(*data);
		memset(data, 0, sizeof(*data));

		err = get_user(op, &data32->op);
		err |= put_user(op, &data->op);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		err = compat_get_fimg2d_param(&data->param, &data32->param);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		if (data32->src) {
			data->src = compat_alloc_user_space(sizeof(*data->src) +
								stack_cursor);
			if (!data->src) {
				fimg2d_err("failed to allocate user compat space\n");
				mmput(mm);
				return -ENOMEM;
			}

			stack_cursor += sizeof(*data->src);
			err = compat_get_fimg2d_image(data->src, data32->src);
			if (err) {
				fimg2d_err("failed to get compat data\n");
				mmput(mm);
				return err;
			}
		}

		if (data32->msk) {
			data->msk = compat_alloc_user_space(sizeof(*data->msk) +
								stack_cursor);
			if (!data->msk) {
				fimg2d_err("failed to allocate user compat space\n");
				mmput(mm);
				return -ENOMEM;
			}

			stack_cursor += sizeof(*data->msk);
			err = compat_get_fimg2d_image(data->msk, data32->msk);
			if (err) {
				fimg2d_err("failed to get compat data\n");
				mmput(mm);
				return err;
			}
		}

		if (data32->tmp) {
			data->tmp = compat_alloc_user_space(sizeof(*data->tmp) +
								stack_cursor);
			if (!data->tmp) {
				fimg2d_err("failed to allocate user compat space\n");
				mmput(mm);
				return -ENOMEM;
			}

			stack_cursor += sizeof(*data->tmp);
			err = compat_get_fimg2d_image(data->tmp, data32->tmp);
			if (err) {
				fimg2d_err("failed to get compat data\n");
				mmput(mm);
				return err;
			}
		}

		if (data32->dst) {
			data->dst = compat_alloc_user_space(sizeof(*data->dst) +
								stack_cursor);
			if (!data->dst) {
				fimg2d_err("failed to allocate user compat space\n");
				mmput(mm);
				return -ENOMEM;
			}

			stack_cursor += sizeof(*data->dst);
			err = compat_get_fimg2d_image(data->dst, data32->dst);
			if (err) {
				fimg2d_err("failed to get compat data\n");
				mmput(mm);
				return err;
			}
		}

		err = get_user(sync, &data32->sync);
		err |= put_user(sync, &data->sync);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		err = get_user(seq_no, &data32->seq_no);
		err |= put_user(seq_no, &data->seq_no);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		err = get_user(qos_lv, &data32->qos_lv);
		err |= put_user(qos_lv, &data->qos_lv);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		err = file->f_op->unlocked_ioctl(file,
				FIMG2D_BITBLT_BLIT, (unsigned long)data);
		mmput(mm);
		return err;
	}
	case COMPAT_FIMG2D_BITBLT_VERSION:
	{
		struct compat_fimg2d_version __user *data32;
		struct fimg2d_version __user *data;
		compat_uint_t i;
		int err;

		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data) {
			fimg2d_err("failed to allocate user compat space\n");
			return -ENOMEM;
		}

		err = get_user(i, &data32->hw);
		err |= put_user(i, &data->hw);
		err |= get_user(i, &data32->sw);
		err |= put_user(i, &data->sw);

		if (err)
			return err;

		return file->f_op->unlocked_ioctl(file,
				FIMG2D_BITBLT_VERSION, (unsigned long)data);
	}
	case FIMG2D_BITBLT_ACTIVATE:
	{
		return file->f_op->unlocked_ioctl(file,
				FIMG2D_BITBLT_ACTIVATE, arg);
	}
	default:
		fimg2d_err("unknown ioctl\n");
		return -EINVAL;
	}
}
Example #22
static long fimg2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct fimg2d_context *ctx;
	struct mm_struct *mm;
	struct fimg2d_dma *usr_dst;

	ctx = file->private_data;

	switch (cmd) {
	case FIMG2D_BITBLT_BLIT:

		mm = get_task_mm(current);
		if (!mm) {
			fimg2d_err("no mm for ctx\n");
			return -ENXIO;
		}

		g2d_lock(&ctrl->drvlock);
		ctx->mm = mm;

		if (atomic_read(&ctrl->drvact) ||
				atomic_read(&ctrl->suspended)) {
			fimg2d_err("driver is unavailable, do sw fallback\n");
			g2d_unlock(&ctrl->drvlock);
			mmput(mm);
			return -EPERM;
		}

		ret = fimg2d_add_command(ctrl, ctx, (struct fimg2d_blit __user *)arg);
		if (ret) {
			fimg2d_err("add command not allowed.\n");
			g2d_unlock(&ctrl->drvlock);
			mmput(mm);
			return ret;
		}

		fimg2d_pm_qos_update(ctrl, FIMG2D_QOS_ON);

		usr_dst = kzalloc(sizeof(struct fimg2d_dma), GFP_KERNEL);
		if (!usr_dst) {
			fimg2d_err("failed to allocate memory for fimg2d_dma\n");
			g2d_unlock(&ctrl->drvlock);
			mmput(mm);
			return -ENOMEM;
		}

		ret = store_user_dst((struct fimg2d_blit __user *)arg, usr_dst);
		if (ret) {
			fimg2d_err("store_user_dst() not allowed.\n");
			g2d_unlock(&ctrl->drvlock);
			kfree(usr_dst);
			mmput(mm);
			return ret;
		}

		ret = fimg2d_request_bitblt(ctrl, ctx);
		if (ret) {
			fimg2d_info("request bitblit not allowed, "
					"so passing to s/w fallback.\n");
			g2d_unlock(&ctrl->drvlock);
			kfree(usr_dst);
			mmput(mm);
			return -EBUSY;
		}

		g2d_unlock(&ctrl->drvlock);

		fimg2d_debug("addr : %p, size : %zd\n",
				(void *)usr_dst->addr, usr_dst->size);
#ifndef CCI_SNOOP
		fimg2d_dma_unsync_inner(usr_dst->addr,
				usr_dst->size, DMA_FROM_DEVICE);
#endif
		kfree(usr_dst);
		mmput(mm);
		break;

	case FIMG2D_BITBLT_VERSION:
	{
		struct fimg2d_version ver;
		struct fimg2d_platdata *pdata;

#ifdef CONFIG_OF
		pdata = ctrl->pdata;
#else
		pdata = to_fimg2d_plat(ctrl->dev);

#endif
		ver.hw = pdata->hw_ver;
		ver.sw = 0;
		fimg2d_info("version info. hw(0x%x), sw(0x%x)\n",
				ver.hw, ver.sw);
		if (copy_to_user((void *)arg, &ver, sizeof(ver)))
			return -EFAULT;
		break;
	}
	case FIMG2D_BITBLT_ACTIVATE:
	{
		enum driver_act act;

		if (copy_from_user(&act, (void *)arg, sizeof(act)))
			return -EFAULT;

		g2d_lock(&ctrl->drvlock);
		atomic_set(&ctrl->drvact, act);
		if (act == DRV_ACT) {
			fimg2d_power_control(ctrl, FIMG2D_PW_OFF);
			fimg2d_info("fimg2d driver is activated\n");
		} else {
			fimg2d_power_control(ctrl, FIMG2D_PW_ON);
			fimg2d_info("fimg2d driver is deactivated\n");
		}
		g2d_unlock(&ctrl->drvlock);
		break;
	}
	default:
		fimg2d_err("unknown ioctl\n");
		ret = -EFAULT;
		break;
	}

	return ret;
}