static int fimg2d_open(struct inode *inode, struct file *file) { struct fimg2d_context *ctx; unsigned long flags, qflags, count; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { fimg2d_err("not enough memory for ctx\n"); return -ENOMEM; } file->private_data = (void *)ctx; g2d_spin_lock(&ctrl->bltlock, flags); fimg2d_add_context(ctrl, ctx); count = atomic_read(&ctrl->nctx); g2d_spin_unlock(&ctrl->bltlock, flags); if (count == 1) { g2d_spin_lock(&ctrl->qoslock, qflags); ctrl->pre_qos_lv = G2D_LV3; ctrl->qos_lv = G2D_LV2; g2d_spin_unlock(&ctrl->qoslock, qflags); fimg2d_pm_qos_update(ctrl, FIMG2D_QOS_ON); } else { fimg2d_debug("count:%ld, fimg2d_pm_pos_update is " "already called\n", count); } return 0; }
/*
 * fimg2d_open - open() handler (config-dependent debug-message variant).
 *
 * Same flow as the other fimg2d_open(): allocate a per-fd context, register
 * it with the global @ctrl, and on the first open raise the PM-QoS level.
 * Only the "already called" debug output differs per kernel config.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int fimg2d_open(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx;
	unsigned long flags, qflags, count;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		fimg2d_err("not enough memory for ctx\n");
		return -ENOMEM;
	}
	file->private_data = (void *)ctx;

	/* register context and snapshot the context count atomically */
	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_add_context(ctrl, ctx);
	count = atomic_read(&ctrl->nctx);
	g2d_spin_unlock(&ctrl->bltlock, flags);

	if (count == 1) {
		/* first opener: set QoS levels under qoslock, then apply */
		g2d_spin_lock(&ctrl->qoslock, qflags);
		ctrl->pre_qos_lv = G2D_LV3;
		ctrl->qos_lv = G2D_LV2;
		g2d_spin_unlock(&ctrl->qoslock, qflags);
		fimg2d_pm_qos_update(ctrl, FIMG2D_QOS_ON);
	} else {
		/* QoS is already on; which message prints depends on config */
#ifdef CONFIG_FIMG2D_USE_BUS_DEVFREQ
		fimg2d_debug("count:%ld, fimg2d_pm_qos_update(ON,mif,int) is already called\n", count);
#endif
#if defined(CONFIG_ARM_EXYNOS_IKS_CPUFREQ) || \
	defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
		fimg2d_debug("count:%ld, fimg2d_pm_qos_update(ON,cpu) is already called\n", count);
#endif
	}
	return 0;
}
void fimg2d_pm_qos_update_cpu(struct fimg2d_control *ctrl, enum fimg2d_qos_status status) { enum fimg2d_qos_level idx; unsigned long qflags; g2d_spin_lock(&ctrl->qoslock, qflags); if ((ctrl->qos_lv >= G2D_LV0) && (ctrl->qos_lv < G2D_LV_END)) idx = ctrl->qos_lv; else goto err; g2d_spin_unlock(&ctrl->qoslock, qflags); if (status == FIMG2D_QOS_ON) { if (ctrl->pre_qos_lv != ctrl->qos_lv) { #ifdef CONFIG_SCHED_HMP g2d_spin_lock(&ctrl->qoslock, qflags); if (idx == 0 && !ctrl->boost) { set_hmp_boost(true); ctrl->boost = true; fimg2d_debug("turn on hmp booster\n"); } g2d_spin_unlock(&ctrl->qoslock, qflags); #endif pm_qos_update_request(&ctrl->exynos5_g2d_cluster1_qos, g2d_qos_table[idx].freq_cpu); pm_qos_update_request(&ctrl->exynos5_g2d_cluster0_qos, g2d_qos_table[idx].freq_kfc); fimg2d_debug("idx:%d, freq_cpu:%d, freq_kfc:%d\n", idx, g2d_qos_table[idx].freq_cpu, g2d_qos_table[idx].freq_kfc); } } else if (status == FIMG2D_QOS_OFF) { pm_qos_update_request(&ctrl->exynos5_g2d_cluster1_qos, 0); pm_qos_update_request(&ctrl->exynos5_g2d_cluster0_qos, 0); #ifdef CONFIG_SCHED_HMP g2d_spin_lock(&ctrl->qoslock, qflags); if (ctrl->boost) { set_hmp_boost(false); ctrl->boost = false; fimg2d_debug("turn off hmp booster\n"); } g2d_spin_unlock(&ctrl->qoslock, qflags); #endif } return; err: fimg2d_debug("invalid qos_lv:%d\n", ctrl->qos_lv); }
void fimg2d_pm_qos_update_bus(struct fimg2d_control *ctrl, enum fimg2d_qos_status status) { enum fimg2d_qos_level idx; int ret = 0; unsigned long qflags; g2d_spin_lock(&ctrl->qoslock, qflags); if ((ctrl->qos_lv >= G2D_LV0) && (ctrl->qos_lv < G2D_LV_END)) idx = ctrl->qos_lv; else goto err; g2d_spin_unlock(&ctrl->qoslock, qflags); if (status == FIMG2D_QOS_ON) { if (ctrl->pre_qos_lv != ctrl->qos_lv) { pm_qos_update_request(&ctrl->exynos5_g2d_mif_qos, g2d_qos_table[idx].freq_mif); pm_qos_update_request(&ctrl->exynos5_g2d_int_qos, g2d_qos_table[idx].freq_int); fimg2d_debug("idx:%d, freq_mif:%d, freq_int:%d, ret:%d\n", idx, g2d_qos_table[idx].freq_mif, g2d_qos_table[idx].freq_int, ret); } } else if (status == FIMG2D_QOS_OFF) { pm_qos_update_request(&ctrl->exynos5_g2d_mif_qos, 0); pm_qos_update_request(&ctrl->exynos5_g2d_int_qos, 0); } return; err: fimg2d_debug("invalid qos_lv:%d\n", ctrl->qos_lv); }
static int fimg2d_release(struct inode *inode, struct file *file) { struct fimg2d_context *ctx = file->private_data; int retry = POLL_RETRY; unsigned long flags, count; fimg2d_debug("ctx %p\n", ctx); while (retry--) { if (!atomic_read(&ctx->ncmd)) break; mdelay(POLL_TIMEOUT); } g2d_spin_lock(&ctrl->bltlock, flags); fimg2d_del_context(ctrl, ctx); count = atomic_read(&ctrl->nctx); g2d_spin_unlock(&ctrl->bltlock, flags); if (!count) fimg2d_pm_qos_update(ctrl, FIMG2D_QOS_OFF); else { fimg2d_debug("count:%ld, fimg2d_pm_pos_update is " "not called yet\n", count); } kfree(ctx); return 0; }
/*
 * fimg2d_open - open() handler (IKS-cpufreq variant with direct PM-QoS).
 *
 * Allocates a per-fd context and registers it with the global @ctrl.
 * Under CONFIG_ARM_EXYNOS_IKS_CPUFREQ the first opener pins MIF to
 * 800 MHz and CPU to 400 MHz via file-scope pm_qos request objects
 * (presumably declared elsewhere in this file — not visible here).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int fimg2d_open(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx;
	unsigned long flags, count;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		fimg2d_err("not enough memory for ctx\n");
		return -ENOMEM;
	}
	file->private_data = (void *)ctx;

	/* register context and snapshot the context count atomically */
	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_add_context(ctrl, ctx);
	count = atomic_read(&ctrl->nctx);
	g2d_spin_unlock(&ctrl->bltlock, flags);

#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
	if (count == 1) {
		/* mif lock : 800MHz */
		pm_qos_update_request(&exynos5_g2d_mif_qos, 800000);
		pm_qos_update_request(&exynos5_g2d_cpu_qos, 400000);
	} else
		fimg2d_debug("count:%ld, pm_qos_update_request() is already called\n", count);
#endif
	return 0;
}
/*
 * fimg2d_release - release() handler (IKS-cpufreq variant).
 *
 * Drains this context's in-flight commands (bounded poll), unregisters
 * it, and — when it was the last context — zeroes the MIF/CPU PM-QoS
 * requests set up by the matching fimg2d_open() variant.
 *
 * Always returns 0.
 */
static int fimg2d_release(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx = file->private_data;
	int retry = POLL_RETRY;
	unsigned long flags, count;

	fimg2d_debug("ctx %p\n", ctx);

	/* bounded busy-wait for this context's pending commands to finish */
	while (retry--) {
		if (!atomic_read(&ctx->ncmd))
			break;
		mdelay(POLL_TIMEOUT);
	}

	/* unregister and snapshot the remaining context count atomically */
	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_del_context(ctrl, ctx);
	count = atomic_read(&ctrl->nctx);
	g2d_spin_unlock(&ctrl->bltlock, flags);

#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
	if (!count) {
		/* last context gone: release both QoS requests */
		pm_qos_update_request(&exynos5_g2d_mif_qos, 0);
		pm_qos_update_request(&exynos5_g2d_cpu_qos, 0);
	} else
		/* NOTE(review): "already called" reads odd on the release
		 * path — "not released yet" seems intended; confirm. */
		fimg2d_debug("count:%ld, pm_qos_update_request() is already called\n", count);
#endif
	kfree(ctx);

	return 0;
}
/*
 * fimg2d_release - release() handler (config-dependent debug-message
 * variant).
 *
 * Same flow as the other fimg2d_release(): drain in-flight commands,
 * unregister the context, drop PM-QoS when the last context closes.
 * Only the "not called yet" debug output differs per kernel config.
 *
 * Always returns 0.
 */
static int fimg2d_release(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx = file->private_data;
	int retry = POLL_RETRY;
	unsigned long flags, count;

	fimg2d_debug("ctx %p\n", ctx);

	/* bounded busy-wait for this context's pending commands to finish */
	while (retry--) {
		if (!atomic_read(&ctx->ncmd))
			break;
		mdelay(POLL_TIMEOUT);
	}

	/* unregister and snapshot the remaining context count atomically */
	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_del_context(ctrl, ctx);
	count = atomic_read(&ctrl->nctx);
	g2d_spin_unlock(&ctrl->bltlock, flags);

	if (!count)
		fimg2d_pm_qos_update(ctrl, FIMG2D_QOS_OFF);
	else {
		/* QoS stays on; which message prints depends on config */
#ifdef CONFIG_FIMG2D_USE_BUS_DEVFREQ
		fimg2d_debug("count:%ld, fimg2d_pm_qos_update(OFF,mif.int) is not called yet\n", count);
#endif
#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
		fimg2d_debug("count:%ld, fimg2d_pm_qos_update(OFF, cpu) is not called yet\n", count);
#endif
	}
	kfree(ctx);

	return 0;
}
/* Unregister @ctx: decrement the live-context counter under bltlock. */
void fimg2d_del_context(struct fimg2d_control *ctrl, struct fimg2d_context *ctx)
{
	unsigned long irq_state;

	g2d_spin_lock(&ctrl->bltlock, irq_state);

	atomic_dec(&ctrl->nctx);
	fimg2d_debug("ctx %p nctx(%d)\n", ctx, atomic_read(&ctrl->nctx));

	g2d_spin_unlock(&ctrl->bltlock, irq_state);
}
/* Fetch the head of the blit command queue under bltlock (may be NULL-ish
 * depending on fimg2d_get_first_command's contract — defined elsewhere). */
struct fimg2d_bltcmd *fimg2d_get_command(struct fimg2d_control *ctrl)
{
	struct fimg2d_bltcmd *head;
	unsigned long irq_state;

	g2d_spin_lock(&ctrl->bltlock, irq_state);
	head = fimg2d_get_first_command(ctrl);
	g2d_spin_unlock(&ctrl->bltlock, irq_state);

	return head;
}
/* Register @ctx: reset its command count and wait queue, then bump the
 * live-context counter — all under bltlock. */
void fimg2d_add_context(struct fimg2d_control *ctrl, struct fimg2d_context *ctx)
{
	unsigned long irq_state;

	g2d_spin_lock(&ctrl->bltlock, irq_state);

	/* fresh context: no commands in flight yet */
	atomic_set(&ctx->ncmd, 0);
	init_waitqueue_head(&ctx->wait_q);

	atomic_inc(&ctrl->nctx);
	fimg2d_debug("ctx %p nctx(%d)\n", ctx, atomic_read(&ctrl->nctx));

	g2d_spin_unlock(&ctrl->bltlock, irq_state);
}
/* PM resume: clear the suspended flag under drvlock + bltlock. */
static int fimg2d_resume(struct device *dev)
{
	unsigned long irq_state;

	g2d_lock(&ctrl->drvlock);
	g2d_spin_lock(&ctrl->bltlock, irq_state);

	/* driver may accept commands again */
	atomic_set(&ctrl->suspended, 0);

	g2d_spin_unlock(&ctrl->bltlock, irq_state);
	g2d_unlock(&ctrl->drvlock);

	fimg2d_info("resume... done\n");
	return 0;
}
/*
 * fimg2d_request_bitblt - kick off processing of @ctx's queued commands.
 *
 * With BLIT_WORKQUE: dispatch the (file-scope) work item to the driver's
 * workqueue and block until the context's commands complete via
 * fimg2d_context_wait(). Without it: run the blit synchronously through
 * fimg2d_do_bitblt().
 *
 * Returns the wait/do_bitblt result (semantics defined by those helpers,
 * which are not visible here).
 */
static int fimg2d_request_bitblt(struct fimg2d_control *ctrl,
		struct fimg2d_context *ctx)
{
#ifdef BLIT_WORKQUE
	unsigned long flags;

	/* queue under bltlock so dispatch ordering matches the command queue */
	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_debug("dispatch ctx %p to kernel thread\n", ctx);
	queue_work(ctrl->work_q, &fimg2d_work);
	g2d_spin_unlock(&ctrl->bltlock, flags);
	return fimg2d_context_wait(ctx);
#else
	return fimg2d_do_bitblt(ctrl);
#endif
}
/* PM resume (5ar2 variant): clear the suspended flag, then pulse the
 * clock once on G2D 5AR2 parts. */
static int fimg2d_resume(struct device *dev)
{
	unsigned long irq_state;

	g2d_lock(&ctrl->drvlock);
	g2d_spin_lock(&ctrl->bltlock, irq_state);

	/* driver may accept commands again */
	atomic_set(&ctrl->suspended, 0);

	g2d_spin_unlock(&ctrl->bltlock, irq_state);
	g2d_unlock(&ctrl->drvlock);

	/* G2D clk gating mask */
	if (ip_is_g2d_5ar2()) {
		fimg2d_clk_on(ctrl);
		fimg2d_clk_off(ctrl);
	}

	fimg2d_info("resume... done\n");
	return 0;
}
/* PM suspend (5433 variant): mark suspended, poll-drain the command
 * queue, then switch the clock to OSC on 5H/5HP parts. */
static int fimg2d_suspend(struct device *dev)
{
	int attempts_left = POLL_RETRY;
	unsigned long irq_state;

	/* reject new commands from here on */
	g2d_spin_lock(&ctrl->bltlock, irq_state);
	atomic_set(&ctrl->suspended, 1);
	g2d_spin_unlock(&ctrl->bltlock, irq_state);

	/* bounded wait for already-queued commands to drain */
	while (attempts_left--) {
		if (fimg2d_queue_is_empty(&ctrl->cmd_q))
			break;
		mdelay(POLL_TIMEOUT);
	}

	if (ip_is_g2d_5h() || ip_is_g2d_5hp())
		exynos5433_fimg2d_clk_set_osc(ctrl);

	fimg2d_info("suspend... done\n");
	return 0;
}
/* PM suspend (drvlock variant): mark suspended and poll-drain the
 * command queue while holding drvlock across the drain. */
static int fimg2d_suspend(struct device *dev)
{
	int attempts_left = POLL_RETRY;
	unsigned long irq_state;

	g2d_lock(&ctrl->drvlock);

	/* reject new commands from here on */
	g2d_spin_lock(&ctrl->bltlock, irq_state);
	atomic_set(&ctrl->suspended, 1);
	g2d_spin_unlock(&ctrl->bltlock, irq_state);

	/* bounded wait for already-queued commands to drain */
	while (attempts_left--) {
		if (fimg2d_queue_is_empty(&ctrl->cmd_q))
			break;
		mdelay(POLL_TIMEOUT);
	}

	g2d_unlock(&ctrl->drvlock);

	fimg2d_info("suspend... done\n");
	return 0;
}
/*
 * fimg2d_del_command - retire a completed blit command.
 *
 * Records/prints perf data, then under bltlock: removes the command from
 * the queue, frees it, and decrements the owning context's in-flight
 * count. Note @ctx is captured before kfree(cmd) because cmd->ctx is
 * unreachable afterwards. With BLIT_WORKQUE the context's waiter
 * (fimg2d_context_wait) is woken once its last command retires.
 */
void fimg2d_del_command(struct fimg2d_control *ctrl, struct fimg2d_bltcmd *cmd)
{
	unsigned long flags;
	/* keep the ctx pointer: cmd is freed below */
	struct fimg2d_context *ctx = cmd->ctx;

	perf_end(cmd, PERF_TOTAL);
	perf_print(cmd);

	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_dequeue(&cmd->node);
	kfree(cmd);
	atomic_dec(&ctx->ncmd);
#ifdef BLIT_WORKQUE
	/* wake up context */
	if (!atomic_read(&ctx->ncmd))
		wake_up(&ctx->wait_q);
#endif
	g2d_spin_unlock(&ctrl->bltlock, flags);
}
/*
 * fimg2d_resume - PM resume (clk-reconfiguring variant).
 *
 * Clears the suspended flag, then per-IP: pulses the clock on 5AR2 parts,
 * or re-applies the clock setup on 5HP parts via
 * exynos5430_fimg2d_clk_set().
 *
 * Returns 0 on success; -ENOENT if the 5HP clock setup fails.
 * NOTE(review): -ENOENT is an odd errno for a clock failure, and the
 * computed 'ret' from clk_set is discarded in favor of it — confirm
 * whether propagating 'ret' was intended.
 */
static int fimg2d_resume(struct device *dev)
{
	unsigned long flags;
	int ret = 0;

	/* driver may accept commands again */
	g2d_spin_lock(&ctrl->bltlock, flags);
	atomic_set(&ctrl->suspended, 0);
	g2d_spin_unlock(&ctrl->bltlock, flags);

	/* G2D clk gating mask */
	if (ip_is_g2d_5ar2()) {
		fimg2d_clk_on(ctrl);
		fimg2d_clk_off(ctrl);
	} else if (ip_is_g2d_5hp()) {
		ret = exynos5430_fimg2d_clk_set(ctrl);
		if (ret) {
			fimg2d_err("failed to exynos5430_fimg2d_clk_set()\n");
			return -ENOENT;
		}
	}

	fimg2d_info("resume... done\n");
	return ret;
}
/*
 * fimg2d_pm_qos_update - apply or drop all G2D PM-QoS requests
 * (bus MIF/INT and/or CPU/KFC, depending on kernel config).
 * @ctrl:   driver control block
 * @status: FIMG2D_QOS_ON or FIMG2D_QOS_OFF
 *
 * BUG FIXES vs. the original:
 *  1. 'goto err' left ctrl->qoslock held (unlock sat after the goto) —
 *     the lock is now released before taking the error path.
 *  2. The closing brace of "if (pre_qos_lv != qos_lv) {" sat inside the
 *     IKS/MP #if region, so the braces only balanced when that config
 *     was enabled — the brace is now outside all #if regions.
 *  3. There was no 'return;' before the err: label, so every successful
 *     call also printed "invalid qos_lv" — a return is added.
 */
void fimg2d_pm_qos_update(struct fimg2d_control *ctrl,
		enum fimg2d_qos_status status)
{
#if defined(CONFIG_ARM_EXYNOS_IKS_CPUFREQ) || \
	defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ) || \
	defined(CONFIG_FIMG2D_USE_BUS_DEVFREQ)
	struct fimg2d_platdata *pdata;
	enum fimg2d_qos_level idx;
	int ret = 0;
	unsigned long qflags;

#ifdef CONFIG_OF
	pdata = ctrl->pdata;
#else
	pdata = to_fimg2d_plat(ctrl->dev);
#endif

	/* snapshot and validate the requested level under qoslock */
	g2d_spin_lock(&ctrl->qoslock, qflags);
	if ((ctrl->qos_lv >= G2D_LV0) && (ctrl->qos_lv < G2D_LV_END)) {
		idx = ctrl->qos_lv;
	} else {
		/* must drop the lock before bailing out */
		g2d_spin_unlock(&ctrl->qoslock, qflags);
		goto err;
	}
	g2d_spin_unlock(&ctrl->qoslock, qflags);

	if (status == FIMG2D_QOS_ON) {
		/* only touch PM-QoS when the level actually changed */
		if (ctrl->pre_qos_lv != ctrl->qos_lv) {
#ifdef CONFIG_FIMG2D_USE_BUS_DEVFREQ
			/* highest level (LV0) additionally engages HMP boost */
			if (idx == 0)
				ret = set_hmp_boost(true);
			pm_qos_update_request(&ctrl->exynos5_g2d_mif_qos,
					g2d_qos_table[idx].freq_mif);
			pm_qos_update_request(&ctrl->exynos5_g2d_int_qos,
					g2d_qos_table[idx].freq_int);
			fimg2d_debug("idx:%d, freq_mif:%d, freq_int:%d, ret:%d\n",
					idx, g2d_qos_table[idx].freq_mif,
					g2d_qos_table[idx].freq_int, ret);
#endif
#if defined(CONFIG_ARM_EXYNOS_IKS_CPUFREQ) || \
	defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
			pm_qos_update_request(&ctrl->exynos5_g2d_cpu_qos,
					g2d_qos_table[idx].freq_cpu);
			pm_qos_update_request(&ctrl->exynos5_g2d_kfc_qos,
					g2d_qos_table[idx].freq_kfc);
			fimg2d_debug("idx:%d, freq_cpu:%d, freq_kfc:%d\n",
					idx, g2d_qos_table[idx].freq_cpu,
					g2d_qos_table[idx].freq_kfc);
#endif
		}
	} else if (status == FIMG2D_QOS_OFF) {
#ifdef CONFIG_FIMG2D_USE_BUS_DEVFREQ
		pm_qos_update_request(&ctrl->exynos5_g2d_mif_qos, 0);
		pm_qos_update_request(&ctrl->exynos5_g2d_int_qos, 0);
#endif
#if defined(CONFIG_ARM_EXYNOS_IKS_CPUFREQ) || \
	defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
		pm_qos_update_request(&ctrl->exynos5_g2d_cpu_qos, 0);
		pm_qos_update_request(&ctrl->exynos5_g2d_kfc_qos, 0);
#endif
		if (idx == 0)
			ret = set_hmp_boost(false);
	}
	return;
err:
	fimg2d_debug("invalid qos_lv:%d\n", ctrl->qos_lv);
#endif
}
/*
 * fimg2d_add_command - build a kernel-side blit command from a userspace
 * request and enqueue it.
 * @ctrl: driver control block
 * @ctx:  submitting context (owner of the new command)
 * @buf:  userspace pointer to a struct fimg2d_blit
 *
 * Copies the top-level blit descriptor, then each referenced image
 * descriptor (src/msk/tmp/dst) into the command's own storage and
 * repoints the blit's pointers at those kernel copies. After parameter
 * checking, fix-up, and DMA-sync preparation, the command is enqueued
 * under bltlock and the context's in-flight count bumped.
 *
 * Returns 0 on success; -ENOMEM on allocation failure; -EFAULT on a bad
 * user copy or failed DMA-sync check; -EINVAL on invalid parameters;
 * -EPERM when the driver is inactive or suspended (caller is expected to
 * fall back to software rendering, per the debug message).
 */
int fimg2d_add_command(struct fimg2d_control *ctrl, struct fimg2d_context *ctx,
		struct fimg2d_blit __user *buf)
{
	unsigned long flags;
	struct fimg2d_blit *blt;
	struct fimg2d_bltcmd *cmd;
	int len = sizeof(struct fimg2d_image);
	int ret = 0;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* top-level descriptor first; image pointers inside still point
	 * at userspace until each is copied below */
	if (copy_from_user(&cmd->blt, buf, sizeof(cmd->blt))) {
		ret = -EFAULT;
		goto err;
	}

	cmd->ctx = ctx;
	blt = &cmd->blt;

	if (blt->src) {
		if (copy_from_user(&cmd->image[ISRC], blt->src, len)) {
			ret = -EFAULT;
			goto err;
		}
		/* repoint at the kernel-side copy */
		blt->src = &cmd->image[ISRC];
	}
	if (blt->msk) {
		if (copy_from_user(&cmd->image[IMSK], blt->msk, len)) {
			ret = -EFAULT;
			goto err;
		}
		blt->msk = &cmd->image[IMSK];
	}
	if (blt->tmp) {
		if (copy_from_user(&cmd->image[ITMP], blt->tmp, len)) {
			ret = -EFAULT;
			goto err;
		}
		blt->tmp = &cmd->image[ITMP];
	}
	if (blt->dst) {
		if (copy_from_user(&cmd->image[IDST], blt->dst, len)) {
			ret = -EFAULT;
			goto err;
		}
		blt->dst = &cmd->image[IDST];
	}

	fimg2d_dump_command(cmd);
	perf_start(cmd, PERF_TOTAL);

	/* validate, then normalize the parameters before DMA preparation */
	if (fimg2d_check_params(cmd)) {
		ret = -EINVAL;
		goto err;
	}
	fimg2d_fixup_params(cmd);

	if (fimg2d_check_dma_sync(cmd)) {
		ret = -EFAULT;
		goto err;
	}

	/* add command node and increase ncmd */
	g2d_spin_lock(&ctrl->bltlock, flags);
	if (atomic_read(&ctrl->drvact) || atomic_read(&ctrl->suspended)) {
		fimg2d_debug("driver is unavailable, do sw fallback\n");
		g2d_spin_unlock(&ctrl->bltlock, flags);
		ret = -EPERM;
		goto err;
	}
	atomic_inc(&ctx->ncmd);
	fimg2d_enqueue(&cmd->node, &ctrl->cmd_q);
	fimg2d_debug("ctx %p pgd %p ncmd(%d) seq_no(%u)\n",
			cmd->ctx, (unsigned long *)cmd->ctx->mm->pgd,
			atomic_read(&ctx->ncmd), cmd->blt.seq_no);
	g2d_spin_unlock(&ctrl->bltlock, flags);

	return 0;

err:
	kfree(cmd);
	return ret;
}