/*
 * Commit the queued vsync update to the hardware: wait out any
 * still-outstanding overlay/dmap kickoff, stage the queued pipes,
 * flush pending DCS commands, then kick off the overlay engine,
 * optionally waiting for the kickoff to complete when 'wait' is set.
 */
int mdp4_dsi_cmd_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int need_dmap_wait = 0;
	int need_ov_wait = 0;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[0];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}

	if (mdp4_dsi_cmd_clk_check(vctrl) < 0) {
		mdp4_dsi_cmd_pipe_clean(vp);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		/* Blt */
		if (vctrl->blt_wait) {
			INIT_COMPLETION(vctrl->dmap_comp);
			need_dmap_wait = 1;
		}
		if (vctrl->ov_koff != vctrl->ov_done) {
			INIT_COMPLETION(vctrl->ov_comp);
			need_ov_wait = 1;
		}
	} else {
		/* direct out */
		if (vctrl->dmap_koff != vctrl->dmap_done) {
			INIT_COMPLETION(vctrl->dmap_comp);
			pr_debug("%s: wait, ok=%d od=%d dk=%d dd=%d cpu=%d\n",
				__func__, vctrl->ov_koff, vctrl->ov_done,
				vctrl->dmap_koff, vctrl->dmap_done,
				smp_processor_id());
			need_dmap_wait = 1;
		}
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (need_dmap_wait) {
		pr_debug("%s: wait4dmap\n", __func__);
		mdp4_dsi_cmd_wait4dmap(0);
	}

	if (need_ov_wait) {
		pr_debug("%s: wait4ov\n", __func__);
		mdp4_dsi_cmd_wait4ov(0);
	}

	if (pipe->ov_blt_addr) {
		if (vctrl->blt_end) {
			vctrl->blt_end = 0;
			pipe->ov_blt_addr = 0;
			pipe->dma_blt_addr = 0;
		}
	}

	if (vctrl->blt_change) {
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		vctrl->blt_change = 0;
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/*
			 * move this pipe's previous iommu mapping to the
			 * freelist; it will be freed at the next pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0;	/* clear */
		}
	}

	/* tx dcs commands, if any are queued */
	mipi_dsi_cmdlist_commit(1);

	mdp4_mixer_stage_commit(mixer);

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dsi_cmd_blt_ov_update(pipe);
		pipe->ov_cnt++;
		vctrl->ov_koff++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	} else {
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		vctrl->dmap_koff++;
	}
	pr_debug("%s: kickoff, pid=%d\n", __func__, current->pid);
	/* kickoff overlay engine */
	mdp4_stat.kickoff_ov0++;
	mdp_pipe_kickoff_simplified(MDP_OVERLAY0_TERM);
	mb();	/* make sure kickoff is executed */
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_cmd_wait4ov(0);
		else
			mdp4_dsi_cmd_wait4dmap(0);
	}

	return cnt;
}
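/*
 * Panel-off path for DSI command mode. Flags the controller as
 * suspended, releases any vsync waiters, then polls up to ~200 ms
 * (10 x 20 ms) for the clock worker to drop the MDP/DSI clocks,
 * forcing them off if the wait times out. Finally it tears down the
 * base pipe and cleans any still-queued vsync update so those pipes
 * are not committed after suspend.
 */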
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;
	int need_wait, cnt;
	unsigned long flags;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&mfd->dma->ov_mutex);
		return ret;
	}

	need_wait = 0;
	mutex_lock(&vctrl->update_lock);
	atomic_set(&vctrl->suspend, 1);
	complete_all(&vctrl->vsync_comp);

	pr_debug("%s: clk=%d pan=%d\n", __func__,
		vctrl->clk_enabled, vctrl->pan_display);
	if (vctrl->clk_enabled)
		need_wait = 1;
	mutex_unlock(&vctrl->update_lock);

	cnt = 0;
	if (need_wait) {
		while (vctrl->clk_enabled) {
			msleep(20);
			cnt++;
			if (cnt > 10)
				break;
		}
	}

	/* message for system suspended */
	if (cnt > 10) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->clk_control = 0;
		vctrl->clk_enabled = 0;
		vctrl->expire_tick = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mipi_dsi_clk_cfg(0);
		mdp_clk_ctrl(0);
		pr_err("%s: Error, SET_CLK_OFF by force\n", __func__);
	} else {
		pr_debug("%s: mdp clocks off at cnt=%d\n", __func__, cnt);
	}

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(pipe->mixer_num);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe, 1);
	vctrl->base_pipe = NULL;

	if (vctrl->vsync_enabled) {
		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
		vctrl->vsync_enabled = 0;
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
		mdp4_dsi_cmd_pipe_clean(vp);
	}

	mutex_unlock(&mfd->dma->ov_mutex);

	pr_debug("%s-:\n", __func__);
	return ret;
}
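/*
 * A second variant of mdp4_dsi_cmd_off() (apparently a later revision
 * of the routine above). It differs in teardown ordering: waiters are
 * released via wake_up_interruptible_all() on vctrl->wait_queue rather
 * than complete_all() on vsync_comp, the pending-update cleanup runs
 * before the pipe teardown, the "adb stop" (mfd->ref_cnt == 0) and
 * system-suspend cases free the base pipe differently (including the
 * borderfill stage-down), suspend is flagged only at the end, and the
 * iommu freelist is drained twice since emptying it takes two passes.
 */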
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;
	int need_wait, cnt;
	unsigned long flags;
	int mixer = 0;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&mfd->dma->ov_mutex);
		return ret;
	}

	need_wait = 0;
	mutex_lock(&vctrl->update_lock);
	wake_up_interruptible_all(&vctrl->wait_queue);

	pr_debug("%s: clk=%d pan=%d\n", __func__,
		vctrl->clk_enabled, vctrl->pan_display);
	if (vctrl->clk_enabled)
		need_wait = 1;
	mutex_unlock(&vctrl->update_lock);

	cnt = 0;
	if (need_wait) {
		while (vctrl->clk_enabled) {
			msleep(20);
			cnt++;
			if (cnt > 10)
				break;
		}
	}

	if (cnt > 10) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->clk_control = 0;
		vctrl->clk_enabled = 0;
		vctrl->expire_tick = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mipi_dsi_clk_cfg(0);
		mdp_clk_ctrl(0);
		pr_err("%s: Error, SET_CLK_OFF by force\n", __func__);
	}

	if (vctrl->vsync_enabled) {
		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
		vctrl->vsync_enabled = 0;
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
		mdp4_dsi_cmd_pipe_clean(vp);
	}

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mixer = pipe->mixer_num;
		mdp4_overlay_unset_mixer(mixer);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
		}
	}

	atomic_set(&vctrl->suspend, 1);

	/*
	 * clean up the ion freelist:
	 * emptying the ion free list takes two stages,
	 * so unmap freelist must be called twice
	 */
	mdp4_overlay_iommu_unmap_freelist(mixer);
	mdp4_overlay_iommu_unmap_freelist(mixer);

	mutex_unlock(&mfd->dma->ov_mutex);

	pr_debug("%s-:\n", __func__);
	return ret;
}