/*
 * mdp4_dtv_stop() - stop the DTV (external/HDMI) display path.
 *
 * Waits for the in-flight DMA_E frame to complete, then disables the
 * DTV timing generator.
 *
 * Returns 0 on success, -EINVAL when no base pipe has been set up.
 */
static int mdp4_dtv_stop(struct msm_fb_data_type *mfd)
{
	int cndx = 0;	/* DTV has a single vsync controller instance */
	struct vsycn_ctrl *vctrl;

	vctrl = &vsync_ctrl_db[cndx];
	if (vctrl->base_pipe == NULL)
		return -EINVAL;

	/* drain the current DMA_E transfer before touching the hw */
	mdp4_dtv_wait4dmae(cndx);

	/* disable DTV timing generator */
	MDP_OUTP(MDP_BASE + DTV_BASE, 0);

	return 0;
}
void mdp4_dtv_overlay(struct msm_fb_data_type *mfd) { int cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; mutex_lock(&mfd->dma->ov_mutex); if (!mfd->panel_power_on) { mutex_unlock(&mfd->dma->ov_mutex); return; } vctrl = &vsync_ctrl_db[cndx]; if (vctrl->base_pipe == NULL) mdp4_overlay_dtv_set(mfd, NULL); pipe = vctrl->base_pipe; if (pipe == NULL) { pr_warn("%s: dtv_pipe == NULL\n", __func__); mutex_unlock(&mfd->dma->ov_mutex); return; } if (hdmi_prim_display && (pipe->pipe_used == 0 || pipe->mixer_stage != MDP4_MIXER_STAGE_BASE)) { pr_err("%s: NOT baselayer\n", __func__); mutex_unlock(&mfd->dma->ov_mutex); return; } if (pipe->pipe_type == OVERLAY_TYPE_RGB) { pipe->srcp0_addr = (uint32)mfd->ibuf.buf; mdp4_dtv_pipe_queue(0, pipe); } if (mdp4_dtv_pipe_commit(0, 0)) mdp4_dtv_wait4dmae(0); mutex_unlock(&mfd->dma->ov_mutex); }
static void mdp4_dtv_do_blt(struct msm_fb_data_type *mfd, int enable) { unsigned long flag; int data; int cndx = 0; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; mdp4_allocate_writeback_buf(mfd, MDP4_MIXER1); if (!mfd->ov1_wb_buf->write_addr) { pr_info("%s: ctrl=%d blt_base NOT assigned\n", __func__, cndx); return; } spin_lock_irqsave(&vctrl->spin_lock, flag); if (enable && pipe->ov_blt_addr == 0) { pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr; pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr; pipe->blt_cnt = 0; pipe->ov_cnt = 0; pipe->blt_dmap_done = 0; pipe->blt_ov_koff = 0; pipe->blt_ov_done = 0; mdp4_stat.blt_dtv++; vctrl->blt_change++; } else if (enable == 0 && pipe->ov_blt_addr) { pipe->ov_blt_addr = 0; pipe->dma_blt_addr = 0; vctrl->blt_change++; } pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__, enable, vctrl->blt_change, (int)pipe->ov_blt_addr); if (!vctrl->blt_change) { spin_unlock_irqrestore(&vctrl->spin_lock, flag); return; } atomic_set(&vctrl->suspend, 1); spin_unlock_irqrestore(&vctrl->spin_lock, flag); data = inpdw(MDP_BASE + DTV_BASE); data &= 0x01; if (data) /* timing generator enabled */ mdp4_dtv_wait4dmae(0); if (pipe->ov_blt_addr == 0) { MDP_OUTP(MDP_BASE + DTV_BASE, 0); /* stop dtv */ msleep(20); mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmae_xy(pipe); MDP_OUTP(MDP_BASE + DTV_BASE, 1); /* start dtv */ } atomic_set(&vctrl->suspend, 0); }
int mdp4_dtv_pipe_commit(int cndx, int wait) { int i, undx; int mixer = 0; struct vsycn_ctrl *vctrl; struct vsync_update *vp; struct mdp4_overlay_pipe *pipe; struct mdp4_overlay_pipe *real_pipe; unsigned long flags; int cnt = 0; vctrl = &vsync_ctrl_db[cndx]; mutex_lock(&vctrl->update_lock); undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; pipe = vctrl->base_pipe; mixer = pipe->mixer_num; mdp4_overlay_iommu_unmap_freelist(mixer); if (vp->update_cnt == 0) { mutex_unlock(&vctrl->update_lock); return 0; } /* HTC FIXME: Sometime the mdp hang happened if mdp only create HDMI base pipe and do mdp4_dtv_pipe_commit. So checking all mdp pipe to prevent fail condition happened. */ if(!dtv_pipe_ready) { for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { real_pipe = mdp4_overlay_ndx2pipe(i); if (real_pipe && real_pipe->pipe_used != 0 && real_pipe->pipe_type != OVERLAY_TYPE_BF && real_pipe->mixer_num == MDP4_MIXER1) { dtv_pipe_ready = true; break; } } if(!dtv_pipe_ready && dtv_enabled) { PR_DISP_INFO("Dtv real pipe is not ready, skip this time\n"); mutex_unlock(&vctrl->update_lock); return 0; } } vctrl->update_ndx++; vctrl->update_ndx &= 0x01; vp->update_cnt = 0; /* reset */ mutex_unlock(&vctrl->update_lock); pipe = vp->plist; for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { if (pipe->pipe_used) { cnt++; real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx); if (real_pipe && real_pipe->pipe_used) { /* pipe not unset */ mdp4_overlay_vsync_commit(pipe); } /* free previous iommu to freelist * which will be freed at next * pipe_commit */ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0); pipe->pipe_used = 0; /* clear */ } } mdp4_mixer_stage_commit(mixer); /* start timing generator & mmu if they are not started yet */ mdp4_overlay_dtv_start(); pipe = vctrl->base_pipe; spin_lock_irqsave(&vctrl->spin_lock, flags); if (pipe->ov_blt_addr) { mdp4_dtv_blt_ov_update(pipe); pipe->blt_ov_done++; vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM); mb(); pipe->blt_ov_koff++; /* kickoff overlay1 
engine */ mdp4_stat.kickoff_ov1++; outpdw(MDP_BASE + 0x0008, 0); } else { /* schedule second phase update at dmap */ INIT_COMPLETION(vctrl->dmae_comp); vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM); } spin_unlock_irqrestore(&vctrl->spin_lock, flags); mdp4_stat.overlay_commit[pipe->mixer_num]++; if (wait) mdp4_dtv_wait4dmae(cndx); return cnt; }
int mdp4_dtv_pipe_commit(int cndx, int wait) { int i, undx; int mixer = 0; struct vsycn_ctrl *vctrl; struct vsync_update *vp; struct mdp4_overlay_pipe *pipe; struct mdp4_overlay_pipe *real_pipe; unsigned long flags; int cnt = 0; vctrl = &vsync_ctrl_db[cndx]; mutex_lock(&vctrl->update_lock); undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; pipe = vctrl->base_pipe; mixer = pipe->mixer_num; mdp4_overlay_iommu_unmap_freelist(mixer); if (vp->update_cnt == 0) { mutex_unlock(&vctrl->update_lock); return 0; } vctrl->update_ndx++; vctrl->update_ndx &= 0x01; vp->update_cnt = 0; /* reset */ mutex_unlock(&vctrl->update_lock); pipe = vp->plist; for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { if (pipe->pipe_used) { cnt++; real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx); if (real_pipe && real_pipe->pipe_used) { /* pipe not unset */ mdp4_overlay_vsync_commit(pipe); } /* free previous iommu to freelist * which will be freed at next * pipe_commit */ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0); pipe->pipe_used = 0; /* clear */ } } mdp4_mixer_stage_commit(mixer); /* start timing generator & mmu if they are not started yet */ mdp4_overlay_dtv_start(); pipe = vctrl->base_pipe; spin_lock_irqsave(&vctrl->spin_lock, flags); if (pipe->ov_blt_addr) { mdp4_dtv_blt_ov_update(pipe); pipe->blt_ov_done++; vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM); mb(); pipe->blt_ov_koff++; /* kickoff overlay1 engine */ mdp4_stat.kickoff_ov1++; outpdw(MDP_BASE + 0x0008, 0); } else { /* schedule second phase update at dmap */ INIT_COMPLETION(vctrl->dmae_comp); vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM); } spin_unlock_irqrestore(&vctrl->spin_lock, flags); mdp4_stat.overlay_commit[pipe->mixer_num]++; if (wait) mdp4_dtv_wait4dmae(cndx); return cnt; }
int mdp4_dtv_pipe_commit(int cndx, int wait) { int i, undx; int mixer = 0; struct vsycn_ctrl *vctrl; struct vsync_update *vp; struct mdp4_overlay_pipe *pipe; struct mdp4_overlay_pipe *real_pipe; unsigned long flags; int cnt = 0; vctrl = &vsync_ctrl_db[cndx]; mutex_lock(&vctrl->update_lock); undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; pipe = vctrl->base_pipe; if (pipe == NULL) { pr_err("%s: NO base pipe\n", __func__); mutex_unlock(&vctrl->update_lock); return 0; } mixer = pipe->mixer_num; mdp_update_pm(vctrl->mfd, vctrl->vsync_time); /* * allow stage_commit without pipes queued * (vp->update_cnt == 0) to unstage pipes after * overlay_unset */ vctrl->update_ndx++; vctrl->update_ndx &= 0x01; vp->update_cnt = 0; /* reset */ mutex_unlock(&vctrl->update_lock); pipe = vp->plist; for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { if (pipe->pipe_used) { cnt++; real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx); if (real_pipe && real_pipe->pipe_used) { /* pipe not unset */ mdp4_overlay_vsync_commit(pipe); } } } mdp4_mixer_stage_commit(mixer); /* start timing generator & mmu if they are not started yet */ mdp4_overlay_dtv_start(); /* * there has possibility that pipe commit come very close to next vsync * this may cause two consecutive pie_commits happen within same vsync * period which casue iommu page fault when previous iommu buffer * freed. Set ION_IOMMU_UNMAP_DELAYED flag at ion_map_iommu() to * add delay unmap iommu buffer to fix this problem. * Also ion_unmap_iommu() may take as long as 9 ms to free an ion buffer. * therefore mdp4_overlay_iommu_unmap_freelist(mixer) should be called * ater stage_commit() to ensure pipe_commit (up to stage_commit) * is completed within vsync period. 
*/ /* free previous committed iommu back to pool */ mdp4_overlay_iommu_unmap_freelist(mixer); pipe = vp->plist; for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { if (pipe->pipe_used) { /* free previous iommu to freelist * which will be freed at next * pipe_commit */ mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0); pipe->pipe_used = 0; /* clear */ } } pipe = vctrl->base_pipe; spin_lock_irqsave(&vctrl->spin_lock, flags); if (pipe->ov_blt_addr) { mdp4_dtv_blt_ov_update(pipe); pipe->blt_ov_done++; vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM); mb(); pipe->blt_ov_koff++; /* kickoff overlay1 engine */ mdp4_stat.kickoff_ov1++; outpdw(MDP_BASE + 0x0008, 0); } else { /* schedule second phase update at dmap */ INIT_COMPLETION(vctrl->dmae_comp); vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM); } spin_unlock_irqrestore(&vctrl->spin_lock, flags); mdp4_stat.overlay_commit[pipe->mixer_num]++; if (wait) mdp4_dtv_wait4dmae(0); return cnt; }
int mdp4_dtv_pipe_commit(int cndx, int wait) { int i, undx; int mixer = 0; struct vsycn_ctrl *vctrl; struct vsync_update *vp; struct mdp4_overlay_pipe *pipe; struct mdp4_overlay_pipe *real_pipe; unsigned long flags; int cnt = 0; vctrl = &vsync_ctrl_db[cndx]; mutex_lock(&vctrl->update_lock); undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; pipe = vctrl->base_pipe; mixer = pipe->mixer_num; mdp4_overlay_iommu_unmap_freelist(mixer); if (vp->update_cnt == 0) { mutex_unlock(&vctrl->update_lock); return 0; } if(!dtv_pipe_ready) { for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { real_pipe = mdp4_overlay_ndx2pipe(i); if (real_pipe && real_pipe->pipe_used != 0 && real_pipe->pipe_type != OVERLAY_TYPE_BF && real_pipe->mixer_num == MDP4_MIXER1) { dtv_pipe_ready = true; break; } } if(!dtv_pipe_ready && dtv_enabled) { PR_DISP_INFO("Dtv real pipe is not ready, skip this time\n"); mutex_unlock(&vctrl->update_lock); return 0; } } vctrl->update_ndx++; vctrl->update_ndx &= 0x01; vp->update_cnt = 0; mutex_unlock(&vctrl->update_lock); pipe = vp->plist; for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { if (pipe->pipe_used) { cnt++; real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx); if (real_pipe && real_pipe->pipe_used) { mdp4_overlay_vsync_commit(pipe); } mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0); pipe->pipe_used = 0; } } mdp4_mixer_stage_commit(mixer); mdp4_overlay_dtv_start(); pipe = vctrl->base_pipe; spin_lock_irqsave(&vctrl->spin_lock, flags); if (pipe->ov_blt_addr) { mdp4_dtv_blt_ov_update(pipe); pipe->blt_ov_done++; vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM); mb(); pipe->blt_ov_koff++; mdp4_stat.kickoff_ov1++; outpdw(MDP_BASE + 0x0008, 0); } else { INIT_COMPLETION(vctrl->dmae_comp); vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM); } spin_unlock_irqrestore(&vctrl->spin_lock, flags); mdp4_stat.overlay_commit[pipe->mixer_num]++; if (wait) mdp4_dtv_wait4dmae(cndx); return cnt; }
int mdp4_dtv_pipe_commit(int cndx, int wait) { int i, undx; int mixer = 0; struct vsycn_ctrl *vctrl; struct vsync_update *vp; struct mdp4_overlay_pipe *pipe; struct mdp4_overlay_pipe *real_pipe; unsigned long flags; int cnt = 0; vctrl = &vsync_ctrl_db[cndx]; mutex_lock(&vctrl->update_lock); undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; pipe = vctrl->base_pipe; if (pipe == NULL) { pr_err("%s: NO base pipe\n", __func__); mutex_unlock(&vctrl->update_lock); return 0; } mixer = pipe->mixer_num; mdp_update_pm(vctrl->mfd, vctrl->vsync_time); vctrl->update_ndx++; vctrl->update_ndx &= 0x01; vp->update_cnt = 0; mutex_unlock(&vctrl->update_lock); pipe = vp->plist; for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { if (pipe->pipe_used) { cnt++; real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx); if (real_pipe && real_pipe->pipe_used) { mdp4_overlay_vsync_commit(pipe); } } } mdp4_mixer_stage_commit(mixer); mdp4_overlay_dtv_start(); mdp4_overlay_iommu_unmap_freelist(mixer); pipe = vp->plist; for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) { if (pipe->pipe_used) { mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0); pipe->pipe_used = 0; } } pipe = vctrl->base_pipe; spin_lock_irqsave(&vctrl->spin_lock, flags); if (pipe->ov_blt_addr) { mdp4_dtv_blt_ov_update(pipe); pipe->blt_ov_done++; vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM); mb(); pipe->blt_ov_koff++; mdp4_stat.kickoff_ov1++; outpdw(MDP_BASE + 0x0008, 0); } else { INIT_COMPLETION(vctrl->dmae_comp); vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM); } spin_unlock_irqrestore(&vctrl->spin_lock, flags); mdp4_stat.overlay_commit[pipe->mixer_num]++; if (wait) mdp4_dtv_wait4dmae(0); return cnt; }