void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
		return;

	if (lcdc_pipe->blt_addr) {
		mdp4_overlay_lcdc_dma_busy_wait(mfd);

		mdp4_lcdc_blt_ov_update(lcdc_pipe);
		lcdc_pipe->ov_cnt++;

		spin_lock_irqsave(&mdp_spin_lock, flag);
		outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
		mdp_intr_mask |= INTR_OVERLAY0_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		mdp_enable_irq(MDP_OVERLAY0_TERM);
		mfd->dma->busy = TRUE;
		mb();	/* make sure all registers updated */
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		mb();
		mdp4_overlay_lcdc_wait4event(mfd, INTR_DMA_P_DONE);
	} else {
		mdp4_overlay_lcdc_wait4event(mfd, INTR_PRIMARY_VSYNC);
	}
	mdp4_set_perf_level();
}
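/*
 * For reference: a minimal sketch of the wait4event helper called above.
 * It illustrates the completion-based wait pattern this driver uses
 * (enable the interrupt, then block until the ISR signals a completion).
 * This is not the verbatim driver code; the LCDC enable-bit read and the
 * exact completion object (lcdc_comp) are assumptions drawn from the
 * handlers in this file.
 */
static void mdp4_overlay_lcdc_wait4event_sketch(struct msm_fb_data_type *mfd,
			int intr_done)
{
	unsigned long flag;

	/* if the LCDC timing generator is off, the event will never fire */
	if (!(inpdw(MDP_BASE + LCDC_BASE) & 0x01))
		return;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	INIT_COMPLETION(lcdc_comp);		/* re-arm the completion */
	outp32(MDP_INTR_CLEAR, intr_done);	/* clear stale status */
	mdp_intr_mask |= intr_done;		/* unmask the event */
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(MDP_DMA2_TERM);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* the ISR (e.g. mdp4_dma_p_done_lcdc) calls complete_all(&lcdc_comp) */
	wait_for_completion(&lcdc_comp);
	mdp_disable_irq(MDP_DMA2_TERM);
}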
/*
 * mdp4_dmap_done_lcdc: called from isr
 */
void mdp4_dmap_done_lcdc(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	if (vctrl->blt_change) {
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		if (pipe->ov_blt_addr) {
			mdp4_lcdc_blt_ov_update(pipe);
			pipe->ov_cnt++;
			/* prefill one frame */
			vsync_irq_enable(INTR_OVERLAY0_DONE,
						MDP_OVERLAY0_TERM);
			/* kickoff overlay0 engine */
			mdp4_stat.kickoff_ov0++;
			vctrl->ov_koff++;	/* make up for prefill */
			outpdw(MDP_BASE + 0x0004, 0);
		}
		vctrl->blt_change = 0;
	}

	complete_all(&vctrl->dmap_comp);
	mdp4_overlay_dma_commit(cndx);
	spin_unlock(&vctrl->spin_lock);
}
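/*
 * For reference: a minimal sketch of the vsync_irq_enable()/
 * vsync_irq_disable() helpers used by the handlers in this file. The
 * register sequence mirrors the open-coded mask updates in the older
 * functions above; treating mdp_enable_irq()/mdp_disable_irq_nosync()
 * as per-term refcounted enables is an assumption.
 */
static void vsync_irq_enable_sketch(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	outp32(MDP_INTR_CLEAR, intr);	/* drop any stale status bit */
	mdp_intr_mask |= intr;		/* unmask this MDP interrupt */
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
}

static void vsync_irq_disable_sketch(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask &= ~intr;		/* mask the interrupt again */
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_disable_irq_nosync(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
}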
void mdp4_dma_p_done_lcdc(struct mdp_dma_data *dma)
{
	if (blt_cfg_changed) {
		mdp_is_in_isr = TRUE;
		mdp4_overlayproc_cfg(lcdc_pipe);
		mdp4_overlay_dmap_xy(lcdc_pipe);
		mdp_is_in_isr = FALSE;
		if (lcdc_pipe->blt_addr) {
			mdp4_lcdc_blt_ov_update(lcdc_pipe);
			lcdc_pipe->ov_cnt++;
			outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
			mdp_intr_mask |= INTR_OVERLAY0_DONE;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
			dma->busy = TRUE;
			mdp_enable_irq(MDP_OVERLAY0_TERM);
			/* kickoff overlay engine */
			outpdw(MDP_BASE + 0x0004, 0);
		}
		blt_cfg_changed = 0;
	}
	complete_all(&lcdc_comp);
}
static void mdp4_overlay_lcdc_prefill(struct msm_fb_data_type *mfd)
{
	unsigned long flag;

	if (lcdc_pipe->ov_blt_addr) {
		mdp4_overlay_lcdc_dma_busy_wait(mfd);

		mdp4_lcdc_blt_ov_update(lcdc_pipe);
		lcdc_pipe->ov_cnt++;

		spin_lock_irqsave(&mdp_spin_lock, flag);
		outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
		mdp_intr_mask |= INTR_OVERLAY0_DONE;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		mdp_enable_irq(MDP_OVERLAY0_TERM);
		mfd->dma->busy = TRUE;
		mb();	/* make sure all registers updated */
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		mb();
	}
}
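/*
 * For reference: a minimal sketch of mdp4_lcdc_blt_ov_update(), which the
 * blt (writeback) paths above call before every overlay kickoff. The
 * ping-pong on ov_cnt parity reflects how the driver double-buffers the
 * writeback frame; the RGB888 output assumption and the overlay-proc
 * register offsets (0x000c/0x001c) are illustrative and not verified
 * against every MDP4 revision.
 */
static void mdp4_lcdc_blt_ov_update_sketch(struct mdp4_overlay_pipe *pipe)
{
	uint32 off, addr;
	char *overlay_base;

	if (pipe->ov_blt_addr == 0)
		return;

	/* alternate between the two halves of the blt buffer */
	off = 0;
	if (pipe->ov_cnt & 0x01)
		off = pipe->src_height * pipe->src_width * 3; /* RGB888 */
	addr = pipe->ov_blt_addr + off;

	/* program the overlay-proc 0 output buffer address */
	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;
	outpdw(overlay_base + 0x000c, addr);
	outpdw(overlay_base + 0x001c, addr);
}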
int mdp4_lcdc_pipe_commit(void)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[0];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	return cnt;
}
int mdp4_lcdc_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(cndx);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
				if (pipe->frame_format !=
						MDP4_FRAME_FORMAT_LINEAR) {
					spin_lock_irqsave(&vctrl->spin_lock,
								flags);
					INIT_COMPLETION(vctrl->dmap_comp);
					vsync_irq_enable(INTR_DMA_P_DONE,
								MDP_DMAP_TERM);
					spin_unlock_irqrestore(
						&vctrl->spin_lock, flags);
				}
			}
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	/*
	 * There is a possibility that a pipe commit comes very close to the
	 * next vsync. This may cause two consecutive pipe_commits to happen
	 * within the same vsync period, which causes an iommu page fault
	 * when the previous iommu buffer is freed. Set the
	 * ION_IOMMU_UNMAP_DELAYED flag at ion_map_iommu() to delay the
	 * iommu buffer unmap and fix this problem.
	 * Also, ion_unmap_iommu() may take as long as 9 ms to free an ion
	 * buffer. Therefore mdp4_overlay_iommu_unmap_freelist(mixer) should
	 * be called after stage_commit() to ensure pipe_commit (up to
	 * stage_commit) is completed within the vsync period.
	 */

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	return cnt;
}
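/*
 * For reference: a minimal sketch of the mdp4_lcdc_wait4dmap()/
 * mdp4_lcdc_wait4ov() helpers used by pipe_commit above. They simply
 * block on the completions that the DMA-P done and overlay0 done ISRs
 * signal; the suspend check is an assumption based on similar helpers
 * in this driver family.
 */
static void mdp4_lcdc_wait4dmap_sketch(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	/* completed by mdp4_dmap_done_lcdc() */
	wait_for_completion(&vctrl->dmap_comp);
}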
int mdp4_lcdc_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(cndx);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	return cnt;
}
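/*
 * For reference: a minimal sketch of the overlay0-done ISR that pairs with
 * the ov_koff accounting in mdp4_lcdc_pipe_commit() above. It advances
 * ov_done and signals ov_comp so the commit path's frame-drop check and
 * wait4ov() can make progress. The mdp4_lcdc_blt_dmap_update() step for
 * writeback mode is an assumption modeled on the dmap handler in this file.
 */
void mdp4_overlay0_done_lcdc_sketch(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	vctrl->ov_done++;		/* matches ov_koff from pipe_commit */
	complete_all(&vctrl->ov_comp);
	if (pipe->ov_blt_addr == 0) {
		/* direct (non-blt) mode: nothing to chain */
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	/* writeback mode: point DMA-P at the freshly blitted frame */
	mdp4_lcdc_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	spin_unlock(&vctrl->spin_lock);
}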