int mdp4_lcdc_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;
	int need_wait = 0;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);

	msleep(20);	/* >= 17 ms */

	complete_all(&vctrl->vsync_comp);

	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_lcdc_wait4ov(0);
	}

	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);

	lcdc_enabled = 0;

	mdp_histogram_ctrl_all(FALSE);

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

	/* MDP clock disable */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}

void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cnt, cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (!pipe || !mfd->panel_power_on)
		return;

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->map_buffer->iova[0]) {
			pipe->srcp0_addr = mfd->map_buffer->iova[0]
				+ buf_offset;
		} else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_lcdc_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	mutex_lock(&mfd->dma->ov_mutex);
	cnt = mdp4_lcdc_pipe_commit(cndx, 0);
	mutex_unlock(&mfd->dma->ov_mutex);

#if defined (CONFIG_EUR_MODEL_GT_I9210)
	if (cnt >= 0)
#else
	if (cnt)
#endif
	{
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	mdp4_mixer_late_commit();

	mdp4_overlay_mdp_perf_upd(mfd, 0);
}

void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cnt, cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (!pipe || !mfd->panel_power_on)
		return;

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->display_iova)
			pipe->srcp0_addr = mfd->display_iova + buf_offset;
		else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_lcdc_pipe_queue(0, pipe);
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vctrl->expire_tick = VSYNC_EXPIRE_TICK;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	cnt = 0;
	mutex_lock(&mfd->dma->ov_mutex);
	cnt = mdp4_lcdc_pipe_commit();
	mutex_unlock(&mfd->dma->ov_mutex);

	if (cnt) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
		else
			mdp4_lcdc_wait4dmap(0);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 0);
}

void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cnt, cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (!pipe || mdp_fb_is_power_off(mfd)) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->display_iova)
			pipe->srcp0_addr = mfd->display_iova + buf_offset;
		else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_lcdc_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	cnt = mdp4_lcdc_pipe_commit(cndx, 0);
	if (cnt >= 0) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);
}

void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (!pipe || !mfd->panel_power_on)
		return;

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->map_buffer->iova[0]) {
			pipe->srcp0_addr = mfd->map_buffer->iova[0]
				+ buf_offset;
		} else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_lcdc_pipe_queue(0, pipe);
	}

	mdp4_lcdc_pipe_commit();

	if (pipe->ov_blt_addr)
		mdp4_lcdc_wait4ov(0);
	else
		mdp4_lcdc_wait4dmap(0);
}

int mdp4_lcdc_pipe_commit(void)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[0];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	return cnt;
}

int mdp4_lcdc_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;
	int need_wait = 0;

	pr_err("[QC_DEBUG] %s Entry\n", __func__);
	mdp4_mixer_late_commit();

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

#if defined (CONFIG_EUR_MODEL_GT_I9210)
	mutex_lock(&mfd->dma->ov_mutex);
#endif
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

#if defined (CONFIG_EUR_MODEL_GT_I9210)
	pr_err("[QC_DEBUG] vctrl->wait_vsync_cnt: %d\n",
			vctrl->wait_vsync_cnt);
	mdp4_lcdc_wait4vsync(cndx);
	atomic_set(&vctrl->vsync_resume, 0);
#else
	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);

	msleep(20);	/* >= 17 ms */
#endif

	complete_all(&vctrl->vsync_comp);

	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_lcdc_wait4ov(0);
	}

#if !defined (CONFIG_EUR_MODEL_GT_I9210)
	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
#endif

	lcdc_enabled = 0;

	mdp_histogram_ctrl_all(FALSE);

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

#if defined (CONFIG_EUR_MODEL_GT_I9210)
	mdp4_lcdc_tg_off(vctrl);
	atomic_set(&vctrl->suspend, 1);
#endif

	/* MDP clock disable */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

#if defined (CONFIG_EUR_MODEL_GT_I9210)
	mutex_unlock(&mfd->dma->ov_mutex);
	pr_err("[QC_DEBUG] %s Exit\n", __func__);
#endif
	return ret;
}

int mdp4_lcdc_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	unsigned long flags;
	int undx, need_wait = 0;
	int mixer = 0;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	mdp4_lcdc_wait4vsync(cndx);

	wake_up_interruptible_all(&vctrl->wait_queue);

	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_lcdc_wait4ov(0);
	}

	mdp_histogram_ctrl_all(FALSE);

	lcdc_enabled = 0;

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
		mdp4_lcdc_pipe_clean(vp);
	}

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mixer = pipe->mixer_num;
		mdp4_overlay_unset_mixer(mixer);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe, 1);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

	mdp4_lcdc_tg_off(vctrl);

	atomic_set(&vctrl->suspend, 1);

	/*
	 * clean up the ion freelist:
	 * it takes two passes to empty the ion free list,
	 * therefore unmap_freelist is called twice
	 */
	mdp4_overlay_iommu_unmap_freelist(mixer);
	mdp4_overlay_iommu_unmap_freelist(mixer);

	/* MDP clock disable */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mutex_unlock(&mfd->dma->ov_mutex);

	return ret;
}

int mdp4_lcdc_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
				if (pipe->frame_format !=
						MDP4_FRAME_FORMAT_LINEAR) {
					spin_lock_irqsave(&vctrl->spin_lock,
								flags);
					INIT_COMPLETION(vctrl->dmap_comp);
					vsync_irq_enable(INTR_DMA_P_DONE,
								MDP_DMAP_TERM);
					spin_unlock_irqrestore(
						&vctrl->spin_lock, flags);
				}
			}
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	/*
	 * There is a possibility that a pipe commit comes very close to the
	 * next vsync. This may cause two consecutive pipe_commits to happen
	 * within the same vsync period, which causes an iommu page fault when
	 * the previous iommu buffer is freed. Set the ION_IOMMU_UNMAP_DELAYED
	 * flag at ion_map_iommu() to delay unmapping the iommu buffer and fix
	 * this problem.
	 * Also, ion_unmap_iommu() may take as long as 9 ms to free an ion
	 * buffer; therefore mdp4_overlay_iommu_unmap_freelist(mixer) should
	 * be called after stage_commit() to ensure pipe_commit (up to
	 * stage_commit) is completed within the vsync period.
	 */

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
		else
			mdp4_lcdc_wait4dmap(0);
	}

	return cnt;
}

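/*
 * The comment above mdp4_overlay_iommu_unmap_freelist() refers to mapping
 * display buffers with ION_IOMMU_UNMAP_DELAYED so the actual iommu unmap is
 * deferred instead of happening while the previous frame may still be in
 * flight.  Below is a minimal sketch of such a mapping call, assuming the
 * msm 3.4-era ion_map_iommu() signature; the client/handle arguments, the
 * DISPLAY_READ_DOMAIN/GEN_POOL domain selection, and the helper name are
 * illustrative placeholders, not the driver's actual call site.
 */
static int example_map_display_buf(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long *iova, unsigned long *len)
{
	/*
	 * ION_IOMMU_UNMAP_DELAYED (last argument) asks ion to delay the
	 * iommu unmap, which is what allows pipe_commit to push the previous
	 * buffer onto the unmap freelist without faulting an ongoing scanout.
	 */
	return ion_map_iommu(client, handle,
			DISPLAY_READ_DOMAIN,	/* assumed display domain */
			GEN_POOL,		/* assumed partition */
			SZ_4K,			/* alignment */
			0,			/* iova_length: whole buffer */
			iova, len,
			0,			/* flags */
			ION_IOMMU_UNMAP_DELAYED);
}
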
int mdp4_lcdc_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	unsigned long flags;
	int undx, need_wait = 0;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);

	msleep(20);	/* >= 17 ms */

	complete_all(&vctrl->vsync_comp);

	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_lcdc_wait4ov(0);
	}

	mdp_histogram_ctrl_all(FALSE);

	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);

	lcdc_enabled = 0;

	if (vctrl->vsync_irq_enabled) {
		vctrl->vsync_irq_enabled = 0;
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0;	/* empty queue */
	}

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

	/* MDP clock disable */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}

int mdp4_lcdc_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	return cnt;
}