static void clk_ctrl_work(struct work_struct *work) { struct vsycn_ctrl *vctrl = container_of(work, typeof(*vctrl), clk_work); unsigned long flags; mutex_lock(&vctrl->update_lock); if (vctrl->clk_control && vctrl->clk_enabled) { pr_debug("%s: SET_CLK_OFF\n", __func__); mdp_clk_ctrl(0); spin_lock_irqsave(&vctrl->spin_lock, flags); vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM); vctrl->clk_enabled = 0; vctrl->clk_control = 0; spin_unlock_irqrestore(&vctrl->spin_lock, flags); } mutex_unlock(&vctrl->update_lock); }
static void mdp4_dsi_video_wait4dmap_done(int cndx) { unsigned long flags; struct vsycn_ctrl *vctrl; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; spin_lock_irqsave(&vctrl->spin_lock, flags); INIT_COMPLETION(vctrl->dmap_comp); vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM); spin_unlock_irqrestore(&vctrl->spin_lock, flags); mdp4_dsi_video_wait4dmap(cndx); vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM); }
/* * mdp4_dma_p_done_lcdc: called from isr */ void mdp4_dmap_done_lcdc(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; spin_lock(&vctrl->spin_lock); vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM); if (pipe == NULL) { spin_unlock(&vctrl->spin_lock); return; } if (vctrl->blt_change) { mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmap_xy(pipe); if (pipe->ov_blt_addr) { mdp4_lcdc_blt_ov_update(pipe); pipe->ov_cnt++; /* Prefill one frame */ vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM); /* kickoff overlay0 engine */ mdp4_stat.kickoff_ov0++; vctrl->ov_koff++; /* make up for prefill */ outpdw(MDP_BASE + 0x0004, 0); } vctrl->blt_change = 0; } complete_all(&vctrl->dmap_comp); if (mdp_rev <= MDP_REV_41) mdp4_mixer_blend_cfg(MDP4_MIXER0); mdp4_overlay_dma_commit(cndx); spin_unlock(&vctrl->spin_lock); }
/*
 * mdp4_dsi_video_vsync_ctrl - toggle the primary vsync IRQ.
 * @info:   framebuffer (unused here, kept for the ioctl signature).
 * @enable: non-zero to enable, zero to disable.
 *
 * No-op when the requested state already matches vsync_irq_enabled.
 */
void mdp4_dsi_video_vsync_ctrl(struct fb_info *info, int enable)
{
	struct vsycn_ctrl *vctrl = &vsync_ctrl_db[0];

	if (vctrl->vsync_irq_enabled == enable)
		return;

	pr_debug("%s: vsync enable=%d\n", __func__, enable);

	vctrl->vsync_irq_enabled = enable;

	if (!enable)
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	else
		vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
}
/*
 * mdp4_dmap_done_mddi - DMA_P done handler for the MDDI path (ISR context).
 * @cndx: controller index into vsync_ctrl_db.
 *
 * Accounts the finished dmap transfer, wakes one waiter, and — when an
 * overlay frame has completed ahead of dmap (diff > 0) — immediately
 * kicks the next dmap transfer from the BLT buffer.
 * NOTE(review): pipe is not NULL-checked before the kick path; assumes
 * base_pipe is set whenever this IRQ fires — confirm against callers.
 */
void mdp4_dmap_done_mddi(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int diff;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	/* blt enabled */
	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	vctrl->dmap_done++;
	if (vctrl->pan_display)
		vctrl->pan_display--;
	/* how many overlay frames are waiting on dmap */
	diff = vctrl->ov_done - vctrl->dmap_done;
	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
		__func__, vctrl->ov_koff, vctrl->ov_done,
		vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id());
	complete(&vctrl->dmap_comp);
	if (mdp_rev <= MDP_REV_41)
		mdp4_mixer_blend_cfg(MDP4_MIXER0);
	if (diff <= 0) {
		/* dmap caught up; release any pending blt wait */
		if (vctrl->blt_wait)
			vctrl->blt_wait = 0;
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	/* kick dmap */
	mdp4_mddi_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	mdp4_stat.kickoff_dmap++;
	vctrl->dmap_koff++;
	/* re-arm completion and IRQ before touching hardware */
	INIT_COMPLETION(vctrl->dmap_comp);
	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
	mb(); /* make sure kickoff executed */
	spin_unlock(&vctrl->spin_lock);
}
int mdp4_dsi_cmd_off(struct platform_device *pdev) { int ret = 0; int cndx = 0; struct msm_fb_data_type *mfd; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; pr_info("%s+:\n", __func__); mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; if (pipe == NULL) { pr_err("%s: NO base pipe\n", __func__); return ret; } atomic_set(&vctrl->suspend, 1); mipi_dsi_cmd_backlight_tx(150); mdp4_mixer_stage_down(pipe); mdp4_overlay_pipe_free(pipe); vctrl->base_pipe = NULL; vctrl->fake_vsync = 1; vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM); pr_info("%s-:\n", __func__); /* * footswitch off * this will casue all mdp register * to be reset to default * after footswitch on later */ return ret; }
/* * mdp4_dmae_done_dtv: called from isr */ void mdp4_dmae_done_dtv(void) { int cndx; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; cndx = 0; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; pr_debug("%s: cpu=%d\n", __func__, smp_processor_id()); spin_lock(&vctrl->spin_lock); if (vctrl->blt_change) { if (pipe->ov_blt_addr) { mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmae_xy(pipe); mdp4_dtv_blt_ov_update(pipe); pipe->blt_ov_done++; /* Prefill one frame */ vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM); /* kickoff overlay1 engine */ mdp4_stat.kickoff_ov1++; outpdw(MDP_BASE + 0x0008, 0); } vctrl->blt_change = 0; } if (mdp_rev <= MDP_REV_41) mdp4_mixer_blend_cfg(MDP4_MIXER1); complete_all(&vctrl->dmae_comp); mdp4_overlay_dma_commit(MDP4_MIXER1); vsync_irq_disable(INTR_DMA_E_DONE, MDP_DMA_E_TERM); spin_unlock(&vctrl->spin_lock); }
/* * mdp4_overlay1_done_dtv: called from isr */ void mdp4_overlay1_done_dtv(void) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; int cndx = 0; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; spin_lock(&vctrl->spin_lock); if (pipe->ov_blt_addr == 0) { spin_unlock(&vctrl->spin_lock); return; } mdp4_dtv_blt_dmae_update(pipe); pipe->blt_dmap_done++; vsync_irq_disable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM); spin_unlock(&vctrl->spin_lock); }
/*
 * mdp4_dmap_done_dsi_cmd - DMA_P done handler for DSI command mode (ISR).
 * @cndx: controller index into vsync_ctrl_db.
 *
 * Accounts the finished dmap transfer and wakes one waiter; when an
 * overlay frame is already queued ahead of dmap (diff > 0), kicks the
 * next dmap transfer from the BLT buffer immediately.
 */
void mdp4_dmap_done_dsi_cmd(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int diff;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL)
		return;

	/* blt enabled */
	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	vctrl->dmap_done++;
	if (vctrl->pan_display)
		vctrl->pan_display--;
	/* overlay frames pending ahead of dmap */
	diff = vctrl->ov_done - vctrl->dmap_done;
	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
		__func__, vctrl->ov_koff, vctrl->ov_done,
		vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id());
	complete(&vctrl->dmap_comp);
	if (diff <= 0) {
		/* caught up; release any pending blt wait */
		if (vctrl->blt_wait)
			vctrl->blt_wait = 0;
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	/* kick dmap */
	mdp4_dsi_cmd_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	mdp4_stat.kickoff_dmap++;
	vctrl->dmap_koff++;
	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
	mb(); /* order the register write before releasing the lock */
	spin_unlock(&vctrl->spin_lock);
}
/* * mdp4_overlay0_done_dsi: called from isr */ void mdp4_overlay0_done_dsi_video(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; spin_lock(&vctrl->spin_lock); vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM); vctrl->ov_done++; complete_all(&vctrl->ov_comp); if (pipe->ov_blt_addr == 0) { spin_unlock(&vctrl->spin_lock); return; } mdp4_dsi_video_blt_dmap_update(pipe); pipe->dmap_cnt++; spin_unlock(&vctrl->spin_lock); }
/*
 * mdp4_overlay0_done_dsi_cmd - overlay0 done handler for DSI command
 * mode (ISR context).
 * @cndx: controller index into vsync_ctrl_db.
 *
 * Accounts the completed overlay frame and wakes waiters.  In BLT mode,
 * kicks the matching dmap transfer unless dmap is more than one frame
 * behind, in which case blt_wait defers the kick to the dmap-done ISR.
 */
void mdp4_overlay0_done_dsi_cmd(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int diff;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	vctrl->ov_done++;
	complete_all(&vctrl->ov_comp);
	/* overlay frames pending ahead of dmap */
	diff = vctrl->ov_done - vctrl->dmap_done;
	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
		__func__, vctrl->ov_koff, vctrl->ov_done,
		vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id());
	if (pipe->ov_blt_addr == 0) {
		/* direct mode: nothing to chain */
		spin_unlock(&vctrl->spin_lock);
		return;
	}
	if (diff > 1) {
		/* dmap too far behind: defer the kick to dmap-done */
		vctrl->blt_wait = 1;
		pr_debug("%s: blt_wait set\n", __func__);
		spin_unlock(&vctrl->spin_lock);
		return;
	}
	/* chain the finished overlay frame into dmap */
	mdp4_dsi_cmd_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	mdp4_stat.kickoff_dmap++;
	vctrl->dmap_koff++;
	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
	mb(); /* order the register write before releasing the lock */
	spin_unlock(&vctrl->spin_lock);
}
static void mdp4_video_vsync_irq_ctrl(int cndx, int enable) { struct vsycn_ctrl *vctrl = NULL; int intr = 0; int term = 0; vctrl = &vsync_ctrl_db[cndx]; mutex_lock(&vctrl->update_lock); if (vctrl->base_pipe->mixer_num == MDP4_MIXER0) { intr = INTR_PRIMARY_VSYNC; term = MDP_PRIM_VSYNC_TERM; } else if (vctrl->base_pipe->mixer_num == MDP4_MIXER1) { intr = INTR_EXTERNAL_VSYNC; term = MDP_EXTER_VSYNC_TERM; } else if (vctrl->base_pipe->mixer_num == MDP4_MIXER_NONE) { intr = INTR_SECONDARY_VSYNC; term = MDP_SEC_VSYNC_TERM; } else pr_err("%s: wrong mixer_number=%d\n", __func__, vctrl->base_pipe->mixer_num); if (intr != 0) { if (enable) { if (vsync_irq_cnt == 0) vsync_irq_enable(intr, term); vsync_irq_cnt++; } else { if (vsync_irq_cnt) { vsync_irq_cnt--; if (vsync_irq_cnt == 0) vsync_irq_disable(intr, term); } } } pr_debug("%s: enable=%d cnt=%d intr=0x%x\n", __func__, enable, vsync_irq_cnt, intr); mutex_unlock(&vctrl->update_lock); }
/*
 * mdp4_lcdc_vsync_ctrl - toggle the primary vsync IRQ for the LCDC path.
 * @info:   framebuffer (unused, kept for the ioctl signature).
 * @enable: non-zero to enable, zero to disable.
 *
 * No-op when the state already matches.  After enabling while not
 * suspended, marks vsync as resumed.
 */
void mdp4_lcdc_vsync_ctrl(struct fb_info *info, int enable)
{
	struct vsycn_ctrl *vctrl = &vsync_ctrl_db[0];

	if (vctrl->vsync_irq_enabled == enable)
		return;

	pr_debug("%s: vsync enable=%d\n", __func__, enable);

	vctrl->vsync_irq_enabled = enable;

	if (!enable)
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	else
		vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);

	if (vctrl->vsync_irq_enabled && atomic_read(&vctrl->suspend) == 0)
		atomic_set(&vctrl->vsync_resume, 1);
}
/*
 * mdp4_video_vsync_irq_ctrl - refcounted primary-vsync IRQ control.
 * @cndx:   controller index into vsync_ctrl_db.
 * @enable: non-zero to take a reference, zero to drop one.
 *
 * The hardware IRQ is only touched on the 0->1 and 1->0 transitions of
 * the global vsync_irq_cnt; update_lock serializes the counter.
 */
static void mdp4_video_vsync_irq_ctrl(int cndx, int enable)
{
	struct vsycn_ctrl *vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	if (enable) {
		/* first user turns the IRQ on */
		if (vsync_irq_cnt++ == 0)
			vsync_irq_enable(INTR_PRIMARY_VSYNC,
					MDP_PRIM_VSYNC_TERM);
	} else if (vsync_irq_cnt) {
		/* last user turns the IRQ off */
		if (--vsync_irq_cnt == 0)
			vsync_irq_disable(INTR_PRIMARY_VSYNC,
					MDP_PRIM_VSYNC_TERM);
	}
	pr_debug("%s: enable=%d cnt=%d\n", __func__, enable, vsync_irq_cnt);
	mutex_unlock(&vctrl->update_lock);
}
static void clk_ctrl_work(struct work_struct *work) { unsigned long flags; struct vsycn_ctrl *vctrl = container_of(work, typeof(*vctrl), clk_work); mutex_lock(&vctrl->update_lock); spin_lock_irqsave(&vctrl->spin_lock, flags); if (vctrl->clk_control && vctrl->clk_enabled) { vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM); vctrl->clk_enabled = 0; vctrl->clk_control = 0; spin_unlock_irqrestore(&vctrl->spin_lock, flags); /* make sure dsi link is idle */ mipi_dsi_mdp_busy_wait(); mipi_dsi_clk_cfg(0); mdp_clk_ctrl(0); pr_debug("%s: SET_CLK_OFF, pid=%d\n", __func__, current->pid); } else { spin_unlock_irqrestore(&vctrl->spin_lock, flags); } mutex_unlock(&vctrl->update_lock); }
/*
 * mdp4_dmap_done_dsi_cmd - DMA_P done handler, BLT-accounting variant.
 * @cndx: controller index into vsync_ctrl_db.
 *
 * In direct (non-BLT) mode just commits queued overlay updates.  In BLT
 * mode accounts the finished dmap frame and, once dmap has caught up
 * with overlay and a BLT stop was requested (blt_end), finalizes the
 * switch back to direct mode.
 */
void mdp4_dmap_done_dsi_cmd(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int diff;

	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	vctrl = &vsync_ctrl_db[cndx];
	vctrl->dmap_intr_tot++;
	pipe = vctrl->base_pipe;

	if (pipe->ov_blt_addr == 0) {
		/* direct mode: nothing to account, just commit */
		mdp4_overlay_dma_commit(cndx);
		return;
	}

	/* blt enabled */
	spin_lock(&vctrl->spin_lock);
	pipe->blt_dmap_done++;
	diff = pipe->blt_ov_done - pipe->blt_dmap_done;
	spin_unlock(&vctrl->spin_lock);
	pr_debug("%s: ov_done=%d dmap_done=%d ov_koff=%d dmap_koff=%d\n",
		__func__, pipe->blt_ov_done, pipe->blt_dmap_done,
		pipe->blt_ov_koff, pipe->blt_dmap_koff);
	if (diff <= 0) {
		if (pipe->blt_end) {
			/* dmap drained: complete the BLT -> direct switch */
			pipe->blt_end = 0;
			pipe->ov_blt_addr = 0;
			pipe->dma_blt_addr = 0;
			pipe->blt_changed = 1;
			pr_info("%s: BLT-END\n", __func__);
		}
	}
	/*
	 * NOTE(review): dsi_clk_lock is released here but no matching
	 * spin_lock(&dsi_clk_lock) is visible in this function — looks
	 * like an unbalanced unlock; verify against the ISR caller
	 * before changing.
	 */
	spin_unlock(&dsi_clk_lock);
}
/* * mdp4_overlay0_done_lcdc: called from isr */ void mdp4_overlay0_done_lcdc(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; spin_lock(&vctrl->spin_lock); vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM); vctrl->ov_done++; complete_all(&vctrl->ov_comp); if (pipe->ov_blt_addr == 0) { spin_unlock(&vctrl->spin_lock); return; } if (mdp_rev <= MDP_REV_41) mdp4_mixer_blend_cfg(MDP4_MIXER0); mdp4_lcdc_blt_dmap_update(pipe); pipe->dmap_cnt++; spin_unlock(&vctrl->spin_lock); }
void mdp4_dmap_done_dsi_video(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; spin_lock(&vctrl->spin_lock); vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM); if (vctrl->blt_change && mdp_ov0_blt_ctl == MDP4_BLT_SWITCH_TG_ON_ISR) { mdp4_overlayproc_cfg(pipe); mdp4_overlay_dmap_xy(pipe); if (pipe->ov_blt_addr) { mdp4_dsi_video_blt_ov_update(pipe); pipe->ov_cnt++; vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM); mdp4_stat.kickoff_ov0++; vctrl->ov_koff++; outpdw(MDP_BASE + 0x0004, 0); } vctrl->blt_change = 0; } complete_all(&vctrl->dmap_comp); mdp4_overlay_dma_commit(cndx); spin_unlock(&vctrl->spin_lock); }
void mdp4_lcdc_vsync_ctrl(struct fb_info *info, int enable) { struct vsycn_ctrl *vctrl; int cndx = 0; vctrl = &vsync_ctrl_db[cndx]; if (vctrl->fake_vsync) { vctrl->fake_vsync = 0; schedule_work(&vctrl->vsync_work); } if (vctrl->vsync_irq_enabled == enable) return; pr_debug("%s: vsync enable=%d\n", __func__, enable); vctrl->vsync_irq_enabled = enable; if (enable) vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM); else vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM); }
static void clk_ctrl_work(struct work_struct *work) { struct vsycn_ctrl *vctrl = container_of(to_delayed_work(work), typeof(*vctrl), clk_work); unsigned long flags; mutex_lock(&vctrl->update_lock); if (vctrl->clk_control && vctrl->clk_enabled) { spin_lock_irqsave(&mdp_spin_lock, flags); if (mdp_irq_enabled && (mdp_irq_mask & MDP_DMAP_TERM)) { pr_err("AVOID CLOCK DISABLE DURING DMA TRANSFER!!!\n"); spin_unlock_irqrestore(&mdp_spin_lock, flags); mutex_unlock(&vctrl->update_lock); schedule_delayed_work(&vctrl->clk_work, (HZ/100)); return; } spin_unlock_irqrestore(&mdp_spin_lock, flags); spin_lock_irqsave(&vctrl->spin_lock, flags); if (vctrl->expire_tick || vctrl->vsync_enabled) return; spin_unlock_irqrestore(&vctrl->spin_lock, flags); pr_debug("%s: SET_CLK_OFF\n", __func__); mdp_clk_ctrl(0); spin_lock_irqsave(&vctrl->spin_lock, flags); vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM); vctrl->clk_enabled = 0; vctrl->clk_control = 0; spin_unlock_irqrestore(&vctrl->spin_lock, flags); } mutex_unlock(&vctrl->update_lock); }
int mdp4_dsi_video_off(struct platform_device *pdev) { int ret = 0; int cndx = 0; struct msm_fb_data_type *mfd; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; struct vsync_update *vp; unsigned long flags; int undx, need_wait = 0; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; mdp4_dsi_video_wait4vsync(cndx); if (pipe->ov_blt_addr) { spin_lock_irqsave(&vctrl->spin_lock, flags); if (vctrl->ov_koff != vctrl->ov_done) need_wait = 1; spin_unlock_irqrestore(&vctrl->spin_lock, flags); if (need_wait) mdp4_dsi_video_wait4ov(0); } mdp_histogram_ctrl_all(FALSE); dsi_video_enabled = 0; if (vctrl->vsync_irq_enabled) { vctrl->vsync_irq_enabled = 0; vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM); } undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; if (vp->update_cnt) { /* * pipe's iommu will be freed at next overlay play * and iommu_drop statistic will be increased by one */ vp->update_cnt = 0; /* empty queue */ } if (pipe) { /* sanity check, free pipes besides base layer */ mdp4_overlay_unset_mixer(pipe->mixer_num); if (mfd->ref_cnt == 0) { /* adb stop */ if (pipe->pipe_type == OVERLAY_TYPE_BF) mdp4_overlay_borderfill_stage_down(pipe); /* base pipe may change after borderfill_stage_down */ pipe = vctrl->base_pipe; mdp4_mixer_stage_down(pipe, 1); mdp4_overlay_pipe_free(pipe, 1); vctrl->base_pipe = NULL; } else { /* system suspending */ mdp4_mixer_stage_down(vctrl->base_pipe, 1); mdp4_overlay_iommu_pipe_free( vctrl->base_pipe->pipe_ndx, 1); } } mdp4_dsi_video_tg_off(vctrl); atomic_set(&vctrl->suspend, 1); if (vctrl->vsync_irq_enabled) { vctrl->vsync_irq_enabled = 0; vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM); } /* mdp clock off */ mdp_clk_ctrl(0); mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); return ret; }
int mdp4_dtv_off(struct platform_device *pdev) { struct msm_fb_data_type *mfd; int ret = 0; int cndx = 0; int undx; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; struct vsync_update *vp; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); vctrl = &vsync_ctrl_db[cndx]; atomic_set(&vctrl->suspend, 1); atomic_set(&vctrl->vsync_resume, 0); if (vctrl->vsync_irq_enabled) { while (vctrl->wait_vsync_cnt) msleep(20); /* >= 17 ms */ } if (vctrl->wait_vsync_cnt) { complete_all(&vctrl->vsync_comp); vctrl->wait_vsync_cnt = 0; } pipe = vctrl->base_pipe; if (pipe != NULL) { mdp4_dtv_stop(mfd); /* sanity check, free pipes besides base layer */ mdp4_overlay_unset_mixer(pipe->mixer_num); if (hdmi_prim_display && mfd->ref_cnt == 0) { /* adb stop */ if (pipe->pipe_type == OVERLAY_TYPE_BF) mdp4_overlay_borderfill_stage_down(pipe); /* base pipe may change after borderfill_stage_down */ pipe = vctrl->base_pipe; mdp4_mixer_stage_down(pipe, 1); mdp4_overlay_pipe_free(pipe); /* pipe == rgb2 */ vctrl->base_pipe = NULL; } else { mdp4_mixer_stage_down(pipe, 1); mdp4_overlay_pipe_free(pipe); vctrl->base_pipe = NULL; } } mdp4_overlay_panel_mode_unset(MDP4_MIXER1, MDP4_PANEL_DTV); if (vctrl->vsync_irq_enabled) { vctrl->vsync_irq_enabled = 0; vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM); } undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; if (vp->update_cnt) { /* * pipe's iommu will be freed at next overlay play * and iommu_drop statistic will be increased by one */ vp->update_cnt = 0; /* empty queue */ } ret = panel_next_off(pdev); mdp_footswitch_ctrl(FALSE); /* Mdp clock disable */ mdp_clk_ctrl(0); pr_info("%s:\n", __func__); return ret; }
/*
 * mdp4_dsi_cmd_off - panel-off teardown for DSI command mode.
 * @pdev: platform device carrying the msm framebuffer driver data.
 *
 * Marks the controller suspended, waits (bounded, ~200ms) for the
 * deferred clock-off worker to gate the MDP clock, tears down the base
 * pipe, disables the read-pointer IRQ, and drains the pending update
 * queue.  Always returns 0.
 */
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;
	int need_wait, cnt;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return ret;
	}

	need_wait = 0;
	mutex_lock(&vctrl->update_lock);
	atomic_set(&vctrl->suspend, 1);
	complete_all(&vctrl->vsync_comp);

	pr_debug("%s: clk=%d pan=%d\n", __func__,
		vctrl->clk_enabled, vctrl->pan_display);
	if (vctrl->clk_enabled)
		need_wait = 1;
	mutex_unlock(&vctrl->update_lock);

	/* poll for the deferred clock-off worker, max 10 * 20ms */
	cnt = 0;
	if (need_wait) {
		while (vctrl->clk_enabled) {
			msleep(20);
			cnt++;
			if (cnt > 10)
				break;
		}
	}

	/* message for system suspended */
	if (cnt > 10)
		pr_err("%s:Error, mdp clocks NOT off\n", __func__);
	else
		pr_debug("%s: mdp clocks off at cnt=%d\n", __func__, cnt);

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(pipe->mixer_num);
	mdp4_mixer_stage_down(pipe, 1);
	if (mfd->ref_cnt == 0) {
		/* adb stop: fully release the base pipe */
		mdp4_overlay_pipe_free(pipe);
		vctrl->base_pipe = NULL;
	}
	if (vctrl->vsync_enabled) {
		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
		vctrl->vsync_enabled = 0;
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0; /* empty queue */
	}

	pr_debug("%s-:\n", __func__);
	return ret;
}
int mdp4_dtv_off(struct platform_device *pdev) { struct msm_fb_data_type *mfd; int ret = 0; int cndx = 0; int undx; struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; struct vsync_update *vp; int mixer = 0; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); vctrl = &vsync_ctrl_db[cndx]; /* wait for one vsycn time to make sure * previous stage_commit had been kicked in */ msleep(20); /* >= 17 ms */ mdp4_dtv_wait4vsync(cndx); mdp4_unmap_sec_resource(mfd); wake_up_interruptible_all(&vctrl->wait_queue); pipe = vctrl->base_pipe; if (pipe != NULL) { mixer = pipe->mixer_num; /* sanity check, free pipes besides base layer */ mdp4_overlay_unset_mixer(mixer); if (hdmi_prim_display && mfd->ref_cnt == 0) { /* adb stop */ if (pipe->pipe_type == OVERLAY_TYPE_BF) mdp4_overlay_borderfill_stage_down(pipe); /* pipe == rgb2 */ vctrl->base_pipe = NULL; } else { mdp4_mixer_stage_down(pipe, 1); mdp4_overlay_pipe_free(pipe, 1); vctrl->base_pipe = NULL; } } mdp4_dtv_tg_off(vctrl); atomic_set(&vctrl->suspend, 1); mdp4_overlay_panel_mode_unset(MDP4_MIXER1, MDP4_PANEL_DTV); if (vctrl->vsync_irq_enabled) { vctrl->vsync_irq_enabled = 0; vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM); } undx = vctrl->update_ndx; vp = &vctrl->vlist[undx]; if (vp->update_cnt) { /* * pipe's iommu will be freed at next overlay play * and iommu_drop statistic will be increased by one */ vp->update_cnt = 0; /* empty queue */ } ret = panel_next_off(pdev); mdp_footswitch_ctrl(FALSE); /* * clean up ion freelist * there need two stage to empty ion free list * therefore need call unmap freelist twice */ mdp4_overlay_iommu_unmap_freelist(mixer); mdp4_overlay_iommu_unmap_freelist(mixer); /* Mdp clock disable */ mdp_clk_ctrl(0); pr_info("%s:\n", __func__); return ret; }
/*
 * mdp4_dsi_cmd_off - panel-off teardown for DSI command mode
 * (drawing-aware variant).
 * @pdev: platform device carrying the msm framebuffer driver data.
 *
 * Busy-waits until no client is drawing and DMA/overlay have drained,
 * then marks the controller suspended, frees the base pipe, force-gates
 * the MDP clock if the framework never sent vsync-off, and disables the
 * read-pointer IRQ.  Always returns 0.
 */
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	pr_debug("%s+:\n", __func__);

	/* wait for any in-progress drawing to finish */
	while (atomic_read(&in_drawing)) {
		msleep(10);
	}

	mdp4_dsi_cmd_wait_dma_ov();

	/* NOTE: mfd is fetched but not used further in this variant */
	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return ret;
	}

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);
	complete_all(&vctrl->vsync_comp);

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(pipe->mixer_num);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe);
	vctrl->base_pipe = NULL;

	if (vctrl->clk_enabled) {
		/*
		 * in case of suspend, vsync_ctrl off is not
		 * received from framework, which left the clock on;
		 * the clock needs to be turned off here
		 */
		mdp_clk_ctrl(0);
	}
	vctrl->clk_enabled = 0;
	vctrl->vsync_enabled = 0;
	vctrl->clk_control = 0;
	vctrl->expire_tick = 0;

	vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);

	pr_debug("%s-:\n", __func__);

	/*
	 * footswitch off
	 * this will cause all mdp registers
	 * to be reset to default
	 * after footswitch on later
	 */
	return ret;
}
/*
 * mdp4_dsi_video_off - panel-off teardown for DSI video mode
 * (LGIT vendor variant with panel-off retry).
 * @pdev: platform device carrying the msm framebuffer driver data.
 *
 * On LGIT panels, retries the panel-off command up to 10 times, power
 * cycling the panel between attempts.  Then suspends the controller,
 * waits out in-flight overlay work, disables the timing generator and
 * vsync IRQ, drains the update queue, and frees pipes under ov_mutex
 * before gating clocks.  Returns the (normalized) panel-off result.
 */
int mdp4_dsi_video_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	unsigned long flags;
	int undx, need_wait = 0;
#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT) \
	|| defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT) \
	|| defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT_PANEL)
	int retry_cnt = 0;
#endif

	printk(KERN_INFO "[LCD][DEBUG] %s is started.. \n", __func__);

#if defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT) \
	|| defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT) \
	|| defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_FHD_INVERSE_PT_PANEL)
	do {
		ret = mipi_lgit_lcd_off(pdev);
		if (ret < 0) {
			/* panel refused the off command: power cycle and retry */
			panel_next_off(pdev);
			msleep(2);
			panel_next_on(pdev);
			msleep(5);
			retry_cnt++;
		} else {
			/* panel off succeeded; normalize ret before leaving
			 * the retry loop */
			ret = 0;
			break;
		}
	} while (retry_cnt < 10);
	printk(KERN_INFO "[LCD][DEBUG] %s : mipi_lgit_lcd_off retry_cnt = %d\n",
		__func__, retry_cnt);
#endif

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);

	msleep(20);	/* >= 17 ms */

	if (vctrl->wait_vsync_cnt) {
		complete_all(&vctrl->vsync_comp);
		vctrl->wait_vsync_cnt = 0;
	}

	/* NOTE(review): pipe is dereferenced here without a NULL check;
	 * assumes base_pipe is set whenever the panel was on */
	if (pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_dsi_video_wait4ov(0);
	}

	mdp_histogram_ctrl_all(FALSE);

	/* stop the DSI video timing generator */
	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);

	dsi_video_enabled = 0;

	if (vctrl->vsync_irq_enabled) {
		vctrl->vsync_irq_enabled = 0;
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0; /* empty queue */
	}

	mutex_lock(&mfd->dma->ov_mutex);
	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe, 1);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

	/* mdp clock off */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	mutex_unlock(&mfd->dma->ov_mutex);

	printk(KERN_INFO "[LCD][DEBUG] %s is ended.. \n", __func__);
	return ret;
}
/*
 * mdp4_dtv_off - panel-off teardown for the DTV (HDMI/MIXER1) path
 * (footswitch-disabled variant).
 * @pdev: platform device carrying the msm framebuffer driver data.
 *
 * Drains vsync waiters, stops DTV and frees the base pipe, disables
 * the external vsync IRQ, empties the update queue, then powers the
 * panel and gates the MDP clock.  The footswitch off is compiled out.
 */
int mdp4_dtv_off(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;
	int ret = 0;
	int cndx = 0;
	int undx;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	vctrl = &vsync_ctrl_db[cndx];

	atomic_set(&vctrl->suspend, 1);

	/* wait for all pending vsync waiters to finish */
	if (vctrl->vsync_irq_enabled) {
		while (vctrl->wait_vsync_cnt)
			msleep(20);
	}

	complete_all(&vctrl->vsync_comp);
	vctrl->wait_vsync_cnt = 0;

	pipe = vctrl->base_pipe;
	if (pipe != NULL) {
		mdp4_dtv_stop(mfd);
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		if (hdmi_prim_display && mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);
			vctrl->base_pipe = NULL;
		} else {
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe);
			vctrl->base_pipe = NULL;
			msleep(20);
		}
	}

	mdp4_overlay_panel_mode_unset(MDP4_MIXER1, MDP4_PANEL_DTV);

	if (vctrl->vsync_irq_enabled) {
		vctrl->vsync_irq_enabled = 0;
		vsync_irq_disable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/* drop any queued-but-uncommitted overlay updates */
		vp->update_cnt = 0;
	}

	ret = panel_next_off(pdev);
#if 0
	mdp_footswitch_ctrl(FALSE);
#endif
	mdp_clk_ctrl(0);
	pr_info("%s:\n", __func__);
	return ret;
}
/*
 * mdp4_dsi_cmd_off - panel-off teardown for DSI command mode
 * (force-clock-off variant).
 * @pdev: platform device carrying the msm framebuffer driver data.
 *
 * Marks the controller suspended and waits (bounded, ~200ms) for the
 * deferred clock-off worker; if the worker never runs, force-gates the
 * DSI and MDP clocks itself.  Then frees the base pipe, disables the
 * read-pointer IRQ, and drains the pending update queue.
 */
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;
	int need_wait, cnt;
	unsigned long flags;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return ret;
	}

	need_wait = 0;
	mutex_lock(&vctrl->update_lock);
	atomic_set(&vctrl->suspend, 1);
	complete_all(&vctrl->vsync_comp);

	pr_debug("%s: clk=%d pan=%d\n", __func__,
		vctrl->clk_enabled, vctrl->pan_display);
	if (vctrl->clk_enabled)
		need_wait = 1;
	mutex_unlock(&vctrl->update_lock);

	/* poll for the deferred clock-off worker, max 10 * 20ms */
	cnt = 0;
	if (need_wait) {
		while (vctrl->clk_enabled) {
			msleep(20);
			cnt++;
			if (cnt > 10)
				break;
		}
	}

	if (cnt > 10) {
		/* worker never ran: force the clocks off ourselves */
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->clk_control = 0;
		vctrl->clk_enabled = 0;
		vctrl->expire_tick = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mipi_dsi_clk_cfg(0);
		mdp_clk_ctrl(0);
		pr_err("%s: Error, SET_CLK_OFF by force\n", __func__);
	}

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(pipe->mixer_num);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe);
	vctrl->base_pipe = NULL;

	if (vctrl->vsync_enabled) {
		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
		vctrl->vsync_enabled = 0;
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0; /* empty queue */
	}

	pr_debug("%s-:\n", __func__);
	return ret;
}
/*
 * mdp4_dsi_cmd_off - panel-off teardown for DSI command mode
 * (ov_mutex / wait_queue variant).
 * @pdev: platform device carrying the msm framebuffer driver data.
 *
 * Under ov_mutex: wakes vsync poll waiters, waits (bounded) for the
 * deferred clock-off worker (force-gating clocks on timeout), disables
 * the read-pointer IRQ, flushes queued updates through pipe_clean, and
 * frees or suspends the base pipe before draining the ion freelist.
 */
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;
	int need_wait, cnt;
	unsigned long flags;
	int mixer = 0;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&mfd->dma->ov_mutex);
		return ret;
	}

	need_wait = 0;
	mutex_lock(&vctrl->update_lock);
	wake_up_interruptible_all(&vctrl->wait_queue);

	pr_debug("%s: clk=%d pan=%d\n", __func__,
		vctrl->clk_enabled, vctrl->pan_display);
	if (vctrl->clk_enabled)
		need_wait = 1;
	mutex_unlock(&vctrl->update_lock);

	/* poll for the deferred clock-off worker, max 10 * 20ms */
	cnt = 0;
	if (need_wait) {
		while (vctrl->clk_enabled) {
			msleep(20);
			cnt++;
			if (cnt > 10)
				break;
		}
	}

	if (cnt > 10) {
		/* worker never ran: force the clocks off ourselves */
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->clk_control = 0;
		vctrl->clk_enabled = 0;
		vctrl->expire_tick = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mipi_dsi_clk_cfg(0);
		mdp_clk_ctrl(0);
		pr_err("%s: Error, SET_CLK_OFF by force\n", __func__);
	}

	if (vctrl->vsync_enabled) {
		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
		vctrl->vsync_enabled = 0;
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
		mdp4_dsi_cmd_pipe_clean(vp);
	}

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mixer = pipe->mixer_num;
		mdp4_overlay_unset_mixer(mixer);
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
		}
	}

	atomic_set(&vctrl->suspend, 1);

	/*
	 * clean up ion freelist
	 * there need two stage to empty ion free list
	 * therefore need call unmap freelist twice
	 */
	mdp4_overlay_iommu_unmap_freelist(mixer);
	mdp4_overlay_iommu_unmap_freelist(mixer);

	mutex_unlock(&mfd->dma->ov_mutex);

	pr_debug("%s-:\n", __func__);
	return ret;
}
/*
 * mdp4_dsi_cmd_off - panel-off teardown for DSI command mode
 * (simple suspend variant).
 * @pdev: platform device carrying the msm framebuffer driver data.
 *
 * Marks the controller suspended, frees the base pipe, gates the MDP
 * clock if the framework never sent vsync-off, drains the update
 * queue, clears the clock bookkeeping, and disables the read-pointer
 * IRQ.  Always returns 0.
 */
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;

	pr_debug("%s+:\n", __func__);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return ret;
	}

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);
	complete_all(&vctrl->vsync_comp);

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(pipe->mixer_num);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe);
	vctrl->base_pipe = NULL;

	if (vctrl->clk_enabled) {
		/*
		 * in case of suspend, vsync_ctrl off is not
		 * received from framework, which left the clock on;
		 * the clock needs to be turned off here
		 */
		mdp_clk_ctrl(0);
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0; /* empty queue */
	}

	vctrl->clk_enabled = 0;
	vctrl->vsync_enabled = 0;
	vctrl->clk_control = 0;
	vctrl->expire_tick = 0;

	vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);

	pr_debug("%s-:\n", __func__);

	/*
	 * footswitch off
	 * this will cause all mdp registers
	 * to be reset to default
	 * after footswitch on later
	 */
	return ret;
}