void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];

	if (!mfd->panel_power_on) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mipi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_dsi_cmd_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);
	mdp4_dsi_cmd_pipe_commit(cndx, 1);
	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);
}
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	long long xx;

	vctrl = &vsync_ctrl_db[cndx];

	if (!mfd->panel_power_on)
		return;

	vctrl->clk_control = 0;

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return;
	}

	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mipi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_dsi_cmd_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);
	mdp4_dsi_cmd_pipe_commit();
	mdp4_dsi_cmd_wait4vsync(0, &xx);
	vctrl->expire_tick = VSYNC_EXPIRE_TICK;
	vctrl->clk_control = 1;
	mdp4_overlay_mdp_perf_upd(mfd, 0);
}
static void mdp4_dsi_cmd_overlay_nolock(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return;
	}

	mdp_clk_ctrl(1);

	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mipi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_dsi_cmd_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);
	mdp4_dsi_cmd_pipe_commit(cndx, 0);
	mdp4_dsi_cmd_busy();
	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mdp_clk_ctrl(0);
}
void mdp4_mddi_overlay(struct msm_fb_data_type *mfd)
{
	mutex_lock(&mfd->dma->ov_mutex);

	if (mfd && mfd->panel_power_on) {
		mdp4_mddi_dma_busy_wait(mfd);

		if (mddi_pipe && mddi_pipe->ov_blt_addr)
			mdp4_mddi_blt_dmap_busy_wait(mfd);
		mdp4_overlay_mdp_perf_upd(mfd, 0);
		mdp4_overlay_update_lcd(mfd);

		mdp4_overlay_mdp_perf_upd(mfd, 1);
		if (mdp_hw_revision < MDP4_REVISION_V2_1) {
			/* dma_s to dmap switch */
			if (mdp4_overlay_mixer_play(mddi_pipe->mixer_num)
								== 0) {
				mdp4_dma_s_update_lcd(mfd, mddi_pipe);
				mdp4_mddi_dma_s_kickoff(mfd, mddi_pipe);
			} else
				mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
		} else	/* no dma_s to dmap switch */
			mdp4_mddi_kickoff_ui(mfd, mddi_pipe);

		/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	mutex_unlock(&mfd->dma->ov_mutex);
}
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;
	int clk_set_on = 0;

	vctrl = &vsync_ctrl_db[cndx];

	if (!mfd->panel_power_on)
		return;

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return;
	}

	mutex_lock(&vctrl->update_lock);
	if (atomic_read(&vctrl->suspend)) {
		mutex_unlock(&vctrl->update_lock);
		pr_err("%s: suspended, no more pan display\n", __func__);
		return;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vctrl->clk_control = 0;
	vctrl->pan_display++;
	if (!vctrl->clk_enabled) {
		clk_set_on = 1;
		vctrl->clk_enabled = 1;
		vctrl->expire_tick = VSYNC_EXPIRE_TICK;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (clk_set_on) {
		pr_debug("%s: SET_CLK_ON\n", __func__);
		mipi_dsi_clk_cfg(1);
		mdp_clk_ctrl(1);
		vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
	}
	mutex_unlock(&vctrl->update_lock);

	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mipi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_dsi_cmd_pipe_queue(0, pipe);
	}

	mutex_lock(&mfd->dma->ov_mutex);
	mdp4_overlay_mdp_perf_upd(mfd, 1);
	mdp4_dsi_cmd_pipe_commit(cndx, 0);
	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);
}
void mdp4_mddi_overlay(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];

	if (!mfd->panel_power_on) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	mutex_lock(&vctrl->update_lock);
	if (!vctrl->clk_enabled) {
		pr_err("%s: mdp clocks disabled\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}
	mutex_unlock(&vctrl->update_lock);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	/*
	 * in the middle of shutting clocks down
	 * delay to allow pan display to go through
	 */
	vctrl->expire_tick = VSYNC_EXPIRE_TICK;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mddi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_mddi_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_pipe_req(pipe, mfd);
	mdp4_overlay_mdp_perf_req(mfd, pipe);

	mdp4_overlay_mdp_perf_upd(mfd, 1);
	mdp4_mddi_pipe_commit();
	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);
}
void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
#ifdef CONFIG_PANTECH_LCD_SKYDISP_SUPPORT_OVERLAY_COMMIT
	int cnt;
#endif

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (!pipe || !mfd->panel_power_on)
		return;

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->display_iova)
			pipe->srcp0_addr = mfd->display_iova + buf_offset;
		else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_dsi_video_pipe_queue(0, pipe);
	}

	mutex_lock(&mfd->dma->ov_mutex);
	mdp4_overlay_mdp_perf_upd(mfd, 1);

#ifdef CONFIG_PANTECH_LCD_SKYDISP_SUPPORT_OVERLAY_COMMIT
	cnt = 0;
	cnt = mdp4_dsi_video_pipe_commit(cndx, 0);
	if (cnt) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(cndx);
		else
			mdp4_dsi_video_wait4dmap(cndx);
	}
#else
	mdp4_dsi_video_pipe_commit();
#endif
	mutex_unlock(&mfd->dma->ov_mutex);

	if (pipe->ov_blt_addr)
		mdp4_dsi_video_wait4ov(0);
	else
		mdp4_dsi_video_wait4dmap(0);

	mdp4_overlay_mdp_perf_upd(mfd, 0);
}
void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cnt, cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (!pipe || !mfd->panel_power_on) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->display_iova)
			pipe->srcp0_addr = mfd->display_iova + buf_offset;
		else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_dsi_video_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	cnt = mdp4_dsi_video_pipe_commit(cndx, 0);
	if (cnt >= 0) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(cndx);
		else
			mdp4_dsi_video_wait4dmap(cndx);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);

#if defined(LCD_DEVICE_S6E8AA0X01)
	if ((msm_fb_oled_force_off == FALSE) &&
			(msm_fb_oled_esd_command_locked != TRUE))
		mipi_dsi_cmds_tx_lp_mode(mfd);
#endif
}
void mdp4_atv_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	unsigned long flag;
	struct mdp4_overlay_pipe *pipe;

	if (!mfd->panel_power_on)
		return;

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf_offset = calc_fb_offset(mfd, fbi, bpp);

	mutex_lock(&mfd->dma->ov_mutex);

	pipe = atv_pipe;

	if (mfd->map_buffer) {
		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] +
			buf_offset;
		pr_debug("start 0x%lx srcp0_addr 0x%x\n",
			mfd->map_buffer->iova[0], pipe->srcp0_addr);
	} else {
		pipe->srcp0_addr = (uint32)(buf + buf_offset);
	}

	mdp_update_pm(mfd, vsync_ctrl_db[0].vsync_time);

	mdp4_overlay_mdp_perf_req(pipe, mfd);
	mdp4_overlay_mdp_perf_upd(mfd, 1);

	mdp4_overlay_rgb_setup(pipe);

	mdp4_overlay_reg_flush(pipe, 0);

	mdp4_mixer_stage_up(pipe, 0);

	mdp4_mixer_stage_commit(pipe->mixer_num);

	printk(KERN_INFO "mdp4_atv_overlay: pipe=%x ndx=%d\n",
		(int)pipe, pipe->pipe_ndx);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY1_TERM);
	INIT_COMPLETION(atv_pipe->comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
	mdp_intr_mask |= INTR_OVERLAY1_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&atv_pipe->comp);
	mdp_disable_irq(MDP_OVERLAY1_TERM);

	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mdp4_stat.kickoff_atv++;
	mutex_unlock(&mfd->dma->ov_mutex);
}
void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cnt, cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];

	if (!vctrl->inited) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	pipe = vctrl->base_pipe;
	if (!pipe || !mfd->panel_power_on) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->display_iova)
			pipe->srcp0_addr = mfd->display_iova + buf_offset;
		else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_dsi_video_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	cnt = mdp4_dsi_video_pipe_commit(cndx, 1, NULL);
	if (cnt >= 0) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(cndx);
		else
			mdp4_dsi_video_wait4vsync(cndx);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);
}
void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cnt, cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (!pipe || !mfd->panel_power_on)
		return;

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->map_buffer->iova[0]) {
			pipe->srcp0_addr = mfd->map_buffer->iova[0] +
				buf_offset;
		} else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_lcdc_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	mutex_lock(&mfd->dma->ov_mutex);
	cnt = mdp4_lcdc_pipe_commit(cndx, 0);
	mutex_unlock(&mfd->dma->ov_mutex);

#if defined(CONFIG_EUR_MODEL_GT_I9210)
	if (cnt >= 0)
#else
	if (cnt)
#endif
	{
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	mdp4_mixer_late_commit();
	mdp4_overlay_mdp_perf_upd(mfd, 0);
}
void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cnt, cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (!pipe || !mfd->panel_power_on)
		return;

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->display_iova)
			pipe->srcp0_addr = mfd->display_iova + buf_offset;
		else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_lcdc_pipe_queue(0, pipe);
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vctrl->expire_tick = VSYNC_EXPIRE_TICK;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	cnt = 0;
	mutex_lock(&mfd->dma->ov_mutex);
	cnt = mdp4_lcdc_pipe_commit();
	mutex_unlock(&mfd->dma->ov_mutex);

	if (cnt) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
		else
			mdp4_lcdc_wait4dmap(0);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 0);
}
void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cnt, cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (!pipe || !mfd->panel_power_on)
		return;

	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->map_buffer->iova[0]) {
			pipe->srcp0_addr = mfd->map_buffer->iova[0] +
				buf_offset;
		} else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_dsi_video_pipe_queue(0, pipe);
	}

	mutex_lock(&mfd->dma->ov_mutex);
	mdp4_overlay_mdp_perf_upd(mfd, 1);
	cnt = mdp4_dsi_video_pipe_commit(cndx, 0);
	mutex_unlock(&mfd->dma->ov_mutex);

	if (cnt) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(cndx);
		else
			mdp4_dsi_video_wait4dmap(cndx);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 0);
}
void mdp4_dsi_refresh_screen_at_once(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	DISP_LOCAL_LOG_EMERG("DISP %s S\n", __func__);

	if (mfd == NULL)
		return;

	vctrl = &vsync_ctrl_db[cndx];

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return;
	}

	mdp_clk_ctrl(1);

	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mipi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_dsi_cmd_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	mdp4_dsi_screen_at_once_flag = 1;
	mdp4_dsi_cmd_pipe_commit(cndx, 0);
	mdp4_dsi_screen_at_once_flag = 0;

	mdp4_dsi_cmd_busy();

	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mdp_clk_ctrl(0);

	DISP_LOCAL_LOG_EMERG("DISP %s E\n", __func__);
}
void mdp4_dtv_overlay(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	mutex_lock(&mfd->dma->ov_mutex);

	if (mdp_fb_is_power_off(mfd)) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (vctrl->base_pipe == NULL)
		mdp4_overlay_dtv_set(mfd, NULL);

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_warn("%s: dtv_pipe == NULL\n", __func__);
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	if (hdmi_prim_display &&
	    (pipe->pipe_used == 0 ||
	     pipe->mixer_stage != MDP4_MIXER_STAGE_BASE)) {
		pr_err("%s: NOT baselayer\n", __func__);
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		pipe->srcp0_addr = (uint32)mfd->ibuf.buf;
		mdp4_dtv_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);
	mdp4_dtv_pipe_commit(0, 1);
	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);
}
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];

	if (!mfd->panel_power_on) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mipi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_dsi_cmd_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);
/* OPPO 2013-10-19 gousj Modify begin for WFD crash */
#ifndef CONFIG_VENDOR_EDIT
	mdp4_dsi_cmd_pipe_commit(cndx, 1);
#else
	mdp4_dsi_cmd_pipe_commit(cndx, 0);
#endif
/* OPPO 2013-10-19 gousj Modify end */
	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);
}
int mdp4_dsi_video_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];
	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/*
	 * allow stage_commit without pipes queued
	 * (vp->update_cnt == 0) to unstage pipes after
	 * overlay_unset
	 */

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/*
				 * commit pipes which are in the pending queue
				 * and have not been unset yet
				 */
				mdp4_overlay_vsync_commit(pipe);
				if (pipe->frame_format !=
						MDP4_FRAME_FORMAT_LINEAR) {
					spin_lock_irqsave(&vctrl->spin_lock,
								flags);
					INIT_COMPLETION(vctrl->dmap_comp);
					vsync_irq_enable(INTR_DMA_P_DONE,
								MDP_DMAP_TERM);
					spin_unlock_irqrestore(
						&vctrl->spin_lock, flags);
				}
			}
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_dsi_video_start();

	/*
	 * There is a possibility that a pipe commit comes very close to the
	 * next vsync. This may cause two consecutive pipe_commits to happen
	 * within the same vsync period, which causes an iommu page fault when
	 * the previous iommu buffer is freed. Set the ION_IOMMU_UNMAP_DELAYED
	 * flag at ion_map_iommu() to delay unmapping the iommu buffer and fix
	 * this problem.
	 * Also, ion_unmap_iommu() may take as long as 9 ms to free an ion
	 * buffer, therefore mdp4_overlay_iommu_unmap_freelist(mixer) should
	 * be called after stage_commit() to ensure pipe_commit (up to
	 * stage_commit) completes within the vsync period.
	 */

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dsi_video_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(0);
		else
			mdp4_dsi_video_wait4vsync(0);
	}

	return cnt;
}
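/*
 * Hedged caller sketch, not part of the driver: it mirrors the pattern used
 * by mdp4_dsi_video_overlay() above, where the commit is issued with
 * wait == 0 under ov_mutex and the caller then blocks explicitly on the
 * engine that was kicked off. The function name is illustrative only; the
 * called helpers all appear elsewhere in this file.
 */
static void example_dsi_video_commit_and_wait(struct msm_fb_data_type *mfd,
						int cndx)
{
	struct mdp4_overlay_pipe *pipe = vsync_ctrl_db[cndx].base_pipe;
	int cnt;

	mutex_lock(&mfd->dma->ov_mutex);
	cnt = mdp4_dsi_video_pipe_commit(cndx, 0);	/* do not wait inside */
	mutex_unlock(&mfd->dma->ov_mutex);

	if (cnt) {
		if (pipe->ov_blt_addr)
			mdp4_dsi_video_wait4ov(cndx);	/* blt (writeback) path */
		else
			mdp4_dsi_video_wait4dmap(cndx);	/* direct dmap path */
	}
}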
static void mdp4_overlay_dtv_alloc_pipe(struct msm_fb_data_type *mfd,
		int32 ptype, struct vsycn_ctrl *vctrl)
{
	int ret = 0;
	struct fb_info *fbi = mfd->fbi;
	struct mdp4_overlay_pipe *pipe;

	if (vctrl->base_pipe != NULL)
		return;

	pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER1);
	if (pipe == NULL) {
		pr_err("%s: pipe_alloc failed\n", __func__);
		return;
	}

	pipe->pipe_used++;
	pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
	pipe->mixer_num = MDP4_MIXER1;

	if (ptype == OVERLAY_TYPE_BF) {
		/* LSP_BORDER_COLOR */
		MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5004,
			((0x0 & 0xFFF) << 16) |	/* 12-bit B */
			(0x0 & 0xFFF));		/* 12-bit G */
		/* MSP_BORDER_COLOR */
		MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5008,
			(0x0 & 0xFFF));		/* 12-bit R */
		pipe->src_format = MDP_ARGB_8888;
	} else {
		switch (mfd->ibuf.bpp) {
		case 2:
			pipe->src_format = MDP_RGB_565;
			break;
		case 3:
			pipe->src_format = MDP_RGB_888;
			break;
		case 4:
		default:
			if (hdmi_prim_display)
				pipe->src_format = MSMFB_DEFAULT_TYPE;
			else
				pipe->src_format = MDP_ARGB_8888;
			break;
		}
	}

	pipe->src_height = fbi->var.yres;
	pipe->src_width = fbi->var.xres;
	pipe->src_h = fbi->var.yres;
	pipe->src_w = fbi->var.xres;
	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;
	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;
	pipe->srcp0_ystride = fbi->fix.line_length;

	mdp4_overlay_mdp_pipe_req(pipe, mfd);
	mdp4_calc_blt_mdp_bw(mfd, pipe);
	mdp4_overlay_mdp_perf_req(mfd);
	mdp4_overlay_mdp_perf_upd(mfd, 1);

	ret = mdp4_overlay_format2pipe(pipe);
	if (ret < 0)
		pr_warn("%s: format2type failed\n", __func__);

	mdp4_overlay_dmae_xy(pipe);	/* dma_e */
	mdp4_overlayproc_cfg(pipe);

	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		pipe->srcp0_addr = (uint32) mfd->ibuf.buf;
		mdp4_overlay_rgb_setup(pipe);
	}

	mdp4_overlay_reg_flush(pipe, 1);
	mdp4_mixer_stage_up(pipe, 0);
	mdp4_mixer_stage_commit(pipe->mixer_num);

	vctrl->base_pipe = pipe; /* keep it */
}
int mdp4_lcdc_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	return cnt;
}
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;
#ifdef CONFIG_VENDOR_EDIT
	/* liuyan: fix MHL flicker while playing video */
	long long tick;
#endif

	vctrl = &vsync_ctrl_db[cndx];

	if (!mfd->panel_power_on)
		return;

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return;
	}

	mutex_lock(&vctrl->update_lock);
	if (!vctrl->clk_enabled) {
		pr_err("%s: mdp clocks disabled\n", __func__);
		mutex_unlock(&vctrl->update_lock);
		return;
	}
	mutex_unlock(&vctrl->update_lock);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->expire_tick) {
		/*
		 * in the middle of shutting clocks down
		 * delay to allow pan display to go through
		 */
		vctrl->expire_tick = VSYNC_EXPIRE_TICK;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mipi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_dsi_cmd_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	mutex_lock(&mfd->dma->ov_mutex);
#ifndef CONFIG_VENDOR_EDIT
	/* liuyan: fix MHL flicker while playing video */
	/* mdp4_dsi_cmd_pipe_commit(); */
#else
	mdp4_dsi_cmd_pipe_commit(cndx, 0);
#endif
	mutex_unlock(&mfd->dma->ov_mutex);

#ifdef CONFIG_VENDOR_EDIT
	/* liuyan: fix MHL flicker while playing video */
	mdp4_dsi_cmd_wait4vsync(cndx, &tick);
#endif
	mdp4_overlay_mdp_perf_upd(mfd, 0);
}
int mdp4_lcdc_pipe_commit(void)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[0];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	return cnt;
}
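/*
 * Hedged caller sketch, not part of the driver: it mirrors mdp4_lcdc_overlay()
 * above, which issues the commit under ov_mutex and then waits on whichever
 * engine the commit started (overlay done in blt mode, dmap done otherwise).
 * The function name is illustrative only.
 */
static void example_lcdc_kickoff(struct msm_fb_data_type *mfd)
{
	struct mdp4_overlay_pipe *pipe = vsync_ctrl_db[0].base_pipe;
	int cnt;

	mutex_lock(&mfd->dma->ov_mutex);
	cnt = mdp4_lcdc_pipe_commit();	/* returns number of pipes committed */
	mutex_unlock(&mfd->dma->ov_mutex);

	if (cnt) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);	/* blt (writeback) path */
		else
			mdp4_lcdc_wait4dmap(0);	/* direct dmap path */
	}
}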
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
{
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;
	int clk_set_on = 0;
#if defined(CONFIG_MACH_JAGUAR)
	static int logo;
#endif

	vctrl = &vsync_ctrl_db[cndx];

	if (!mfd->panel_power_on)
		return;

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return;
	}

#if defined(CONFIG_FB_MSM_MIPI_NOVATEK_CMD_WVGA_PT) || \
	defined(CONFIG_FB_MSM_MIPI_NOVATEK_BOE_CMD_WVGA_PT)
	if (!is_lcd_connected)
		return;
#endif

	mutex_lock(&vctrl->update_lock);
	if (atomic_read(&vctrl->suspend)) {
#if defined(CONFIG_MACH_JAGUAR)
		if (!logo || poweroff_charging) {
			mipi_dsi_clk_cfg(1);
			mdp_clk_ctrl(1);
			vsync_irq_enable(INTR_PRIMARY_RDPTR,
					MDP_PRIM_RDPTR_TERM);
			logo = 1;
		} else {
			pr_err("%s: mdp clocks disabled\n", __func__);
			mutex_unlock(&vctrl->update_lock);
			return;
		}
#else
		mutex_unlock(&vctrl->update_lock);
		pr_err("%s: suspended, no more pan display\n", __func__);
		return;
#endif
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vctrl->clk_control = 0;
	vctrl->pan_display++;
	if (!vctrl->clk_enabled) {
		clk_set_on = 1;
		vctrl->clk_enabled = 1;
	}
	vctrl->expire_tick = VSYNC_EXPIRE_TICK;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (clk_set_on) {
		pr_err("%s: warning, clock off while pan display\n",
			__func__);
		pr_debug("%s: SET_CLK_ON\n", __func__);
		mipi_dsi_clk_cfg(1);
		mdp_clk_ctrl(1);
		vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
	}
	mutex_unlock(&vctrl->update_lock);

	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mipi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_dsi_cmd_pipe_queue(0, pipe);
	}

	mutex_lock(&mfd->dma->ov_mutex);
	mdp4_overlay_mdp_perf_upd(mfd, 1);
	mdp4_dsi_cmd_pipe_commit(cndx, 0);
	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);
}