/*
 * Kick off the OVERLAY0 pipe toward an MDDI (ta8851) panel.
 *
 * MDP4_NONBLOCKING build: issue the kickoff immediately and only block
 * (on the global mdp4_overlay_completion) when the panel reports it is
 * not yet displaying, so the display can be switched on once the first
 * frame has landed.  Blocking build: always wait on the per-pipe
 * completion before releasing the OVERLAY0 irq.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
#ifdef MDP4_NONBLOCKING
	boolean disp_state;

	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* sample panel state before the kickoff so we know whether the
	 * display still has to be enabled after this frame */
	disp_state = mddi_ta8851_displaying_chk();
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);
	if( disp_state != TRUE ) {
		/* first frame: wait for DMA done, then light the panel */
		INIT_COMPLETION(mdp4_overlay_completion);
		wait_for_completion_killable(&mdp4_overlay_completion);
		mddi_ta8851_enable_display();
	}
#else
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&pipe->comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
	/* enable display only once the panel confirms it is not already on */
	if( mddi_ta8851_displaying_chk() != TRUE )
		mddi_ta8851_enable_display();
#endif
}
/*
 * Kick off the OVERLAY0 pipe over MDDI, with kick-rate throttling.
 *
 * NONBLOCKING build: when more than one pipe is staged on the mixer and
 * the base-layer (mddi_pipe) kick arrives sooner than half the running
 * average kick interval, the frame is skipped (counted in
 * mdp4_stat.kickoff_mddi_skip) to avoid flooding the link.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
#ifdef MDP4_NONBLOCKING
	unsigned long flag;

	if (pipe == mddi_pipe) {
		if (mdp4_overlay_pipe_staged(pipe->mixer_num) > 1) {
			/* too soon since the last kick: drop this frame */
			if (time_before(jiffies,
				(mddi_last_kick + mddi_kick_interval/2))) {
				mdp4_stat.kickoff_mddi_skip++;
				return;
			}
		}
	}
	/* if DMA is still busy, arm the completion so we can wait for it */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	/* NOTE(review): pending_pipe is tested outside the spinlock here;
	 * presumably only one kicker runs at a time (mfd->sem below) —
	 * confirm against callers */
	if (pending_pipe != NULL) {
		wait_for_completion_killable(&pipe->comp);
		pending_pipe = NULL;
	}
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	if (pipe != mddi_pipe) {
		/* keep a running average of the inter-kick interval for
		 * the throttle test above */
		int intv;

		intv = jiffies - mddi_last_kick;
		mddi_kick_interval += intv;
		mddi_kick_interval /= 2;
		mddi_last_kick = jiffies;
	}
	up(&mfd->sem);
#else
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);
	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&pipe->comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
#endif
}
/*
 * Kick off the OVERLAY0 pipe over MDDI (non-blocking; the DMA-done irq
 * clears the busy flags).  Optionally drops the panel to 16 bpp first.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	/* use dma_p(overlay) pipe, change bpp into 16 */
#ifdef CONFIG_FB_MSM_BPP_SWITCH
	if(16 != mfd->panel_info.bpp) {
		mdp4_switch_bpp_config(mfd,16);
	}
#endif
	/* change mdp clk while mdp is idle */
	mdp4_set_perf_level();

	mdp_enable_irq(MDP_OVERLAY0_TERM);
	/* mark DMA (and DMAP when a blt/writeback buffer is in use) busy
	 * under the lock so the done-irq handler sees a consistent state */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mfd->dma->busy = TRUE;
	if (mddi_pipe->blt_addr)
		mfd->dma->dmap_busy = TRUE;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	mdp4_stat.kickoff_ov0++;
}
/*
 * Kick off the DMA_S pipe toward the ta8851 MDDI panel and block until
 * the transfer completes, then make sure the display is enabled.
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	down(&mfd->sem);
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->dmas_comp);
	mfd->ibuf_flushed = TRUE;
	pending_pipe = pipe;

	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&pipe->dmas_comp);
	pending_pipe = NULL;
	mdp_disable_irq(MDP_DMA_S_TERM);
	mddi_ta8851_enable_display();
}
/*
 * Kick off the OVERLAY0 pipe on a DSI command-mode panel.  Waits for any
 * in-flight DSI/MDP transfer first; when no blt buffer is used the MDP
 * output goes straight to the DSI cmd engine, which must be started
 * before the kickoff.
 */
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	/* change mdp clk */
	mdp4_set_perf_level();

	mipi_dsi_mdp_busy_wait(mfd);
	/* direct (non-blt) path: DSI cmd engine consumes MDP output live */
	if (dsi_pipe->blt_addr == 0)
		mipi_dsi_cmd_mdp_start();

	mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);

	/* busy flags must be set atomically w.r.t. the done-irq handler */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	if (dsi_pipe->blt_addr)
		mfd->dma->dmap_busy = TRUE;
	/* start OVERLAY pipe */
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	mdp4_stat.kickoff_ov0++;
}
/*
 * Kick off the OVERLAY0 pipe on a DSI command-mode panel, tagging the
 * busy owners for debug: busy_pid/dmap_pid pack the caller's pid in the
 * high 16 bits and the source line in the low bits so a stuck busy flag
 * can be traced back to its setter.
 */
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* debug breadcrumb: who set busy, and where */
	mfd->dma->busy_pid = (current->pid << 16)+__LINE__;
	if (dsi_pipe->blt_addr) {
		mfd->dma->dmap_busy = TRUE;
		mfd->dma->dmap_pid = (current->pid << 16)+__LINE__;
	}
	wmb();	/* make sure all registers updated */
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	wmb();
	if (pipe->blt_addr == 0) {
		/* trigger dsi cmd engine */
		mipi_dsi_cmd_mdp_sw_trigger();
	}
}
/*
 * Kick off the OVERLAY0 pipe over MDDI.  On MDP4 V2.1 hardware, first
 * applies a MASTER4 fetch-priority workaround: register 0x0028 bits 8-9
 * are reprogrammed whenever an overlay was just unset (value depends on
 * panel width) or just set (bits cleared).
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	/* change mdp clk while mdp is idle */
	mdp4_set_perf_level();

	if (mdp_hw_revision == MDP4_REVISION_V2_1) {
		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_UNSET)) {
			uint32 data;

			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
				data |= 0x0200;
			else
				data |= 0x0100;
			MDP_OUTP(MDP_BASE + 0x00028, data);
			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_UNSET,
								false);
		}
		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_SET)) {
			uint32 data;

			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			MDP_OUTP(MDP_BASE + 0x00028, data);
			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_SET,
								false);
		}
	}

	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
}
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd, struct mdp4_overlay_pipe *pipe) { #ifdef CONFIG_SHLCDC_BOARD down(&mfd->sem); #endif /*CONFIG_SHLCDC_BOARD*/ mdp_enable_irq(MDP_DMA_S_TERM); mfd->dma->busy = TRUE; #ifdef CONFIG_SHLCDC_BOARD INIT_COMPLETION(pipe->dmas_comp); #endif /*CONFIG_SHLCDC_BOARD*/ mfd->ibuf_flushed = TRUE; #ifdef CONFIG_SHLCDC_BOARD pending_pipe = pipe; #endif /*CONFIG_SHLCDC_BOARD*/ #ifdef CONFIG_SHLCDC_BOARD mdp_wait_vsync(mfd); #endif /*CONFIG_SHLCDC_BOARD*/ /* start dma_s pipe */ mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd); #ifdef CONFIG_SHLCDC_BOARD up(&mfd->sem); #endif /*CONFIG_SHLCDC_BOARD*/ /* wait until DMA finishes the current job */ #ifdef CONFIG_SHLCDC_BOARD wait_for_completion_killable(&pipe->dmas_comp); pending_pipe = NULL; #else wait_for_completion(&mfd->dma->comp); #endif /*CONFIG_SHLCDC_BOARD*/ mdp_disable_irq(MDP_DMA_S_TERM); }
/*
 * Kick off the DMA_S pipe (switching the panel to 24 bpp first when
 * CONFIG_FB_MSM_BPP_SWITCH is set) and wait for the transfer to finish.
 * The Huawei build bounds the wait with a 2 s timeout; the return value
 * of the timed wait is deliberately ignored (best-effort).
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	/* use dma_s pipe, change bpp into 24 */
#ifdef CONFIG_FB_MSM_BPP_SWITCH
	if(24 != mfd->panel_info.bpp) {
		mdp4_switch_bpp_config(mfd,24);
	}
#endif
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	mfd->ibuf_flushed = TRUE;
	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);

	/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
	/* bounded wait so a lost irq cannot hang the caller forever */
	wait_for_completion_interruptible_timeout(&mfd->dma->comp, 2 * HZ);
#else
	wait_for_completion(&mfd->dma->comp);
#endif
	mdp_disable_irq(MDP_DMA_S_TERM);
}
enum hrtimer_restart mdp_dma2_vsync_hrtimer_handler(struct hrtimer *ht) { struct msm_fb_data_type *mfd = NULL; mfd = container_of(ht, struct msm_fb_data_type, dma_hrtimer); mdp_pipe_kickoff(MDP_DMA2_TERM, mfd); if (msm_fb_debug_enabled) { ktime_t t; int usec_diff; int actual_wait; t = ktime_get_real(); actual_wait = ktime_to_us(ktime_sub(t, vt)); usec_diff = actual_wait - mdp_expected_usec_wait; if ((mdp_usec_diff_threshold < usec_diff) || (usec_diff < 0)) MSM_FB_DEBUG ("HRT Diff = %d usec Exp=%d usec Act=%d usec\n", usec_diff, mdp_expected_usec_wait, actual_wait); } return HRTIMER_NORESTART; }
/*
 * Kick off the DMA_S pipe (switching to 24 bpp when configured) and wait
 * for it to finish.  The busy flag is only set on the direct path
 * (ov_blt_addr == 0); NOTE(review): the completion is waited on in both
 * cases — presumably the dma_s done-irq signals mfd->dma->comp
 * regardless of the busy flag; confirm against the isr.
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	/* use dma_s pipe, change bpp into 24 */
#ifdef CONFIG_FB_MSM_BPP_SWITCH
	if(24 != mfd->panel_info.bpp) {
		mdp4_switch_bpp_config(mfd,24);
	}
#endif
	mdp_enable_irq(MDP_DMA_S_TERM);
	if (mddi_pipe->ov_blt_addr == 0)
		mfd->dma->busy = TRUE;
	mfd->ibuf_flushed = TRUE;
	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
	mdp4_stat.kickoff_dmas++;

	/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
	/* bounded wait so a lost irq cannot hang the caller forever */
	wait_for_completion_interruptible_timeout(&mfd->dma->comp, 2 * HZ);
#else
	wait_for_completion(&mfd->dma->comp);
#endif
	mdp_disable_irq(MDP_DMA_S_TERM);
}
/*
 * Minimal non-blocking kickoff of the OVERLAY0 pipe over MDDI: enable
 * the irq, mark DMA busy and fire.  Completion is handled elsewhere by
 * the OVERLAY0 done-irq path.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
}
/*
 * Minimal non-blocking kickoff on a DSI command-mode panel: start the
 * OVERLAY0 pipe, then software-trigger the DSI cmd engine to drain the
 * frame to the panel.
 */
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	/* trigger dsi cmd engine */
	mipi_dsi_cmd_mdp_sw_trigger();
}
/*
 * Kick off the OVERLAY0 pipe over MDDI.
 *
 * NONBLOCKING build: if the previous DMA is still in flight, wait for it
 * first (completion armed under mdp_spin_lock), then kick and return
 * without waiting for this frame.  Blocking build: kick and wait for the
 * per-pipe completion before releasing the irq.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
#ifdef MDP4_NONBLOCKING
	unsigned long flag;

	/* if DMA is still busy, arm the completion so we can wait for it */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	/* NOTE(review): pending_pipe is tested outside the spinlock;
	 * presumably kickers are already serialized — confirm */
	if (pending_pipe != NULL) {
		/* wait until DMA finishes the current job */
		wait_for_completion_killable(&pipe->comp);
		pending_pipe = NULL;
	}
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);
#else
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&pipe->comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
#endif
}
/*
 * Kick off the DMA_S pipe and block (uninterruptibly) on the shared
 * DMA completion until the transfer finishes.
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	mfd->ibuf_flushed = TRUE;
	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);

	/* wait until DMA finishes the current job */
	wait_for_completion(&mfd->dma->comp);
	mdp_disable_irq(MDP_DMA_S_TERM);
}
/*
 * Kick off OVERLAY0 on a DSI command-mode panel using the DMAP path.
 *
 * Interrupt plumbing: DMA_P done is always unmasked; OVERLAY0 done is
 * additionally unmasked only on the blt (writeback) path, where the
 * overlay first renders into the blt buffer and DMAP is then kicked by
 * hand (write to MDP_BASE + 0x000c) to scan it out.
 *
 * NOTE(review): dsi_pipe is null-checked in the two blt branches but
 * dereferenced unconditionally at "dsi_pipe->ov_cnt++" — inconsistent;
 * confirm whether dsi_pipe can actually be NULL here and guard (or drop
 * the other checks) accordingly.
 */
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	mdp4_iommu_attach();
	/* change mdp clk */
	mdp4_set_perf_level();

	mipi_dsi_mdp_busy_wait(mfd);
	mipi_dsi_cmd_mdp_start();
	mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);

	/* MDP cmd block enable */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	outp32(MDP_INTR_CLEAR, INTR_DMA_P_DONE);
	outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
	mdp_intr_mask |= INTR_DMA_P_DONE;
	if (dsi_pipe && dsi_pipe->ov_blt_addr) {
		/* blt path: overlay completion is tracked separately */
		mdp_intr_mask |= INTR_OVERLAY0_DONE;
		mfd->dma->busy = TRUE;
	} else
		mdp_intr_mask &= ~INTR_OVERLAY0_DONE;
	mfd->dma->dmap_busy = TRUE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(MDP_DMA2_TERM);
	wmb();
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* MDP cmd block disable */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	wmb();
	mdp4_stat.kickoff_ov0++;
	dsi_pipe->ov_cnt++;
	if (dsi_pipe && dsi_pipe->ov_blt_addr) {
		wmb();
		/* kick DMAP by hand to scan the blt buffer out */
		outpdw(MDP_BASE + 0x000c, 0x0);
		mdp4_stat.kickoff_dmap++;
		wmb();
		dsi_pipe->dmap_cnt++;
		dsi_pipe->blt_cnt++;
	}
}
/*
 * Non-blocking kickoff of the OVERLAY0 pipe over MDDI; when a blt
 * (writeback) buffer is configured, DMAP is flagged busy as well.  The
 * done-irq path clears the flags.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	mdp_enable_irq(MDP_OVERLAY0_TERM);
	/* set busy flags atomically w.r.t. the done-irq handler */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mfd->dma->busy = TRUE;
	if (mddi_pipe->ov_blt_addr)
		mfd->dma->dmap_busy = TRUE;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	mdp4_stat.kickoff_ov0++;
}
/*
 * Kick off the DMA_S pipe (switching to 24 bpp when configured).
 * First drains any in-flight DMA (completion armed under mdp_spin_lock),
 * then kicks dma_s and waits on the per-pipe dmas_comp.  Huawei builds
 * bound both waits with timeouts (1 s / 2 s) and ignore the result
 * (best-effort; a lost irq must not hang the caller).
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	/* use dma_s pipe, change bpp into 24 */
#ifdef CONFIG_FB_MSM_BPP_SWITCH
	if(24 != mfd->panel_info.bpp) {
		mdp4_switch_bpp_config(mfd,24);
	}
#endif
	/* if DMA is still busy, arm the completion so we can wait for it */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	if (pending_pipe != NULL) {
		/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
		wait_for_completion_interruptible_timeout(&pipe->comp,
								1 * HZ);
#else
		wait_for_completion_killable(&pipe->comp);
#endif
		pending_pipe = NULL;
	}

	down(&mfd->sem);
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->dmas_comp);
	mfd->ibuf_flushed = TRUE;
	pending_pipe = pipe;

	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
	/* bounded wait so a lost irq cannot hang the caller forever */
	wait_for_completion_interruptible_timeout(&pipe->dmas_comp, 2 * HZ);
#else
	wait_for_completion_killable(&pipe->dmas_comp);
#endif
	pending_pipe = NULL;
	mdp_disable_irq(MDP_DMA_S_TERM);
}
/*
 * Kick off the DMA_S pipe (bumping the mdp clock first) and block on the
 * shared DMA completion until the transfer finishes.
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	/* change mdp clk while mdp is idle */
	mdp4_set_perf_level();

	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	mfd->ibuf_flushed = TRUE;
	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
	mdp4_stat.kickoff_dmas++;

	/* wait until DMA finishes the current job */
	wait_for_completion(&mfd->dma->comp);
	mdp_disable_irq(MDP_DMA_S_TERM);
}
/*
 * Kick off the DMA_S pipe, serialized with mfd->sem, and wait on the
 * per-pipe dmas_comp completion before releasing the irq.
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	down(&mfd->sem);
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->dmas_comp);
	mfd->ibuf_flushed = TRUE;
	pending_pipe = pipe;

	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&pipe->dmas_comp);
	pending_pipe = NULL;
	mdp_disable_irq(MDP_DMA_S_TERM);
}
/*
 * Kick off the OVERLAY2 (writeback) pipe: unmask and clear the
 * OVERLAY2-done interrupt under mdp_spin_lock, mark DMA busy, then fire.
 * Completion is handled by the OVERLAY2 done-irq path.
 */
void mdp4_writeback_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY2_TERM);
	mfd->dma->busy = TRUE;
	/* clear any stale done status before unmasking */
	outp32(MDP_INTR_CLEAR, INTR_OVERLAY2_DONE);
	mdp_intr_mask |= INTR_OVERLAY2_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	wmb();	/* make sure all registers updated */
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY2_TERM, mfd);
	wmb();
	pr_debug("%s: before ov done interrupt\n", __func__);
}
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd, struct mdp4_overlay_pipe *pipe) { unsigned long flag, rflag; #ifdef FACTORY_TEST if (!is_lcd_connected) return; #endif /* *QCT_PATCH-sbyun to avoid confilt *between on going video image and new overlay image, *add some delay when mdp of dma is in busy status */ /* change mdp clk */ mdp4_set_perf_level(); mipi_dsi_mdp_busy_wait(mfd); #ifdef MDP_UNDERFLOW_RESET_CTRL_CMD spin_lock_irqsave(&mixer_reset_lock, rflag); #endif if (dsi_pipe->blt_addr == 0) mipi_dsi_cmd_mdp_start(); mdp4_overlay_dsi_state_set(ST_DSI_PLAYING); spin_lock_irqsave(&mdp_spin_lock, flag); mdp_enable_irq(MDP_OVERLAY0_TERM); mfd->dma->busy = TRUE; if (dsi_pipe->blt_addr) mfd->dma->dmap_busy = TRUE; /* start OVERLAY pipe */ spin_unlock_irqrestore(&mdp_spin_lock, flag); mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd); mdp4_stat.kickoff_ov0++; #ifdef MDP_UNDERFLOW_RESET_CTRL_CMD spin_unlock_irqrestore(&mixer_reset_lock, rflag); #endif }
/*
 * Non-blocking kickoff of OVERLAY0 on a DSI command-mode panel: set the
 * busy flags under the lock, fire the pipe, and on the direct (non-blt)
 * path software-trigger the DSI cmd engine to drain the frame.
 */
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	if (dsi_pipe->blt_addr)
		mfd->dma->dmap_busy = TRUE;
	/* start OVERLAY pipe */
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);

	if (pipe->blt_addr == 0) {
		/* trigger dsi cmd engine */
		mipi_dsi_cmd_mdp_sw_trigger();
	}
}
/*
 * Kick off the OVERLAY0 pipe over MDDI, optionally switching the panel
 * to 16 bpp first.  On MDP4 V2.1 hardware, applies the MASTER4
 * fetch-priority workaround (register 0x0028 bits 8-9) after an overlay
 * set/unset — added to resolve a frozen screen at 60 fps during CTS.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	/* use dma_p(overlay) pipe, change bpp into 16 */
#ifdef CONFIG_FB_MSM_BPP_SWITCH
	if(16 != mfd->panel_info.bpp) {
		mdp4_switch_bpp_config(mfd,16);
	}
#endif

	if (mdp_hw_revision == MDP4_REVISION_V2_1) {
		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_UNSET)) {
			uint32 data;

			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
				data |= 0x0200;
			else
				data |= 0x0100;
			MDP_OUTP(MDP_BASE + 0x00028, data);
			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_UNSET,
								false);
		}
		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_SET)) {
			uint32 data;

			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			MDP_OUTP(MDP_BASE + 0x00028, data);
			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_SET,
								false);
		}
	}

	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
}
/*
 * Blocking kickoff of OVERLAY0 on a DSI command-mode panel: fire the
 * overlay engine, software-trigger the DSI cmd engine, then wait on the
 * global dsi_cmd_comp completion before releasing the irq.
 */
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	down(&mfd->sem);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(dsi_cmd_comp);
	mdp_enable_irq(MDP_OVERLAY0_TERM);

	/* Kick off overlay engine */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	/* trigger dsi cmd engine */
	mipi_dsi_cmd_mdp_sw_trigger();
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&dsi_cmd_comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
}
/*
 * Kick off the OVERLAY0 pipe over MDDI.  Only when the board asks for
 * power_on_panel_at_pan does the call block on the per-pipe completion
 * (so the panel can be powered once the first pan has landed); otherwise
 * the kickoff is fire-and-forget.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	struct msm_fb_panel_data *pdata =
		(struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;

	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	if (pdata->power_on_panel_at_pan) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);

	if (pdata->power_on_panel_at_pan) {
		/* wait until DMA finishes before powering the panel */
		wait_for_completion_killable(&pipe->comp);
		pending_pipe = NULL;
	}
}
/*
 * Non-blocking kickoff of the OVERLAY0 pipe over MDDI with blt support.
 * On MDP4 V2.1 hardware, first applies the MASTER4 fetch-priority
 * workaround (register 0x0028 bits 8-9) after an overlay set/unset.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	if (mdp_hw_revision == MDP4_REVISION_V2_1) {
		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_UNSET)) {
			uint32 data;

			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
				data |= 0x0200;
			else
				data |= 0x0100;
			MDP_OUTP(MDP_BASE + 0x00028, data);
			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_UNSET,
								false);
		}
		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_SET)) {
			uint32 data;

			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			MDP_OUTP(MDP_BASE + 0x00028, data);
			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_SET,
								false);
		}
	}

	mdp_enable_irq(MDP_OVERLAY0_TERM);
	/* set busy flags atomically w.r.t. the done-irq handler */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mfd->dma->busy = TRUE;
	if (mddi_pipe->blt_addr)
		mfd->dma->dmap_busy = TRUE;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	mdp4_stat.kickoff_ov0++;
}
/*
 * Kick off OVERLAY0 on a DSI command-mode panel.  Two trigger schemes:
 * without MDP4_DSI_SW_TRIGGER the DSI cmd engine is started before the
 * kickoff (after draining any busy transfer); with it, the engine is
 * software-triggered after the kickoff on the direct (non-blt) path.
 */
void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	/* change mdp clk */
	mdp4_set_perf_level();

#ifndef MDP4_DSI_SW_TRIGGER
	mipi_dsi_mdp_busy_wait(mfd);
	if (dsi_pipe->blt_addr == 0)
		mipi_dsi_cmd_mdp_start();
#endif
	mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);

	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	if (dsi_pipe->blt_addr)
		mfd->dma->dmap_busy = TRUE;
	/* start OVERLAY pipe */
	wmb();
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	wmb();
	mdp4_stat.kickoff_ov0++;

#ifdef MDP4_DSI_SW_TRIGGER
	if (dsi_pipe->blt_addr == 0) {
		/* trigger dsi cmd engine */
		mipi_dsi_cmd_mdp_sw_trigger(mfd);
	}
#endif
}
/*
 * mdp_dma_schedule() - kick DMA either immediately or after a
 * software-vsync delay.
 *
 * When SW vsync applies, the LCD's read-line counter is compared against
 * the frame's starting write line (start_y); the gap is converted to a
 * microsecond delay at the panel's refresh rate and armed on the
 * per-device hrtimer, whose handler performs the actual kickoff.  This
 * keeps the MDP write pointer from overtaking the LCD read pointer
 * (tear avoidance on panels without HW vsync).
 */
static void mdp_dma_schedule(struct msm_fb_data_type *mfd, uint32 term)
{
	/*
	 * dma2 configure VSYNC block
	 * vsync supported on Primary LCD only for now
	 */
	int32 mdp_lcd_rd_cnt;
	uint32 usec_wait_time;
	uint32 start_y;

	/*
	 * ToDo: if we can move HRT timer callback to workqueue, we can
	 * move DMA2 power on under mdp_pipe_kickoff().
	 * This will save a power for hrt time wait.
	 * However if the latency for context switch (hrt irq -> workqueue)
	 * is too big, we will miss the vsync timing.
	 */
	if (term == MDP_DMA2_TERM)
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

	mdp_dma2_update_time_in_usec =
		ktime_to_us(mdp_dma2_last_update_time);

	/* SW vsync not needed/wanted: kick immediately */
	if ((!mfd->ibuf.vsync_enable) || (!mfd->panel_info.lcd.vsync_enable)
	    || (mfd->use_mdp_vsync)) {
		mdp_pipe_kickoff(term, mfd);
		return;
	}
	/* SW vsync logic starts here */

	/* get current rd counter */
	mdp_lcd_rd_cnt = mdp_get_lcd_line_counter(mfd);
	if (mdp_dma2_update_time_in_usec != 0) {
		uint32 num, den;

		/*
		 * roi width boundary calculation to know the size of pixel
		 * width that MDP can send faster or slower than LCD read
		 * pointer
		 */
		num = mdp_last_dma2_update_width *
			mdp_last_dma2_update_height;
		den = (((mfd->panel_info.lcd.refx100 *
			mfd->total_lcd_lines) / 1000) *
			(mdp_dma2_update_time_in_usec / 100)) / 1000;

		if (den == 0)
			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
				mfd->panel_info.xres + 1;
		else
			mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
				(int)(num / den);
	}

	/* bias the read count by how much MDP outruns (or trails) the LCD */
	if (mfd->vsync_width_boundary[mdp_last_dma2_update_width] >
	    mdp_curr_dma2_update_width) {
		/* MDP wrp is faster than LCD rdp */
		mdp_lcd_rd_cnt += mdp_lcd_rd_cnt_offset_fast;
	} else {
		/* MDP wrp is slower than LCD rdp */
		mdp_lcd_rd_cnt -= mdp_lcd_rd_cnt_offset_slow;
	}

	/* wrap the adjusted read count back into the panel's line range */
	if (mdp_lcd_rd_cnt < 0)
		mdp_lcd_rd_cnt = mfd->total_lcd_lines + mdp_lcd_rd_cnt;
	else if (mdp_lcd_rd_cnt > mfd->total_lcd_lines)
		mdp_lcd_rd_cnt =
			mdp_lcd_rd_cnt - mfd->total_lcd_lines - 1;

	/* get wrt pointer position */
	start_y = mfd->ibuf.dma_y;

	/* measure line difference between start_y and rd counter */
	if (start_y > mdp_lcd_rd_cnt) {
		/*
		 * *100 for lcd_ref_hzx100 was already multiplied by 100
		 * *1000000 is for usec conversion
		 */
		if ((start_y - mdp_lcd_rd_cnt) <=
		    mdp_vsync_usec_wait_line_too_short)
			usec_wait_time = 0;
		else
			usec_wait_time =
				((start_y - mdp_lcd_rd_cnt) * 1000000) /
				((mfd->total_lcd_lines *
				  mfd->panel_info.lcd.refx100) / 100);
	} else {
		if ((start_y + (mfd->total_lcd_lines - mdp_lcd_rd_cnt)) <=
		    mdp_vsync_usec_wait_line_too_short)
			usec_wait_time = 0;
		else
			usec_wait_time =
				((start_y +
				  (mfd->total_lcd_lines - mdp_lcd_rd_cnt)) *
				 1000000) /
				((mfd->total_lcd_lines *
				  mfd->panel_info.lcd.refx100) / 100);
	}

	mdp_last_dma2_update_width = mdp_curr_dma2_update_width;
	mdp_last_dma2_update_height = mdp_curr_dma2_update_height;

	if (usec_wait_time == 0) {
		mdp_pipe_kickoff(term, mfd);
	} else {
		ktime_t wait_time;

		wait_time = ns_to_ktime(usec_wait_time * 1000);

		if (msm_fb_debug_enabled) {
			/* record expectation for the hrtimer handler's
			 * accuracy check */
			vt = ktime_get_real();
			mdp_expected_usec_wait = usec_wait_time;
		}
		hrtimer_start(&mfd->dma_hrtimer, wait_time,
			      HRTIMER_MODE_REL);
	}
}
/*
 * mdp_dma_s_update_lcd() - program and kick the secondary-display DMA.
 *
 * Builds the DMA_S config word from the ibuf format (pack pattern,
 * alpha/RGB565 flags, destination color depth), programs size/address/
 * ystride and the output window, then kicks DMA_S (MDDI panel) or DMA_E
 * (EBI2 LCDC panel).  Only DISPLAY_2 destinations are accepted.
 * Register bank 0xa0000 serves the MDDI panel, 0xb0000 the other path.
 */
static void mdp_dma_s_update_lcd(struct msm_fb_data_type *mfd)
{
	MDPIBUF *iBuf = &mfd->ibuf;
	int mddi_dest = FALSE;
	uint32 outBpp = iBuf->bpp;
	uint32 dma_s_cfg_reg;
	uint8 *src;
	struct msm_fb_panel_data *pdata =
	    (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;

	dma_s_cfg_reg = DMA_PACK_TIGHT | DMA_PACK_ALIGN_LSB | DMA_OUT_SEL_AHB |
	    DMA_IBUF_NONCONTIGUOUS;

	if (mfd->fb_imgType == MDP_BGR_565)
		dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR;
	else
		dma_s_cfg_reg |= DMA_PACK_PATTERN_RGB;

	if (outBpp == 4)
		dma_s_cfg_reg |= DMA_IBUF_C3ALPHA_EN;

	if (outBpp == 2)
		dma_s_cfg_reg |= DMA_IBUF_FORMAT_RGB565;

	if (mfd->panel_info.pdest != DISPLAY_2) {
		printk(KERN_ERR "error: non-secondary type through dma_s!\n");
		return;
	}

	if (mfd->panel_info.type == MDDI_PANEL ||
	    mfd->panel_info.type == EXT_MDDI_PANEL) {
		dma_s_cfg_reg |= DMA_OUT_SEL_MDDI;
		mddi_dest = TRUE;
	} else {
		dma_s_cfg_reg |= DMA_AHBM_LCD_SEL_SECONDARY;
		outp32(MDP_EBI2_LCD1, mfd->data_port_phys);
	}

	src = (uint8 *) iBuf->buf;
	/* starting input address */
	src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width) * outBpp;

	/* MDP cmd block enable */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	/* PIXELSIZE */
	if (mfd->panel_info.type == MDDI_PANEL) {
		MDP_OUTP(MDP_BASE + 0xa0004,
			 (iBuf->dma_h << 16 | iBuf->dma_w));
		MDP_OUTP(MDP_BASE + 0xa0008, src);	/* ibuf address */
		MDP_OUTP(MDP_BASE + 0xa000c,
			 iBuf->ibuf_width * outBpp);	/* ystride */
	} else {
		MDP_OUTP(MDP_BASE + 0xb0004,
			 (iBuf->dma_h << 16 | iBuf->dma_w));
		MDP_OUTP(MDP_BASE + 0xb0008, src);	/* ibuf address */
		MDP_OUTP(MDP_BASE + 0xb000c,
			 iBuf->ibuf_width * outBpp);	/* ystride */
	}

	if (mfd->panel_info.bpp == 18) {
		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 666 18BPP */
		    DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
	} else {
		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 565 16BPP */
		    DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
	}

	if (mddi_dest) {
		if (mfd->panel_info.type == MDDI_PANEL) {
			MDP_OUTP(MDP_BASE + 0xa0010,
				 (iBuf->dma_y << 16) | iBuf->dma_x);
			MDP_OUTP(MDP_BASE + 0x00090, 1);
		} else {
			MDP_OUTP(MDP_BASE + 0xb0010,
				 (iBuf->dma_y << 16) | iBuf->dma_x);
			MDP_OUTP(MDP_BASE + 0x00090, 2);
		}
		MDP_OUTP(MDP_BASE + 0x00094,
			 (MDDI_VDO_PACKET_DESC << 16) |
			 mfd->panel_info.mddi.vdopkt);
	} else {
		/* setting LCDC write window */
		pdata->set_rect(iBuf->dma_x, iBuf->dma_y, iBuf->dma_w,
				iBuf->dma_h);
	}

	if (mfd->panel_info.type == MDDI_PANEL)
		MDP_OUTP(MDP_BASE + 0xa0000, dma_s_cfg_reg);
	else
		MDP_OUTP(MDP_BASE + 0xb0000, dma_s_cfg_reg);

	/* MDP cmd block disable */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	if (mfd->panel_info.type == MDDI_PANEL)
		mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
	else
		mdp_pipe_kickoff(MDP_DMA_E_TERM, mfd);
}