/*
 * mdp_pipe_kickoff() - kick off one MDP hardware engine.
 *
 * @term: engine selector (MDP_PPP_TERM, MDP_DMA2_TERM; on MDP4.0 builds
 *        also MDP_DMA_S/DMA_E/OVERLAY0/OVERLAY1 terms).
 * @mfd:  framebuffer device data (not dereferenced here).
 *
 * The PPP path is synchronous: it enables the IRQ, starts the engine and
 * blocks on a completion until the ISR signals PPP done.  The DMA/overlay
 * paths only poke the "start" register; completion is handled elsewhere.
 *
 * NOTE(review): the tail of this function looks like an extraction seam -
 * the "#else" arm of the final CONFIG_FB_MSM_MDP40 conditional runs
 * straight into the definition of mdp_dma2_update() (normally a separate
 * function/file), and one closing brace appears to be missing, so the
 * MDP40 configuration of this span does not brace-balance.  Tokens are
 * preserved exactly as found; confirm against the original mdp.c /
 * mdp_dma.c before relying on this layout.
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		mdp_ppp_waiting = TRUE;
		/* write to MDP_BASE+0x30 starts the PPP engine -
		 * exact register semantics per MDP register map,
		 * TODO confirm */
		outpdw(MDP_BASE + 0x30, 0x1000);
		/* synchronous: block until the PPP-done ISR completes us */
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			/* elapsed usec since kickoff (debug aid only;
			 * wraps across second boundaries) */
			mdp_ppp_timeval.tv_usec =
				now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_INFO("MDP-PPP: %d\n",
				(int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_INFO("MDP-DMA2: %d\n",
				(int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#if 0
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#endif
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		/* apply the LUT before starting the frame */
		mdp_lut_enable();
#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0); /* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0); /* start DMA */
#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0); /* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0); /* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0004, 0);	/* start OVERLAY0 */
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_lut_enable();
		outpdw(MDP_BASE + 0x0008, 0);	/* start OVERLAY1 */
	}
#else
}

/*
 * mdp_dma2_update() - push the current frame out through the DMA2 pipe
 * and wait (bounded) for the vsync-driven completion.
 *
 * @mfd: framebuffer device data; must have panel_power_on for any work
 *       to happen.
 *
 * Serialized by mfd->dma->mutex.  If a previous DMA is still busy we
 * wait for its completion first, then schedule the new transfer and,
 * except for the very first frame, wait up to HZ/10 for the vsync
 * completion before powering the command block back down.
 */
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	unsigned long flag;
	/* first frame after boot is kicked without waiting on vsync */
	static int first_vsync;
	int need_wait = 0;

	down(&mfd->dma->mutex);
	if ((mfd) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		/* sample the busy flag under the lock ... */
		spin_lock_irqsave(&mdp_spin_lock, flag);
		if (mfd->dma->busy == TRUE)
			need_wait++;
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		/* ... and wait for the in-flight DMA outside it */
		if (need_wait)
			wait_for_completion_killable(&mfd->dma->comp);

		/* schedule DMA to start */
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);
		INIT_COMPLETION(vsync_cntrl.vsync_comp);
		/* lazily (re)arm the primary read-pointer interrupt when
		 * vsync irq is off but clocks were disabled */
		if (!vsync_cntrl.vsync_irq_enabled &&
				vsync_cntrl.disabled_clocks) {
			MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
			outp32(MDP_INTR_CLEAR, MDP_PRIM_RDPTR);
			mdp_intr_mask |= MDP_PRIM_RDPTR;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
			mdp_enable_irq(MDP_VSYNC_TERM);
			vsync_cntrl.vsync_dma_enabled = 1;
		}
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

		/* wait until Vsync finishes the current job */
		if (first_vsync) {
			if (!wait_for_completion_killable_timeout
					(&vsync_cntrl.vsync_comp, HZ/10))
				pr_err("Timedout DMA %s %d", __func__,
					__LINE__);
		} else {
			first_vsync = 1;
		}
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

		/* signal if pan function is waiting for the
		 * update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
/*
 * mdp4_mddi_overlay_kickoff() - kick the OVERLAY0 engine for an MDDI
 * panel update (BPP-switch / vendor variant).
 *
 * @mfd:  framebuffer device data.
 * @pipe: overlay pipe to kick; mddi_pipe is the base layer.
 *
 * MDP4_NONBLOCKING build: optionally skips a base-layer kickoff when
 * other pipes are staged and the kick interval has not elapsed, waits
 * out any in-flight DMA, then starts OVERLAY0 and maintains a running
 * average of the kick interval for non-base layers.
 * Blocking build: starts OVERLAY0 and waits for its completion before
 * disabling the IRQ.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
#ifdef MDP4_NONBLOCKING
	unsigned long flag;

	/* use dma_p(overlay) pipe, change bpp into 16 */
#ifdef CONFIG_FB_MSM_BPP_SWITCH
	if(16 != mfd->panel_info.bpp) {
		mdp4_switch_bpp_config(mfd,16);
	}
#endif
	if (pipe == mddi_pipe) {  /* base layer */
		/* more than one pipe staged: give other pipes a chance
		 * to kick off if we are inside half the average interval */
		if (mdp4_overlay_pipe_staged(pipe->mixer_num) > 1) {
			if (time_before(jiffies,
			    (mddi_last_kick + mddi_kick_interval/2))) {
				mdp4_stat.kickoff_mddi_skip++;
				return;	/* let other pipe to kickoff */
			}
		}
	}

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	/* NOTE(review): pending_pipe (a shared global) is re-read here
	 * outside the spinlock - looks racy unless callers are already
	 * serialized (e.g. by ov_mutex); confirm with the call sites. */
	if (pending_pipe != NULL) {
		/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
		/* vendor variant: bounded, interruptible wait (1s) */
		wait_for_completion_interruptible_timeout(&pipe->comp,
			1 * HZ);
#else
		wait_for_completion_killable(&pipe->comp);
#endif
		pending_pipe = NULL;
	}
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	if (pipe != mddi_pipe) { /* non base layer */
		int intv;

		if (mddi_last_kick == 0)
			intv = 0;
		else
			intv = jiffies - mddi_last_kick;

		/* running average of the kickoff interval */
		mddi_kick_interval += intv;
		mddi_kick_interval /= 2;	/* average */
		mddi_last_kick = jiffies;
	}
	up(&mfd->sem);
#else
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
	wait_for_completion_interruptible_timeout(&pipe->comp, 1 * HZ);
#else
	wait_for_completion_killable(&pipe->comp);
#endif
	mdp_disable_irq(MDP_OVERLAY0_TERM);
#endif
}
/*
 * mdp4_mddi_overlay_kickoff() - kick the OVERLAY0 engine for an MDDI
 * panel update (plain variant).
 *
 * @mfd:  framebuffer device data.
 * @pipe: overlay pipe to kick; mddi_pipe is the base layer.
 *
 * NOTE(review): a second definition of this same symbol (with
 * BPP-switch / vendor conditionals) appears earlier in this file -
 * presumably two board/kernel variants were merged into one view;
 * confirm that only one is actually compiled.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
#ifdef MDP4_NONBLOCKING
	unsigned long flag;

	if (pipe == mddi_pipe) {  /* base layer */
		/* more than one pipe staged: skip this kick if we are
		 * inside half the average kick interval */
		if (mdp4_overlay_pipe_staged(pipe->mixer_num) > 1) {
			if (time_before(jiffies,
			    (mddi_last_kick + mddi_kick_interval/2))) {
				mdp4_stat.kickoff_mddi_skip++;
				return;	/* let other pipe to kickoff */
			}
		}
	}

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	/* NOTE(review): pending_pipe is re-checked outside the lock;
	 * assumes callers are serialized - confirm at call sites */
	if (pending_pipe != NULL) {
		/* wait until DMA finishes the current job */
		wait_for_completion_killable(&pipe->comp);
		pending_pipe = NULL;
	}
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	if (pipe != mddi_pipe) { /* non base layer */
		int intv;

		if (mddi_last_kick == 0)
			intv = 0;
		else
			intv = jiffies - mddi_last_kick;

		/* running average of the kickoff interval */
		mddi_kick_interval += intv;
		mddi_kick_interval /= 2;	/* average */
		mddi_last_kick = jiffies;
	}
	up(&mfd->sem);
#else
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&pipe->comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
#endif
}
/* * mdp4_overlay0_done_dsi_cmd: called from isr */ void mdp4_overlay0_done_dsi_cmd(struct mdp_dma_data *dma) { int diff; spin_lock(&mdp_done_lock); mdp_disable_irq_nosync(MDP_OVERLAY0_TERM); spin_lock(&mdp_spin_lock); rmb(); if (dsi_pipe->blt_addr == 0) { dma->busy = FALSE; dma->busy_pid = __LINE__; complete(&dma->comp); if (atomic_read(&busy_wait_cnt)) atomic_dec(&busy_wait_cnt); spin_unlock(&mdp_spin_lock); spin_unlock(&mdp_done_lock); mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE); return; } /* blt enabled */ if (dsi_pipe->blt_end == 0) dsi_pipe->ov_cnt++; #ifdef BLTDEBUG printk(KERN_INFO "%s: ov_cnt=%d dmap_cnt=%d\n", __func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt); #endif if (dsi_pipe->blt_cnt == 0) { /* first kickoff since blt enabled */ mdp_intr_mask |= INTR_DMA_P_DONE; outp32(MDP_INTR_ENABLE, mdp_intr_mask); } dsi_pipe->blt_cnt++; diff = dsi_pipe->ov_cnt - dsi_pipe->dmap_cnt; if (diff >= 2) { PR_DISP_INFO("%s(%d) found diff > 2\n", __func__, __LINE__); spin_unlock(&mdp_spin_lock); spin_unlock(&mdp_done_lock); return; } dma->busy = FALSE; dma->busy_pid = __LINE__; dma->dmap_busy = TRUE; dma->dmap_pid = (current->pid << 16)+__LINE__; complete(&dma->comp); if (atomic_read(&busy_wait_cnt)) atomic_dec(&busy_wait_cnt); spin_unlock(&mdp_spin_lock); mdp4_blt_xy_update(dsi_pipe); mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */ wmb(); /* make sure registers updated */ spin_unlock(&mdp_done_lock); #ifdef BLTDEBUG printk(KERN_INFO "%s: kickoff dmap\n", __func__); #endif /* kick off dmap */ outpdw(MDP_BASE + 0x000c, 0x0); wmb(); /* trigger dsi cmd engine */ mipi_dsi_cmd_mdp_sw_trigger(); }
/*
 * mdp_lcdc_update() - program the next frame's DMA source address for an
 * LCDC (or, on MDP4.0, HDMI/DMA_E) panel and optionally wait for the
 * frame interrupt.
 *
 * @mfd: framebuffer device data; returns immediately if the panel is off.
 *
 * Computes the scanout start address either from the fb_info geometry
 * (xoffset/yoffset into smem) or from a pre-swapped ibuf pointer, writes
 * it to the DMA base register, arms the done/frame-start interrupt under
 * mdp_spin_lock, and - when vsync_enable is set - blocks on the DMA
 * completion before disabling the IRQ again.
 */
void mdp_lcdc_update(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;
	uint32 dma_base;
	int irq_block = MDP_DMA2_TERM;
#ifdef CONFIG_FB_MSM_MDP40
	int intr = INTR_DMA_P_DONE;
#endif

	if (!mfd->panel_power_on)
		return;

	/* no need to power on cmd block since it's lcdc mode */

	if (!mfd->ibuf.visible_swapped) {
		/* derive scanout address from fb geometry */
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf += fbi->var.xoffset * bpp +
			fbi->var.yoffset * fbi->fix.line_length;
	} else {
		/* we've done something to update the pointer. */
		bpp = mfd->ibuf.bpp;
		buf = mfd->ibuf.buf;
	}
	dma_base = DMA_P_BASE;

#ifdef CONFIG_FB_MSM_MDP40
	/* HDMI goes out through the external DMA_E pipe instead */
	if (mfd->panel.type == HDMI_PANEL) {
		intr = INTR_DMA_E_DONE;
		irq_block = MDP_DMA_E_TERM;
		dma_base = DMA_E_BASE;
	}
#endif

	/* starting address */
	MDP_OUTP(MDP_BASE + dma_base + 0x8, (uint32) buf);

	/* enable LCDC irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(irq_block);
	INIT_COMPLETION(mfd->dma->comp);
	mfd->dma->waiting = TRUE;
#ifdef CONFIG_FB_MSM_MDP40
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#else
	/* pre-MDP4.0: wait on the frame-start interrupt instead */
	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
	mdp_intr_mask |= LCDC_FRAME_START;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#endif
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* only throttle to the frame interrupt when vsync is requested */
	if (mfd->ibuf.vsync_enable)
		wait_for_completion_killable(&mfd->dma->comp);

	mdp_disable_irq(irq_block);
}
/*
 * mdp4_mddi_overlay_kickoff() - kick the OVERLAY0 engine for an MDDI
 * panel update (SHLCDC board variant).
 *
 * @mfd:  framebuffer device data.
 * @pipe: overlay pipe to kick.
 *
 * Caller is expected to hold mfd->dma->ov_mutex (this function drops
 * and re-takes it around the vsync wait) - confirm at call sites.
 *
 * On SHLCDC boards: bails out early if DMA is still busy (briefly
 * cycling mfd->sem to let the finisher run), waits for vsync before
 * kicking, and reapplies the MASTER4 clock-select bits in register
 * 0x0028 when the "invalid" flag changed across the wait.  On
 * non-SHLCDC MDP4 v2.1 it applies the pending MASTER4 set/unset
 * requests recorded in the overlay status flags.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
				struct mdp4_overlay_pipe *pipe)
{
#ifdef CONFIG_SHLCDC_BOARD
	uint32 data;
	/* snapshot the invalid flag; compared again after the vsync wait */
	int master4_invalid_backupflg = master4_invalid_flg;
#endif /*CONFIG_SHLCDC_BOARD*/

#ifdef CONFIG_SHLCDC_BOARD
	if(mfd->dma->busy == TRUE){
		/* DMA still busy: release ov_mutex and briefly cycle the
		 * semaphore so the completion path can run, then skip
		 * this kickoff entirely */
		mutex_unlock(&mfd->dma->ov_mutex);
		down(&mfd->sem);
		up(&mfd->sem);
		mutex_lock(&mfd->dma->ov_mutex);
		return;
	}
	down(&mfd->sem);
#endif /*CONFIG_SHLCDC_BOARD*/

#ifndef CONFIG_SHLCDC_BOARD
	if (mdp_hw_revision == MDP4_REVISION_V2_1) {
		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_UNSET)) {
			uint32 data;

			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
				data |= 0x0200;
			else
				data |= 0x0100;
			MDP_OUTP(MDP_BASE + 0x00028, data);
			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_UNSET,
				false);
		}
		if (mdp4_overlay_status_read(MDP4_OVERLAY_TYPE_SET)) {
			uint32 data;

			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			MDP_OUTP(MDP_BASE + 0x00028, data);
			mdp4_overlay_status_write(MDP4_OVERLAY_TYPE_SET,
				false);
		}
	}
#endif /*CONFIG_SHLCDC_BOARD*/

	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;

#ifdef CONFIG_SHLCDC_BOARD
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;
	/* drop ov_mutex while sleeping for vsync, then re-take it */
	mutex_unlock(&mfd->dma->ov_mutex);
	mdp_wait_vsync(mfd);
	mutex_lock(&mfd->dma->ov_mutex);
#endif /*CONFIG_SHLCDC_BOARD*/

#ifdef CONFIG_SHLCDC_BOARD
	/* invalid flag flipped during the vsync wait: rewrite MASTER4 */
	if( master4_invalid_status != master4_invalid_backupflg ){
		if ((mdp_hw_revision == MDP4_REVISION_V2_1) &&
				(master4_invalid_backupflg == 0)) {
			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
				data |= 0x0200;
			else
				data |= 0x0100;
			MDP_OUTP(MDP_BASE + 0x00028, data);
		}else{
			data = inpdw(MDP_BASE + 0x0028);
			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
			MDP_OUTP(MDP_BASE + 0x00028, data);
		}
		master4_invalid_status = master4_invalid_backupflg;
	}
#endif /*CONFIG_SHLCDC_BOARD*/

	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);

#ifdef CONFIG_SHLCDC_BOARD
	up(&mfd->sem);
	/* wait until DMA finishes the current job */
	if (!wait_for_completion_timeout(&pipe->comp,
			msecs_to_jiffies(100))) {
		/* timed out: manually run the done handlers so state
		 * (busy flag, cursor) is not left stuck */
		printk(KERN_ERR "%s: completion_timeout\n", __func__);
		mdp4_overlay0_done_mddi(mfd->dma);
		mdp_hw_cursor_done();
	}
#endif /*CONFIG_SHLCDC_BOARD*/
}