/*
 * mdp_dsi_video_update() - push the current framebuffer out in DSI video mode.
 *
 * Programs the DMA_P start address from the fb pan offsets, arms the
 * LCDC_FRAME_START interrupt under mdp_spin_lock, then blocks (killable)
 * until the ISR completes mfd->dma->comp.  Whole-frame updates are
 * serialized by mfd->dma->mutex.
 *
 * NOTE(review): the return value of wait_for_completion_killable() is
 * ignored — if the task is killed the irq is still released below, but the
 * frame may not have started; confirm callers tolerate this.
 */
void mdp_dsi_video_update(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;
	int irq_block = MDP_DMA2_TERM;

	/* nothing to do while the panel is powered down */
	if (!mfd->panel_power_on)
		return;

	down(&mfd->dma->mutex);

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	/* apply the pan (xoffset/yoffset) to locate the visible frame */
	buf += fbi->var.xoffset * bpp + fbi->var.yoffset * fbi->fix.line_length;

	/* no need to power on cmd block since it's dsi mode */
	/* starting address */
	MDP_OUTP(MDP_BASE + DMA_P_BASE + 0x8, (uint32) buf);

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(irq_block);
	INIT_COMPLETION(mfd->dma->comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
	mdp_intr_mask |= LCDC_FRAME_START;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* block until the ISR signals frame start */
	wait_for_completion_killable(&mfd->dma->comp);
	mdp_disable_irq(irq_block);
	up(&mfd->dma->mutex);
}
void mdp_dma3_update(struct msm_fb_data_type *mfd) { struct fb_info *fbi = mfd->fbi; uint8 *buf; int bpp; unsigned long flag; if (!mfd->panel_power_on) return; /* no need to power on cmd block since dma3 is running */ bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; buf += calc_fb_offset(mfd, fbi, bpp); MDP_OUTP(MDP_BASE + 0xC0008, (uint32) buf >> 3); spin_lock_irqsave(&mdp_spin_lock, flag); mdp_enable_irq(MDP_DMA3_TERM); INIT_COMPLETION(mfd->dma->comp); mfd->dma->waiting = TRUE; outp32(MDP_INTR_CLEAR, TV_OUT_DMA3_START); mdp_intr_mask |= TV_OUT_DMA3_START; outp32(MDP_INTR_ENABLE, mdp_intr_mask); spin_unlock_irqrestore(&mdp_spin_lock, flag); wait_for_completion_killable(&mfd->dma->comp); mdp_disable_irq(MDP_DMA3_TERM); }
/*
 * mdp4_mddi_dma_s_kickoff() - start the dma_s pipe and wait for it to finish.
 *
 * SHLCDC boards serialize the kick under mfd->sem, wait for vsync before the
 * kickoff, and complete on the per-pipe dmas_comp; the generic build simply
 * kicks and waits on mfd->dma->comp.
 * NOTE(review): pending_pipe is written without a lock here — presumably the
 * ISR is the only other reader; confirm against the DMA_S interrupt handler.
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
#ifdef CONFIG_SHLCDC_BOARD
	down(&mfd->sem);
#endif /*CONFIG_SHLCDC_BOARD*/
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
#ifdef CONFIG_SHLCDC_BOARD
	INIT_COMPLETION(pipe->dmas_comp);
#endif /*CONFIG_SHLCDC_BOARD*/
	mfd->ibuf_flushed = TRUE;
#ifdef CONFIG_SHLCDC_BOARD
	pending_pipe = pipe;
#endif /*CONFIG_SHLCDC_BOARD*/
#ifdef CONFIG_SHLCDC_BOARD
	/* align the kickoff with the next vsync */
	mdp_wait_vsync(mfd);
#endif /*CONFIG_SHLCDC_BOARD*/
	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
#ifdef CONFIG_SHLCDC_BOARD
	up(&mfd->sem);
#endif /*CONFIG_SHLCDC_BOARD*/
	/* wait until DMA finishes the current job */
#ifdef CONFIG_SHLCDC_BOARD
	wait_for_completion_killable(&pipe->dmas_comp);
	pending_pipe = NULL;
#else
	wait_for_completion(&mfd->dma->comp);
#endif /*CONFIG_SHLCDC_BOARD*/
	mdp_disable_irq(MDP_DMA_S_TERM);
}
/*
 * mdp_dma2_update() - flush the current image buffer out through DMA2.
 *
 * Serializes whole-frame updates via mfd->dma->mutex and skips the update
 * when DMA is already busy or the panel is off.  On completion it wakes any
 * waiter on mfd->pan_comp (presumably the pan_display path — confirm).
 * NOTE(review): the matching #if for the #endif below is outside this chunk.
 */
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);
		/* schedule the transfer, then release the kick lock */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);
		/* block until the ISR completes the transfer */
		wait_for_completion_killable(&mfd->dma->comp);
		mdp_disable_irq(MDP_DMA2_TERM);
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
/*
 * mdp_dma_video_vsync_ctrl() - reference the vsync (frame-start) interrupt
 * on or off for DSI/LCDC video mode.
 *
 * Enable: powers on the cmd block, then arms LCDC_FRAME_START and takes a
 * MDP_VSYNC_TERM irq reference under mdp_spin_lock.
 * Disable: waits for the in-flight vsync completion before dropping the irq
 * reference.
 * NOTE(review): the disable path uses an uninterruptible wait on
 * vsync_cntrl.vsync_wait — if the ISR never fires (e.g. panel already off)
 * this blocks indefinitely; confirm the ISR always completes it.
 */
void mdp_dma_video_vsync_ctrl(int enable)
{
	unsigned long flag;

	/* no-op if already in the requested state */
	if (vsync_cntrl.vsync_irq_enabled == enable)
		return;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (!enable)
		INIT_COMPLETION(vsync_cntrl.vsync_wait);
	vsync_cntrl.vsync_irq_enabled = enable;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (enable) {
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		spin_lock_irqsave(&mdp_spin_lock, flag);
		outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
		mdp_intr_mask |= LCDC_FRAME_START;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		mdp_enable_irq(MDP_VSYNC_TERM);
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
	} else {
		/* let the pending vsync drain before releasing the irq */
		wait_for_completion(&vsync_cntrl.vsync_wait);
		mdp_disable_irq(MDP_VSYNC_TERM);
	}
}
/*
 * mdp4_mddi_dma_s_kickoff() - start the dma_s pipe and wait for completion.
 *
 * Vendor (Huawei) variant: optionally forces the panel to 24 bpp before the
 * kick, and bounds the completion wait with a 2 s timeout.
 * NOTE(review): the timeout/interrupt result of
 * wait_for_completion_interruptible_timeout() is ignored, so a timed-out
 * frame still releases the irq reference below — confirm this is intended.
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	/*< DTS2010080403325 lijianzhao 20100804 begin */
	/* use dma_s pipe ,change bpp into 24 */
#ifdef CONFIG_FB_MSM_BPP_SWITCH
	if(24 != mfd->panel_info.bpp)
	{
		mdp4_switch_bpp_config(mfd,24);
	}
#endif
	/* DTS2010080403325 lijianzhao 20100804 end >*/
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	mfd->ibuf_flushed = TRUE;
	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
	/* wait until DMA finishes the current job */
	/* <DTS2010100802855 hufeng 20101008 begin */
#ifdef CONFIG_HUAWEI_KERNEL
	/* huawei modify */
	wait_for_completion_interruptible_timeout(&mfd->dma->comp, 2 * HZ);
#else
	wait_for_completion(&mfd->dma->comp);
#endif
	/* DTS2010100802855 hufeng 20101008 end> */
	mdp_disable_irq(MDP_DMA_S_TERM);
}
/*
 * mdp4_overlay_dsi_video_wait4event() - block until the next DMA_P-done
 * (dmap != 0) or primary-vsync event in DSI video mode.
 *
 * Waiters are reference-counted in wait4vsync_cnt: the first waiter arms
 * the interrupt and the last one to wake releases it, so concurrent callers
 * share a single armed interrupt and one dsi_pipe->comp completion.
 * NOTE(review): the decrement path uses plain spin_lock() while the arm
 * path uses spin_lock_irqsave() — presumably safe because the ISR does not
 * take this path; confirm against the interrupt handler.
 */
static void mdp4_overlay_dsi_video_wait4event(struct msm_fb_data_type *mfd,
					int dmap)
{
	unsigned long flag;

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (wait4vsync_cnt == 0) {
		/* first waiter: arm the requested interrupt */
		INIT_COMPLETION(dsi_pipe->comp);
		mfd->dma->waiting = TRUE;
		if (dmap)
			mdp_intr_mask |= INTR_DMA_P_DONE;
		else
			mdp_intr_mask |= INTR_PRIMARY_VSYNC;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
		mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
	}
	wait4vsync_cnt++;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&dsi_pipe->comp);

	spin_lock(&mdp_spin_lock);
	wait4vsync_cnt--;
	/* last waiter out releases the irq reference */
	if (wait4vsync_cnt == 0)
		mdp_disable_irq(MDP_DMA2_TERM);
	spin_unlock(&mdp_spin_lock);
}
/*
 * mdp4_overlay_dsi_video_wait4event:
 * INTR_DMA_P_DONE and INTR_PRIMARY_VSYNC event only
 * no INTR_OVERLAY0_DONE event allowed.
 *
 * Returns immediately when the DSI video timing generator is disabled
 * (bit 0 of the DSI_VIDEO_BASE register is clear), since no event would
 * ever fire.  For vsync/dma-done waits it also programs a pc timer from
 * the computed vsync interval (initialized once on first use) —
 * presumably a power-collapse guard; confirm against init_pc_timer().
 */
static void mdp4_overlay_dsi_video_wait4event(struct msm_fb_data_type *mfd,
					int intr_done)
{
	unsigned long flag;
	unsigned int data;
	unsigned long vsync_interval;

	data = inpdw(MDP_BASE + DSI_VIDEO_BASE);
	data &= 0x01;
	if (data == 0)	/* timing generator disabled */
		return;

	if ((intr_done == INTR_PRIMARY_VSYNC) ||
			(intr_done == INTR_DMA_P_DONE)) {
		if (first_time) {
			/* one-time timer setup */
			init_pc_timer();
			first_time = 0;
		}
		vsync_interval = compute_vsync_interval();
		program_pc_timer(vsync_interval);
	}

	/* arm the requested interrupt and wait for the ISR to complete us */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	INIT_COMPLETION(dsi_video_comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, intr_done);
	mdp_intr_mask |= intr_done;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	wait_for_completion(&dsi_video_comp);
	mdp_disable_irq(MDP_DMA2_TERM);
}
int mdp_lcdc_off(struct platform_device *pdev) { int ret = 0; /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); MDP_OUTP(MDP_BASE + LCDC_BASE, 0); /* MDP cmd block disable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); mdp_disable_irq(MDP_DMA2_TERM); /* disable intr */ ret = panel_next_off(pdev); /* delay to make sure the last frame finishes */ msleep(16); #ifdef LCDC_RGB_UNSTAGE /* dis-engage rgb0 from mixer0 */ if (lcdc_pipe) mdp4_mixer_stage_down(lcdc_pipe); #endif #ifdef CONFIG_MSM_BUS_SCALING mdp_bus_scale_update_request(0); #endif return ret; }
/*
 * mdp_dma2_update() - flush the current image buffer out through DMA2.
 *
 * Serializes whole-frame updates via mfd->dma->mutex and skips the update
 * when DMA is already busy or the panel is off.
 * NOTE(review): the matching #if for the #endif below is outside this chunk.
 */
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);
		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);
		/* wait until DMA finishes the current job */
		wait_for_completion_killable(&mfd->dma->comp);
		mdp_disable_irq(MDP_DMA2_TERM);
		/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
/*
 * mdp_histogram_ctrl() - enable or disable histogram collection, but only
 * when a user has previously started it (mdp_is_hist_start).
 *
 * Enable takes the MDP_HISTOGRAM_TERM irq reference and programs the
 * histogram block registers (MDP4 layout at 0x95000, older at 0x94000)
 * with the cmd block powered on for the writes; disable just drops the irq
 * reference.  Always returns 0.
 */
int mdp_histogram_ctrl(boolean en)
{
	unsigned long flag;
	boolean hist_start;

	/* snapshot the start flag under the spinlock */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	hist_start = mdp_is_hist_start;
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if (hist_start == TRUE) {
		if (en == TRUE) {
			mdp_enable_irq(MDP_HISTOGRAM_TERM);
			mdp_hist.frame_cnt = 1;
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
#ifdef CONFIG_FB_MSM_MDP40
			MDP_OUTP(MDP_BASE + 0x95010, 1);
			MDP_OUTP(MDP_BASE + 0x9501c, INTR_HIST_DONE);
			MDP_OUTP(MDP_BASE + 0x95004, 1);
			MDP_OUTP(MDP_BASE + 0x95000, 1);
#else
			MDP_OUTP(MDP_BASE + 0x94004, 1);
			MDP_OUTP(MDP_BASE + 0x94000, 1);
#endif
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
		} else
			mdp_disable_irq(MDP_HISTOGRAM_TERM);
	}
	return 0;
}
/*
 * mdp_lcdc_update() - push the current framebuffer out in LCDC mode.
 *
 * Computes the visible frame address from the fb pan offsets, programs the
 * DMA start-address register, arms the frame-done/frame-start interrupt and
 * blocks (killable) on mfd->dma->comp.  On MDP4 with an HDMI panel the
 * update is redirected to the DMA_E block and its DONE interrupt.
 * NOTE(review): the vendor "firstupdate" hack (LCD_LUYA tag) skips the very
 * first address programming — presumably to avoid disturbing a splash frame;
 * confirm before removing.
 */
void mdp_lcdc_update(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;
	uint32 dma_base;
	int irq_block = MDP_DMA2_TERM;
#ifdef CONFIG_FB_MSM_MDP40
	int intr = INTR_DMA_P_DONE;
#endif

	if (!mfd->panel_power_on)
		return;

	/* no need to power on cmd block since it's lcdc mode */
	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf += fbi->var.xoffset * bpp + fbi->var.yoffset * fbi->fix.line_length;
	dma_base = DMA_P_BASE;

#ifdef CONFIG_FB_MSM_MDP40
	if (mfd->panel.type == HDMI_PANEL) {
		/* HDMI frames go out over DMA_E instead of DMA_P */
		intr = INTR_DMA_E_DONE;
		irq_block = MDP_DMA_E_TERM;
		dma_base = DMA_E_BASE;
	}
#endif

	if(firstupdate)	/////LCD_LUYA_20100610_01
	{
		firstupdate = FALSE;
	}
	else
	{
		/* starting address */
		MDP_OUTP(MDP_BASE + dma_base + 0x8, (uint32) buf);
	}

	/* enable LCDC irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(irq_block);
	INIT_COMPLETION(mfd->dma->comp);
	mfd->dma->waiting = TRUE;
#ifdef CONFIG_FB_MSM_MDP40
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#else
	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
	mdp_intr_mask |= LCDC_FRAME_START;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#endif
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	wait_for_completion_killable(&mfd->dma->comp);
	mdp_disable_irq(irq_block);
}
/*
 * mdp4_mddi_overlay_kickoff() - start OVERLAY0 for an MDDI panel (ta8851).
 *
 * Non-blocking build: kicks the pipe and only waits (to then turn the
 * display on) when the panel is not yet displaying.
 * Blocking build: classic kick-then-wait on the per-pipe completion.
 * NOTE(review): the MDP4_NONBLOCKING path never calls mdp_disable_irq()
 * to balance the mdp_enable_irq() above — presumably the ISR drops the
 * reference when dma->busy clears; confirm against the overlay0 ISR.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
#ifdef MDP4_NONBLOCKING
	boolean disp_state;

	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	disp_state = mddi_ta8851_displaying_chk();
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);
	if( disp_state != TRUE )
	{
		/* first frame: wait for it, then turn the display on */
		INIT_COMPLETION(mdp4_overlay_completion);
		wait_for_completion_killable(&mdp4_overlay_completion);
		mddi_ta8851_enable_display();
	}
#else
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;
	/* start OVERLAY pipe */
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);
	/* wait until DMA finishes the current job */
	wait_for_completion_killable(&pipe->comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
	if( mddi_ta8851_displaying_chk() != TRUE )
		mddi_ta8851_enable_display();
#endif
}
/*
 * mdp4_mddi_overlay_kickoff() - start OVERLAY0 for an MDDI panel.
 *
 * Non-blocking build: skips a base-pipe kick when other pipes are staged
 * and the previous kick was less than half a kick-interval ago (throttling,
 * counted in mdp4_stat.kickoff_mddi_skip); waits for any in-flight job
 * before kicking; maintains a running average of the kick interval for
 * non-base pipes.
 * NOTE(review): pending_pipe is tested outside mdp_spin_lock after being
 * set under it — if the ISR clears it in that window the wait is skipped or
 * waits on a completed comp; looks racy, confirm against the ISR.
 */
void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
#ifdef MDP4_NONBLOCKING
	unsigned long flag;

	if (pipe == mddi_pipe) {
		if (mdp4_overlay_pipe_staged(pipe->mixer_num) > 1) {
			if (time_before(jiffies,
				(mddi_last_kick + mddi_kick_interval/2))) {
				/* too soon after the last kick: drop it */
				mdp4_stat.kickoff_mddi_skip++;
				return;
			}
		}
	}

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	if (pending_pipe != NULL) {
		/* drain the in-flight job before kicking a new one */
		wait_for_completion_killable(&pipe->comp);
		pending_pipe = NULL;
	}

	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	if (pipe != mddi_pipe) {
		/* running average of the inter-kick interval */
		int intv;

		intv = jiffies - mddi_last_kick;
		mddi_kick_interval += intv;
		mddi_kick_interval /= 2;
		mddi_last_kick = jiffies;
	}
	up(&mfd->sem);
#else
	down(&mfd->sem);
	mdp_enable_irq(MDP_OVERLAY0_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->comp);
	pending_pipe = pipe;
	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
	up(&mfd->sem);
	wait_for_completion_killable(&pipe->comp);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
#endif
}
/*
 * mdp_cursor_ctrl_workqueue_handler() - deferred work that drops the
 * OVERLAY0 irq reference (presumably taken by the cursor-update path —
 * confirm against the caller that queues this work).
 */
void mdp_cursor_ctrl_workqueue_handler(struct work_struct *work)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&mdp_spin_lock, irq_flags);
	mdp_disable_irq(MDP_OVERLAY0_TERM);
	spin_unlock_irqrestore(&mdp_spin_lock, irq_flags);
}
/*should hold mdp_hist_mutex before calling this function*/
/*
 * _mdp_histogram_ctrl() - turn histogram data collection on or off.
 *
 * Enable: powers on the cmd block, takes the histogram irq reference,
 * clears then unmasks the DONE/RESET_SEQ_DONE interrupts, marks the
 * collection state valid and resets the histogram block.
 * Disable: reverses the state, masks and clears the interrupts, completes
 * mdp_hist_comp so any reader blocked in the do-histogram path wakes, and
 * drops the irq reference.  Returns -EINVAL when already in the requested
 * state.
 */
int _mdp_histogram_ctrl(boolean en)
{
	unsigned long hist_base;
	uint32_t status;

	/* MDP4 places the histogram block at 0x95000, older MDPs at 0x94000 */
	if (mdp_rev >= MDP_REV_40)
		hist_base = 0x95000;
	else
		hist_base = 0x94000;

	if (en == TRUE) {
		if (mdp_is_hist_data)
			return -EINVAL;

		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_hist_frame_cnt = 1;
		mdp_enable_irq(MDP_HISTOGRAM_TERM);
		INIT_COMPLETION(mdp_hist_comp);

		/*Clear the interrupts before enabling them*/
		MDP_OUTP(MDP_BASE + hist_base + 0x18, INTR_HIST_DONE |
			INTR_HIST_RESET_SEQ_DONE);
		MDP_OUTP(MDP_BASE + hist_base + 0x10, 1);
		MDP_OUTP(MDP_BASE + hist_base + 0x1c, INTR_HIST_DONE |
			INTR_HIST_RESET_SEQ_DONE);

		mdp_is_hist_data = TRUE;
		mdp_is_hist_valid = TRUE;
		mdp_is_hist_init = FALSE;

		__mdp_histogram_reset();
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	} else {
		if (!mdp_is_hist_data)
			return -EINVAL;

		mdp_is_hist_data = FALSE;
		mdp_is_hist_valid = FALSE;
		mdp_is_hist_init = FALSE;

		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		/* mask, then clear, the histogram interrupts */
		status = inpdw(MDP_BASE + hist_base + 0x1C);
		status &= ~(INTR_HIST_DONE | INTR_HIST_RESET_SEQ_DONE);
		MDP_OUTP(MDP_BASE + hist_base + 0x1C, status);
		MDP_OUTP(MDP_BASE + hist_base + 0x18, INTR_HIST_DONE |
			INTR_HIST_RESET_SEQ_DONE);
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

		/* release any reader blocked waiting for histogram data */
		complete(&mdp_hist_comp);
		mdp_disable_irq(MDP_HISTOGRAM_TERM);
	}

	return 0;
}
/*
 * mdp4_atv_overlay() - push the framebuffer out through the ATV overlay
 * pipe (OVERLAY1) and wait for the overlay-done interrupt.
 *
 * Uses the iommu-mapped address when mfd->map_buffer is set, otherwise the
 * physical smem address plus the pan offset.  Performance/bandwidth
 * requests are raised before the kick and dropped after.
 */
void mdp4_atv_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	unsigned long flag;
	struct mdp4_overlay_pipe *pipe;

	if (!mfd->panel_power_on)
		return;

	/* */
	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf_offset = calc_fb_offset(mfd, fbi, bpp);

	mutex_lock(&mfd->dma->ov_mutex);

	pipe = atv_pipe;
	if (mfd->map_buffer) {
		/* iommu-mapped buffer: use its iova */
		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] + \
			buf_offset;
		pr_debug("start 0x%lx srcp0_addr 0x%x\n", mfd->
			map_buffer->iova[0], pipe->srcp0_addr);
	} else {
		pipe->srcp0_addr = (uint32)(buf + buf_offset);
	}
	mdp_update_pm(mfd, vsync_ctrl_db[0].vsync_time);

	mdp4_overlay_mdp_perf_req(pipe, mfd);
	mdp4_overlay_mdp_perf_upd(mfd, 1);
	mdp4_overlay_rgb_setup(pipe);
	mdp4_overlay_reg_flush(pipe, 0);
	mdp4_mixer_stage_up(pipe, 0);
	mdp4_mixer_stage_commit(pipe->mixer_num);

	printk(KERN_INFO "mdp4_atv_overlay: pipe=%x ndx=%d\n",
		(int)pipe, pipe->pipe_ndx);

	/* arm OVERLAY1_DONE and wait for the ISR */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY1_TERM);
	INIT_COMPLETION(atv_pipe->comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
	mdp_intr_mask |= INTR_OVERLAY1_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	wait_for_completion_killable(&atv_pipe->comp);
	mdp_disable_irq(MDP_OVERLAY1_TERM);
	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mdp4_stat.kickoff_atv++;
	mutex_unlock(&mfd->dma->ov_mutex);
}
/*
 * mdp4_atv_overlay() - push the framebuffer out through the ATV overlay
 * pipe (OVERLAY1) and wait for the overlay-done interrupt.
 *
 * Variant without the perf-request path: sets up the RGB pipe, stages it,
 * optionally flushes registers (QCT_PATCH), then arms INTR_OVERLAY1_DONE
 * and blocks (killable) on atv_pipe->comp.  The MDP clock level is
 * re-evaluated afterwards while the block is idle.
 */
void mdp4_atv_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	unsigned long flag;
	struct mdp4_overlay_pipe *pipe;

	if (!mfd->panel_power_on)
		return;

	/* no need to power on cmd block since it's lcdc mode */
	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf_offset = calc_fb_offset(mfd, fbi, bpp);

	mutex_lock(&mfd->dma->ov_mutex);

	pipe = atv_pipe;
	if (mfd->map_buffer) {
		/* iommu-mapped buffer: use its iova */
		pipe->srcp0_addr = (unsigned int)mfd->map_buffer->iova[0] + \
			buf_offset;
		pr_debug("start 0x%lx srcp0_addr 0x%x\n", mfd->
			map_buffer->iova[0], pipe->srcp0_addr);
	} else {
		pipe->srcp0_addr = (uint32)(buf + buf_offset);
	}

	mdp4_overlay_rgb_setup(pipe);
	mdp4_mixer_stage_up(pipe);
#ifdef QCT_PATCH
	mdp4_overlay_reg_flush(pipe, 0);
#endif

	printk(KERN_INFO "mdp4_atv_overlay: pipe=%x ndx=%d\n",
		(int)pipe, pipe->pipe_ndx);

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_OVERLAY1_TERM);
	INIT_COMPLETION(atv_pipe->comp);
	mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
	mdp_intr_mask |= INTR_OVERLAY1_DONE;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	wait_for_completion_killable(&atv_pipe->comp);
	mdp_disable_irq(MDP_OVERLAY1_TERM);

	/* change mdp clk while mdp is idle` */
	mdp4_set_perf_level();

	mdp4_stat.kickoff_atv++;
	mutex_unlock(&mfd->dma->ov_mutex);
}
/*
 * mdp_lcdc_update() - push the current framebuffer out in LCDC mode.
 *
 * Serializes updates under mfd->dma->mutex, programs the DMA start address
 * from calc_fb_offset(), arms the frame-done/frame-start interrupt under
 * mdp_spin_lock, then blocks (killable) on mfd->dma->comp.  On MDP4 with
 * an HDMI panel the update is redirected to the DMA_E block and its DONE
 * interrupt.
 */
void mdp_lcdc_update(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	int bpp;
	unsigned long flag;
	uint32 dma_base;
	int irq_block = MDP_DMA2_TERM;
#ifdef CONFIG_FB_MSM_MDP40
	int intr = INTR_DMA_P_DONE;
#endif

	if (!mfd->panel_power_on)
		return;

	down(&mfd->dma->mutex);

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf += calc_fb_offset(mfd, fbi, bpp);
	dma_base = DMA_P_BASE;

#ifdef CONFIG_FB_MSM_MDP40
	if (mfd->panel.type == HDMI_PANEL) {
		/* HDMI frames go out over DMA_E instead of DMA_P */
		intr = INTR_DMA_E_DONE;
		irq_block = MDP_DMA_E_TERM;
		dma_base = DMA_E_BASE;
	}
#endif

	/* starting address */
	MDP_OUTP(MDP_BASE + dma_base + 0x8, (uint32) buf);

	/* arm the completion interrupt */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(irq_block);
	INIT_COMPLETION(mfd->dma->comp);
	mfd->dma->waiting = TRUE;
#ifdef CONFIG_FB_MSM_MDP40
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#else
	outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
	mdp_intr_mask |= LCDC_FRAME_START;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
#endif
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	wait_for_completion_killable(&mfd->dma->comp);
	mdp_disable_irq(irq_block);
	up(&mfd->dma->mutex);
}
static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist) { int ret = 0; if (!hist->frame_cnt || (hist->bin_cnt == 0) || (hist->bin_cnt > MDP_HIST_MAX_BIN)) return -EINVAL; INIT_COMPLETION(mdp_hist_comp); mdp_hist.bin_cnt = hist->bin_cnt; mdp_hist.r = (hist->r) ? mdp_hist_r : 0; mdp_hist.g = (hist->g) ? mdp_hist_g : 0; mdp_hist.b = (hist->b) ? mdp_hist_b : 0; mdp_enable_irq(MDP_HISTOGRAM_TERM); mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); #ifdef CONFIG_FB_MSM_MDP40 MDP_OUTP(MDP_BASE + 0x95004, hist->frame_cnt); MDP_OUTP(MDP_BASE + 0x95000, 1); #else MDP_OUTP(MDP_BASE + 0x94004, hist->frame_cnt); MDP_OUTP(MDP_BASE + 0x94000, 1); #endif mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); wait_for_completion_killable(&mdp_hist_comp); /* disable the irq for histogram since we handled it when the control reaches here */ mdp_disable_irq(MDP_HISTOGRAM_TERM); if (hist->r) { ret = copy_to_user(hist->r, mdp_hist.r, hist->bin_cnt*4); if (ret) goto hist_err; } if (hist->g) { ret = copy_to_user(hist->g, mdp_hist.g, hist->bin_cnt*4); if (ret) goto hist_err; } if (hist->b) { ret = copy_to_user(hist->b, mdp_hist.b, hist->bin_cnt*4); if (ret) goto hist_err; } return 0; hist_err: printk(KERN_ERR "%s: invalid hist buffer\n", __func__); return ret; }
/*
 * mdp4_overlay_dtv_wait4_ov_done() - wait for the in-flight DTV overlay
 * job, then release the OVERLAY1 irq reference.
 *
 * Skips the wait when the DTV timing generator is off (bit 0 of DTV_BASE
 * clear) or no pipe was kicked.
 * NOTE(review): mfd->ov_start is cleared unconditionally, and the early
 * returns skip mdp_disable_irq() — presumably no irq reference was taken
 * in those cases; confirm against the kickoff path.
 */
static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd,
	struct mdp4_overlay_pipe *pipe)
{
	u32 data = inpdw(MDP_BASE + DTV_BASE);

	mfd->ov_start = false;
	if (!(data & 0x1) || (pipe == NULL))
		return;
	wait_for_completion_killable(&dtv_pipe->comp);
	mdp_disable_irq(MDP_OVERLAY1_TERM);
}
/*
 * mdp4_mddi_dma_s_kickoff() - start the dma_s pipe and block
 * (uninterruptibly) until the ISR completes mfd->dma->comp.
 *
 * The irq reference, busy flag and flush flag must be set before the
 * kickoff; the sequence is order-critical.
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	mfd->ibuf_flushed = TRUE;
	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);

	/* wait until DMA finishes the current job */
	wait_for_completion(&mfd->dma->comp);
	mdp_disable_irq(MDP_DMA_S_TERM);
}
/*
 * mdp4_overlay_lcdc_wait4vsync() - block until the next primary vsync in
 * LCDC mode.
 *
 * Arms INTR_PRIMARY_VSYNC under mdp_spin_lock, then waits (killable) on
 * lcdc_comp.  If the panel is already powered off the wait is skipped and
 * the irq reference and waiting flag are rolled back instead.
 * NOTE(review): the power check happens after the interrupt is armed, so
 * a stray vsync may still fire once — presumably harmless; confirm.
 */
void mdp4_overlay_lcdc_wait4vsync(struct msm_fb_data_type *mfd)
{
	unsigned long flag;

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
	INIT_COMPLETION(lcdc_comp);
	if (mfd->dma->waiting == FALSE)
		mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_PRIMARY_VSYNC);
	mdp_intr_mask |= INTR_PRIMARY_VSYNC;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	if( !mfd->panel_power_on )
	{
		/* panel off: roll back instead of waiting forever */
		mfd->dma->waiting = FALSE;
		mdp_disable_irq(MDP_DMA2_TERM);
		return;
	}

	wait_for_completion_killable(&lcdc_comp);
	mdp_disable_irq(MDP_DMA2_TERM);
}
/*should hold mdp_hist_mutex before calling this function*/
/*
 * _mdp_histogram_ctrl() - turn histogram data collection on or off.
 *
 * Enable: powers on the cmd block, takes the histogram irq reference,
 * unmasks INTR_HIST_DONE (MDP4 only) and starts collection for
 * mdp_hist_frame_cnt frames.  Disable: clears the data flag, completes
 * mdp_hist_comp to release any blocked reader, masks and clears the DONE
 * interrupt (MDP4), and drops the irq reference.  Returns -EINVAL when
 * already in the requested state.
 */
int _mdp_histogram_ctrl(boolean en)
{
	unsigned long flag;
	unsigned long hist_base;
	uint32_t status;

	/* MDP4 places the histogram block at 0x95000, older MDPs at 0x94000 */
	if (mdp_rev >= MDP_REV_40)
		hist_base = 0x95000;
	else
		hist_base = 0x94000;

	if (en == TRUE) {
		if (mdp_is_hist_start)
			return -EINVAL;

		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mdp_hist_frame_cnt = 1;
		mdp_enable_irq(MDP_HISTOGRAM_TERM);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		if (mdp_is_hist_start == FALSE && mdp_rev >= MDP_REV_40) {
			MDP_OUTP(MDP_BASE + hist_base + 0x10, 1);
			MDP_OUTP(MDP_BASE + hist_base + 0x1c, INTR_HIST_DONE);
		}
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		/* program the frame count and start collection */
		MDP_OUTP(MDP_BASE + hist_base + 0x4, mdp_hist_frame_cnt);
		MDP_OUTP(MDP_BASE + hist_base, 1);
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
		mdp_is_hist_data = TRUE;
	} else {
		if (!mdp_is_hist_start && !mdp_is_hist_data)
			return -EINVAL;

		mdp_is_hist_data = FALSE;
		/* release any reader blocked waiting for histogram data */
		complete(&mdp_hist_comp);

		if (mdp_rev >= MDP_REV_40) {
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
			/* mask, then clear, the DONE interrupt */
			status = inpdw(MDP_BASE + hist_base + 0x1C);
			status &= ~INTR_HIST_DONE;
			MDP_OUTP(MDP_BASE + hist_base + 0x1C, status);
			MDP_OUTP(MDP_BASE + hist_base + 0x18, INTR_HIST_DONE);
			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
				FALSE);
		}

		mdp_disable_irq(MDP_HISTOGRAM_TERM);
	}

	return 0;
}
/*
 * mdp4_overlay_dtv_wait4_ov_done() - wait (bounded to two vsync periods)
 * for the in-flight DTV overlay job, then release the OVERLAY1 irq
 * reference.
 *
 * Only proceeds when an overlay was actually started (mfd->ov_start),
 * which it consumes; also skips the wait when the DTV timing generator is
 * off (bit 0 of DTV_BASE clear) or no pipe was kicked.
 * NOTE(review): the early returns skip mdp_disable_irq() — presumably no
 * irq reference was taken in those cases; confirm against the kickoff.
 */
static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd,
	struct mdp4_overlay_pipe *pipe)
{
	u32 data = inpdw(MDP_BASE + DTV_BASE);

	/* consume the pending-overlay flag; nothing to wait for otherwise */
	if (mfd->ov_start)
		mfd->ov_start = false;
	else
		return;
	if (!(data & 0x1) || (pipe == NULL))
		return;
	wait_for_completion_timeout(&dtv_pipe->comp,
			msecs_to_jiffies(VSYNC_PERIOD*2));
	mdp_disable_irq(MDP_OVERLAY1_TERM);
}
/*
 * mdp4_overlay_dtv_wait4_ov_done() - wait (killable, bounded to HZ/10)
 * for the in-flight DTV overlay job, then release the OVERLAY1 irq
 * reference under mdp_done_lock.
 *
 * Skips the wait when the DTV timing generator is off (bit 0 of DTV_BASE
 * clear) or no pipe was kicked.
 * NOTE(review): the early returns skip mdp_disable_irq() — presumably no
 * irq reference was taken in those cases; confirm against the kickoff.
 */
static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd,
	struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;
	u32 data = inpdw(MDP_BASE + DTV_BASE);

	mfd->ov_start = false;
	if (!(data & 0x1) || (pipe == NULL))
		return;
	wait_for_completion_killable_timeout(&dtv_pipe->comp, HZ/10);
	/* drop the irq reference under the done-lock */
	spin_lock_irqsave(&mdp_done_lock, flag);
	mdp_disable_irq(MDP_OVERLAY1_TERM);
	spin_unlock_irqrestore(&mdp_done_lock, flag);
}
/* * mdp4_overlay_dsi_video_wait4event: * INTR_DMA_P_DONE and INTR_PRIMARY_VSYNC event only * no INTR_OVERLAY0_DONE event allowed. */ static void mdp4_overlay_dsi_video_wait4event(struct msm_fb_data_type *mfd, int intr_done) { unsigned long flag; spin_lock_irqsave(&mdp_spin_lock, flag); INIT_COMPLETION(dsi_video_comp); mfd->dma->waiting = TRUE; outp32(MDP_INTR_CLEAR, intr_done); mdp_intr_mask |= intr_done; outp32(MDP_INTR_ENABLE, mdp_intr_mask); mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */ spin_unlock_irqrestore(&mdp_spin_lock, flag); wait_for_completion(&dsi_video_comp); mdp_disable_irq(MDP_DMA2_TERM); }
static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd) { unsigned long flag; /* enable irq */ spin_lock_irqsave(&mdp_spin_lock, flag); mdp_enable_irq(MDP_OVERLAY1_TERM); INIT_COMPLETION(dtv_pipe->comp); mfd->dma->waiting = TRUE; outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE); mdp_intr_mask |= INTR_OVERLAY1_DONE; outp32(MDP_INTR_ENABLE, mdp_intr_mask); spin_unlock_irqrestore(&mdp_spin_lock, flag); wait_for_completion_killable(&dtv_pipe->comp); mdp_disable_irq(MDP_OVERLAY1_TERM); }
/*
 * mdp4_mddi_dma_s_kickoff() - start the dma_s pipe, draining any in-flight
 * job first, and wait (bounded on Huawei builds) for completion.
 *
 * Optionally forces the panel to 24 bpp before the kick
 * (CONFIG_FB_MSM_BPP_SWITCH).
 * NOTE(review): pending_pipe is tested and cleared outside mdp_spin_lock
 * after being set under it — looks racy against the ISR; confirm.
 * Timeout results of the interruptible waits are ignored.
 */
void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	unsigned long flag;

	/* use dma_s pipe ,change bpp into 24 */
#ifdef CONFIG_FB_MSM_BPP_SWITCH
	if(24 != mfd->panel_info.bpp)
	{
		mdp4_switch_bpp_config(mfd,24);
	}
#endif

	spin_lock_irqsave(&mdp_spin_lock, flag);
	if (mfd->dma->busy == TRUE) {
		INIT_COMPLETION(pipe->comp);
		pending_pipe = pipe;
	}
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
	if (pending_pipe != NULL) {
		/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
		wait_for_completion_interruptible_timeout(&pipe->comp, 1 * HZ);
#else
		wait_for_completion_killable(&pipe->comp);
#endif
		pending_pipe = NULL;
	}

	down(&mfd->sem);
	mdp_enable_irq(MDP_DMA_S_TERM);
	mfd->dma->busy = TRUE;
	INIT_COMPLETION(pipe->dmas_comp);
	mfd->ibuf_flushed = TRUE;
	pending_pipe = pipe;
	/* start dma_s pipe */
	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
	up(&mfd->sem);

	/* wait until DMA finishes the current job */
#ifdef CONFIG_HUAWEI_KERNEL
	/* huawei modify */
	wait_for_completion_interruptible_timeout(&pipe->dmas_comp, 2 * HZ);
#else
	wait_for_completion_killable(&pipe->dmas_comp);
#endif
	pending_pipe = NULL;
	mdp_disable_irq(MDP_DMA_S_TERM);
}
/*
 * mdp4_overlay_dsi_video_wait4vsync() - block until the next primary
 * vsync in DSI video mode.
 *
 * Clears both the DMA_P-done and vsync status bits but only unmasks
 * INTR_PRIMARY_VSYNC, then waits (killable) on dsi_pipe->comp.
 * NOTE(review): mfd is NULL-checked for the waiting flag but dereferenced
 * by all callers' expectations elsewhere — presumably mfd may be NULL on
 * some path; confirm the caller contract.
 */
void mdp4_overlay_dsi_video_wait4vsync(struct msm_fb_data_type *mfd)
{
	unsigned long flag;

	/* enable irq */
	spin_lock_irqsave(&mdp_spin_lock, flag);
	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
	INIT_COMPLETION(dsi_pipe->comp);
	if (mfd)
		mfd->dma->waiting = TRUE;
	outp32(MDP_INTR_CLEAR, INTR_DMA_P_DONE | INTR_PRIMARY_VSYNC);
	mdp_intr_mask |= INTR_PRIMARY_VSYNC;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	wait_for_completion_killable(&dsi_pipe->comp);
	mdp_disable_irq(MDP_DMA2_TERM);
}