void mdp4_lcdc_wait4vsync(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; unsigned long flags; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; if (atomic_read(&vctrl->suspend) > 0) return; mdp4_lcdc_vsync_irq_ctrl(cndx, 1); spin_lock_irqsave(&vctrl->spin_lock, flags); if (vctrl->wait_vsync_cnt == 0) INIT_COMPLETION(vctrl->vsync_comp); vctrl->wait_vsync_cnt++; spin_unlock_irqrestore(&vctrl->spin_lock, flags); wait_for_completion(&vctrl->vsync_comp); mdp4_lcdc_vsync_irq_ctrl(cndx, 0); mdp4_stat.wait4vsync0++; }
/*
 * mdp4_lcdc_wait4vsync - block until the next LCDC vsync.
 * @cndx:  controller index into vsync_ctrl_db[]; must be < MAX_CONTROLLER.
 * @vtime: out-parameter for the vsync timestamp (non-I9210 builds only;
 *         set to -1 when the controller is suspended).
 *
 * NOTE(review): the stray #endif after the signature pairs with an #if
 * above this visible chunk that selects between this two-argument
 * signature and a one-argument CONFIG_EUR_MODEL_GT_I9210 variant —
 * confirm against the full file.
 */
void mdp4_lcdc_wait4vsync(int cndx, long long *vtime)
#endif
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	/* suspended: no vsync will ever arrive, so report -1 and bail */
	if (atomic_read(&vctrl->suspend) > 0) {
#if !defined (CONFIG_EUR_MODEL_GT_I9210)
		*vtime = -1;
#endif
		return;
	}

#if defined (CONFIG_EUR_MODEL_GT_I9210)
	/* I9210: vsync IRQ is gated per-waiter instead of starting the TG */
	mdp4_lcdc_vsync_irq_ctrl(cndx, 1);
#else
	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();
#endif

	spin_lock_irqsave(&vctrl->spin_lock, flags);
#if defined (CONFIG_EUR_MODEL_GT_I9210)
	/* I9210 re-arms the completion unconditionally (guard disabled) */
	//if (vctrl->wait_vsync_cnt == 0)
#else
	/* re-arm the completion only for the first concurrent waiter */
	if (vctrl->wait_vsync_cnt == 0)
#endif
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
	pr_err("[QC_DEBUG] %s: Waiting... vctrl->wait_vsync_cnt:%d \n", __func__, vctrl->wait_vsync_cnt);
	wait_for_completion(&vctrl->vsync_comp);
#if defined (CONFIG_EUR_MODEL_GT_I9210)
	mdp4_lcdc_vsync_irq_ctrl(cndx, 0);
#else
	/* hand the ISR-recorded vsync timestamp back to the caller */
	*vtime = vctrl->vsync_time.tv64;
#endif

	mdp4_stat.wait4vsync0++;
}
void mdp4_lcdc_vsync_ctrl(struct fb_info *info, int enable) { struct vsycn_ctrl *vctrl; int cndx = 0; vctrl = &vsync_ctrl_db[cndx]; if (vctrl->vsync_irq_enabled == enable) return; pr_debug("%s: vsync enable=%d\n", __func__, enable); vctrl->vsync_irq_enabled = enable; #if defined (CONFIG_EUR_MODEL_GT_I9210) mdp4_lcdc_vsync_irq_ctrl(cndx, enable); #else if (enable) vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM); else vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM); #endif if (vctrl->vsync_irq_enabled && atomic_read(&vctrl->suspend) == 0) atomic_set(&vctrl->vsync_resume, 1); }
void mdp4_lcdc_wait4vsync(int cndx) { struct vsycn_ctrl *vctrl; struct mdp4_overlay_pipe *pipe; if (cndx >= MAX_CONTROLLER) { pr_err("%s: out or range: cndx=%d\n", __func__, cndx); return; } vctrl = &vsync_ctrl_db[cndx]; pipe = vctrl->base_pipe; if (atomic_read(&vctrl->suspend) > 0) return; mdp4_lcdc_vsync_irq_ctrl(cndx, 1); wait_event_interruptible_timeout(vctrl->wait_queue, 1, msecs_to_jiffies(VSYNC_PERIOD * 8)); mdp4_lcdc_vsync_irq_ctrl(cndx, 0); mdp4_stat.wait4vsync0++; }
/*
 * mdp4_lcdc_vsync_ctrl - turn vsync notification on or off for the
 * primary LCDC controller (index 0).  A request matching the current
 * state is a no-op.
 */
void mdp4_lcdc_vsync_ctrl(struct fb_info *info, int enable)
{
	int cndx = 0;
	struct vsycn_ctrl *vc = &vsync_ctrl_db[cndx];

	/* already in the requested state: nothing to do */
	if (vc->vsync_irq_enabled == enable)
		return;

	pr_debug("%s: vsync enable=%d\n", __func__, enable);

	vc->vsync_irq_enabled = enable;
	mdp4_lcdc_vsync_irq_ctrl(cndx, enable);
}
/*
 * mdp4_lcdc_vsync_ctrl - turn vsync notification on or off for the
 * primary LCDC controller (index 0).  A request matching the current
 * state is a no-op.  When enabling while not suspended, also marks the
 * controller as resumed for vsync purposes.
 */
void mdp4_lcdc_vsync_ctrl(struct fb_info *info, int enable)
{
	int cndx = 0;
	struct vsycn_ctrl *vc = &vsync_ctrl_db[cndx];

	/* already in the requested state: nothing to do */
	if (vc->vsync_irq_enabled == enable)
		return;

	pr_debug("%s: vsync enable=%d\n", __func__, enable);

	vc->vsync_irq_enabled = enable;
	mdp4_lcdc_vsync_irq_ctrl(cndx, enable);

	if (vc->vsync_irq_enabled && atomic_read(&vc->suspend) == 0)
		atomic_set(&vc->vsync_resume, 1);
}