Example #1
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	unsigned long flag;

	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);

		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

		/* wait until DMA finishes the current job */
		wait_for_completion_killable(&mfd->dma->comp);
		mdp_disable_irq(MDP_DMA2_TERM);

		/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
Example #2
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);

		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);
		// schedule DMA to start
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

		// wait until DMA finishes the current job
		wait_for_completion_interruptible(&mfd->dma->comp);

		// signal if pan function is waiting for the update completion
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
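Note: unlike Example #1, this variant marks the DMA busy before taking mfd->sem, never touches the MDP IRQ enable/disable, and waits with wait_for_completion_interruptible(), which returns -ERESTARTSYS as soon as any signal is pending, even though the transfer may still be running. The killable wait used in most of the other examples only aborts on fatal signals. A minimal sketch of the difference, using illustrative names (struct dma_ctx, dma_wait) rather than anything from the driver:

#include <linux/completion.h>
#include <linux/errno.h>

/* Illustrative stand-in for mfd->dma, not the driver's real layout. */
struct dma_ctx {
	struct completion comp;	/* completed by the DMA-done interrupt */
	bool busy;		/* TRUE while a transfer is in flight */
};

/*
 * wait_for_completion_interruptible() aborts on any pending signal and
 * returns -ERESTARTSYS, so dma->busy may still be TRUE afterwards.
 * wait_for_completion_killable() only aborts on fatal signals, which makes
 * an early return much rarer.
 */
static int dma_wait(struct dma_ctx *dma, bool killable)
{
	int ret;

	if (killable)
		ret = wait_for_completion_killable(&dma->comp);
	else
		ret = wait_for_completion_interruptible(&dma->comp);

	return ret;	/* 0 on completion, -ERESTARTSYS on signal */
}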
Example #3
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);

		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

		/* wait until DMA finishes the current job */
		wait_for_completion_killable(&mfd->dma->comp);
		mdp_disable_irq(MDP_DMA2_TERM);

		/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
Example #4
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	int ret = 0;
	unsigned long flag;

	if (!mfd) {
		printk(KERN_ERR "%s: mfd is NULL\n", __func__);
		return;
	}
	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);

		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

		/* wait until DMA finishes the current job */
		ret = wait_for_completion_killable_timeout(&mfd->dma->comp, msecs_to_jiffies(500));
		if (ret <= 0) {
			mfd->dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
			complete(&mfd->dma->comp);
		}

		mdp_disable_irq(MDP_DMA2_TERM);

		/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
Example #5
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	/* resolve lcd freeze issue */
	long ret;

	down(&mfd->dma->mutex);
	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {

		down(&mfd->sem);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);
        
//		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);

		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);

		/* wait until DMA finishes the current job */
//		wait_for_completion_killable(&mfd->dma->comp);
//		mdp_disable_irq(MDP_DMA2_TERM);
		ret = wait_for_completion_interruptible_timeout(&mfd->dma->comp, HZ/5);
		if (ret <= 0) {
			mfd->dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
			printk("** In %s wait timeout %ld ####!!!! **\n", __func__, ret);
		}

	/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}
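Note: Examples #4 and #5 both replace the unbounded wait with a timed one and recover by hand when the completion never arrives (for instance after a lost DMA-done interrupt), clearing the busy flag so later updates are not blocked. A minimal sketch of that recovery pattern, again with illustrative names (struct dma_ctx, dma_wait_bounded) rather than the driver's own:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

/* Illustrative stand-in for mfd->dma, not the driver's real layout. */
struct dma_ctx {
	struct completion comp;	/* completed by the DMA-done interrupt */
	bool busy;		/* TRUE while a transfer is in flight */
};

/*
 * Wait for the DMA-done completion, but give up after timeout_ms and
 * clear the busy state ourselves so the next frame can still be queued.
 */
static void dma_wait_bounded(struct dma_ctx *dma, unsigned int timeout_ms)
{
	long ret;

	ret = wait_for_completion_killable_timeout(&dma->comp,
						   msecs_to_jiffies(timeout_ms));
	if (ret <= 0) {
		/* 0 means timed out, <0 means a fatal signal arrived. */
		dma->busy = false;
		pr_err("%s: DMA completion not signalled (%ld)\n",
		       __func__, ret);
	}
}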
Example #6
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
    unsigned long flag;

    down(&mfd->dma->mutex);
    if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
        down(&mfd->sem);
        mfd->ibuf_flushed = TRUE;
        mdp_dma2_update_lcd(mfd);

        spin_lock_irqsave(&mdp_spin_lock, flag);
        mdp_enable_irq(MDP_DMA2_TERM);
        mfd->dma->busy = TRUE;
        INIT_COMPLETION(mfd->dma->comp);

        spin_unlock_irqrestore(&mdp_spin_lock, flag);
        /* schedule DMA to start */
        mdp_dma_schedule(mfd, MDP_DMA2_TERM);
        up(&mfd->sem);

        /* wait until DMA finishes the current job */
        /* LGE_CHANGE
         * Add this code for screen update when dma completion is failed.
         * 2012-03-06, [email protected]
         */
        if (wait_for_completion_killable(&mfd->dma->comp) < 0)
            goto out;
        mdp_disable_irq(MDP_DMA2_TERM);

        /* signal if pan function is waiting for the update completion */
        if (mfd->pan_waiting) {
            mfd->pan_waiting = FALSE;
            complete(&mfd->pan_comp);
        }
    }
out:
    up(&mfd->dma->mutex);
}
Example #7
void mdp_dma2_update(struct msm_fb_data_type *mfd)
#endif
{
	unsigned long flag;
	static int first_vsync;
	int need_wait = 0;

	down(&mfd->dma->mutex);
	if ((mfd) && (mfd->panel_power_on)) {
		down(&mfd->sem);
		spin_lock_irqsave(&mdp_spin_lock, flag);
		if (mfd->dma->busy == TRUE)
			need_wait++;
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		if (need_wait)
			wait_for_completion_killable(&mfd->dma->comp);
#if defined (CONFIG_MACH_KYLEPLUS_CTC)
		/* wait until Vsync finishes the current job */
		if (first_vsync) {
			if (!wait_for_completion_killable_timeout
				(&vsync_cntrl.vsync_comp, HZ/10))
				pr_err("Timedout DMA %s %d", __func__,
									__LINE__);
		} else {
			first_vsync = 1;
		}
#endif
		/* schedule DMA to start */
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		mfd->ibuf_flushed = TRUE;
		mdp_dma2_update_lcd(mfd);

		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_enable_irq(MDP_DMA2_TERM);
		mfd->dma->busy = TRUE;
		INIT_COMPLETION(mfd->dma->comp);
		INIT_COMPLETION(vsync_cntrl.vsync_comp);
		if (!vsync_cntrl.vsync_irq_enabled &&
				vsync_cntrl.disabled_clocks) {
			MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
			outp32(MDP_INTR_CLEAR, MDP_PRIM_RDPTR);
			mdp_intr_mask |= MDP_PRIM_RDPTR;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
			mdp_enable_irq(MDP_VSYNC_TERM);
			vsync_cntrl.vsync_dma_enabled = 1;
		}
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		/* schedule DMA to start */
		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
		up(&mfd->sem);
		
#ifndef CONFIG_MACH_KYLEPLUS_CTC
		/* wait until Vsync finishes the current job */
		if (first_vsync) {
			if (!wait_for_completion_killable_timeout
					(&vsync_cntrl.vsync_comp, HZ/10))
				pr_err("Timedout DMA %s %d", __func__,
								__LINE__);
		} else {
			first_vsync = 1;
		}
#endif
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

		/* signal if pan function is waiting for the update completion */
		if (mfd->pan_waiting) {
			mfd->pan_waiting = FALSE;
			complete(&mfd->pan_comp);
		}
	}
	up(&mfd->dma->mutex);
}