/*
 * Mixer interrupt service routine.
 *
 * Reads MXR_INT_STATUS under the register spinlock, wakes any process
 * waiting on a VSYNC event, writes the (possibly adjusted) status value
 * back to acknowledge the interrupt, and finally runs the per-layer
 * handler for every layer when a VSYNC was acknowledged.
 *
 * @irq:      interrupt number (unused)
 * @dev_data: driver cookie, actually a struct mxr_device pointer
 *
 * Returns IRQ_HANDLED unconditionally.
 */
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 i, val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up process waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
		wake_up(&mdev->event_queue);
	}

	/* clear interrupts: vsync uses different bits for read and clear.
	 * NOTE(review): sibling variants of this handler test
	 * (val & MXR_INT_STATUS_VSYNC) here; confirm MXR_INT_EN_VSYNC is
	 * really the intended mask for this condition. */
	if (~val & MXR_INT_EN_VSYNC) {
		/* swap the read-side bit for the write-to-clear bit */
		val &= ~MXR_INT_EN_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
	}
	mxr_write(mdev, MXR_INT_STATUS, val);

	spin_unlock(&mdev->reg_slock);
	/* leave early on a non-vsync event; layer handling is vsync-driven */
	if (~val & MXR_INT_CLEAR_VSYNC)
		return IRQ_HANDLED;
	for (i = 0; i < MXR_MAX_LAYERS; ++i)
		mxr_irq_layer_handle(mdev->layer[i]);
	return IRQ_HANDLED;
}
/*
 * Mixer interrupt service routine.
 *
 * Records a VSYNC timestamp and wakes sleepers on the vsync wait queue,
 * acknowledges the vsync interrupt via its dedicated clear bit, lets the
 * underrun handler adjust the status word, and writes the result back to
 * MXR_INT_STATUS. The whole register transaction runs under reg_slock.
 *
 * @irq:      interrupt number (unused)
 * @dev_data: driver cookie, actually a struct mxr_device pointer
 *
 * Returns IRQ_HANDLED unconditionally.
 */
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 status;

	spin_lock(&mdev->reg_slock);
	status = mxr_read(mdev, MXR_INT_STATUS);

	/* VSYNC fired: timestamp it and wake every waiter */
	if (status & MXR_INT_STATUS_VSYNC) {
		mdev->vsync_timestamp = ktime_get();
		wake_up_interruptible_all(&mdev->vsync_wait);
	}

	/* Acknowledge vsync through its write-to-clear bit; the vsync
	 * state itself is refreshed after MXR_CFG_LAYER_UPDATE is set. */
	if (status & MXR_INT_CLEAR_VSYNC)
		mxr_write_mask(mdev, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);

	/* Let the underrun handler fold its acknowledgements in, then
	 * write the final status word back. */
	status = mxr_irq_underrun_handle(mdev, status);
	mxr_write(mdev, MXR_INT_STATUS, status);

	spin_unlock(&mdev->reg_slock);
	return IRQ_HANDLED;
}
Esempio n. 3
0
/*
 * Mixer interrupt service routine (interlace-aware variant).
 *
 * On VSYNC: flags the event, toggles the TOP-field marker when the mixer
 * is configured for interlaced scan, wakes waiters, and converts the
 * read-side vsync bit into its write-to-clear counterpart before the
 * status is written back. Layer updates are only dispatched on the top
 * field so each frame is programmed exactly once.
 *
 * @irq:      interrupt number (unused)
 * @dev_data: driver cookie, actually a struct mxr_device pointer
 *
 * Returns IRQ_HANDLED unconditionally.
 */
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 i, val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up process waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
		/* toggle TOP field event if working in interlaced mode */
		if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
			change_bit(MXR_EVENT_TOP, &mdev->event_flags);
		wake_up(&mdev->event_queue);
		/* vsync interrupt uses different bits for read and clear */
		val &= ~MXR_INT_STATUS_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
	}

	/* acknowledge all pending interrupts */
	mxr_write(mdev, MXR_INT_STATUS, val);

	spin_unlock(&mdev->reg_slock);
	/* leave on non-vsync event */
	if (~val & MXR_INT_CLEAR_VSYNC)
		return IRQ_HANDLED;
	/* skip layer update on bottom field */
	if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
		return IRQ_HANDLED;
	for (i = 0; i < MXR_MAX_LAYERS; ++i)
		mxr_irq_layer_handle(mdev->layer[i]);
	return IRQ_HANDLED;
}
Esempio n. 4
0
/*
 * mxr_write_mask - read-modify-write of a mixer register
 * @mdev:   mixer device providing the register base
 * @reg_id: register offset
 * @val:    new value; only bits selected by @mask are taken from it
 * @mask:   bits to update; all other bits keep their current value
 */
static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
	u32 val, u32 mask)
{
	u32 cur = mxr_read(mdev, reg_id);
	u32 merged = (cur & ~mask) | (val & mask);

	writel(merged, mdev->res.mxr_regs + reg_id);
}
Esempio n. 5
0
/*
 * mxr_reg_vp_priority - set the video-processor layer priority
 * @mdev: mixer device
 * @idx:  layer index (unused for the VP layer)
 * @en:   priority value, written into bits [3:0] of MXR_LAYER_CFG
 *
 * Performs a locked read-modify-write of MXR_LAYER_CFG.
 */
void mxr_reg_vp_priority(struct mxr_device *mdev, int idx, u32 en)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mdev->reg_slock, flags);

	val = mxr_read(mdev, MXR_LAYER_CFG);
	BF_SET(val, en, 0, 4);
	mxr_write(mdev, MXR_LAYER_CFG, val);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
Esempio n. 6
0
/*
 * mxr_reg_vp_layer_blend_alpha - set the VP layer blend alpha
 * @mdev: mixer device
 * @idx:  layer index (unused for the VP layer)
 * @en:   alpha value, written into bits [7:0] of MXR_VIDEO_CFG
 *
 * Performs a locked read-modify-write of MXR_VIDEO_CFG.
 *
 * Fixes: the register value was held in a signed int; every sibling
 * register helper in this file uses u32 for register contents, and bit
 * manipulation on register words should not be done in a signed type.
 */
void mxr_reg_vp_layer_blend_alpha(struct mxr_device *mdev, int idx, u32 en)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mdev->reg_slock, flags);

	val = mxr_read(mdev, MXR_VIDEO_CFG);
	BF_SET(val, en, 0, 8);
	mxr_write(mdev, MXR_VIDEO_CFG, val);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
Esempio n. 7
0
/*
 * mxr_reg_graph_priority - set a graphics layer's priority
 * @mdev: mixer device
 * @idx:  graphics layer index (0 or 1)
 * @en:   priority value for the selected layer
 *
 * Layer 0 occupies bits [7:4] of MXR_LAYER_CFG, layer 1 bits [11:8].
 * Performs a locked read-modify-write of MXR_LAYER_CFG.
 */
void mxr_reg_graph_priority(struct mxr_device *mdev, int idx, u32 en)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mdev->reg_slock, flags);

	val = mxr_read(mdev, MXR_LAYER_CFG);
	if (idx == 0)
		BF_SET(val, en, 4, 4);	/* graphics layer 0 */
	else
		BF_SET(val, en, 8, 4);	/* graphics layer 1 */
	mxr_write(mdev, MXR_LAYER_CFG, val);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
Esempio n. 8
0
/*
 * mxr_reg_graph_chromakey_enable - toggle chroma (color) keying
 * @mdev: mixer device
 * @idx:  graphics layer index
 * @en:   non-zero enables color keying, zero disables it
 *
 * The hardware bit is a *disable* flag, so it is cleared to enable
 * keying and set to disable it. Locked read-modify-write of
 * MXR_GRAPHIC_CFG(idx).
 */
void mxr_reg_graph_chromakey_enable(struct mxr_device *mdev, int idx, u32 en)
{
	unsigned long flags;
	u32 cfg;

	spin_lock_irqsave(&mdev->reg_slock, flags);

	cfg = mxr_read(mdev, MXR_GRAPHIC_CFG(idx));
	if (en)
		cfg &= ~MXR_GRP_CFG_COLOR_KEY_DISABLE;
	else
		cfg |= MXR_GRP_CFG_COLOR_KEY_DISABLE;
	mxr_write(mdev, MXR_GRAPHIC_CFG(idx), cfg);

	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
Esempio n. 9
0
/*
 * Mixer interrupt service routine (sub-mixer variant).
 *
 * Reads MXR_INT_STATUS under the register spinlock, flags and wakes
 * VSYNC waiters, folds in the underrun handler's acknowledgements, and
 * writes the status back. On an acknowledged VSYNC it then runs the
 * layer handler for every layer of every sub-mixer (the video layer
 * only on EXYNOS4).
 *
 * @irq:      interrupt number (unused)
 * @dev_data: driver cookie, actually a struct mxr_device pointer
 *
 * Returns IRQ_HANDLED unconditionally.
 */
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 i, val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up process waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
		wake_up(&mdev->event_queue);
	}

	/* clear interrupts.
	 * NOTE(review): sibling variants of this handler test
	 * (val & MXR_INT_STATUS_VSYNC) here; confirm MXR_INT_EN_VSYNC is
	 * really the intended mask for this condition. */
	if (~val & MXR_INT_EN_VSYNC) {
		/* vsync interrupt use different bit for read and clear */
		val &= ~MXR_INT_EN_VSYNC;
		val |= MXR_INT_CLEAR_VSYNC;
	}
	val = mxr_irq_underrun_handle(mdev, val);
	mxr_write(mdev, MXR_INT_STATUS, val);

	spin_unlock(&mdev->reg_slock);
	/* leave on non-vsync event */
	if (~val & MXR_INT_CLEAR_VSYNC)
		return IRQ_HANDLED;

	for (i = 0; i < MXR_MAX_SUB_MIXERS; ++i) {
#if defined(CONFIG_ARCH_EXYNOS4)
		/* the video layer exists only on EXYNOS4 sub-mixers */
		mxr_irq_layer_handle(mdev->sub_mxr[i].layer[MXR_LAYER_VIDEO]);
#endif
		mxr_irq_layer_handle(mdev->sub_mxr[i].layer[MXR_LAYER_GRP0]);
		mxr_irq_layer_handle(mdev->sub_mxr[i].layer[MXR_LAYER_GRP1]);
	}

	return IRQ_HANDLED;
}
static int mxr_update_pending(struct mxr_device *mdev)
{
	return MXR_CFG_LAYER_UPDATE_COUNT(mxr_read(mdev, MXR_CFG));
}