/* call with list_lock and dispc runtime held */
static void omap_irq_update(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_irq *irq;
	uint32_t irqmask = priv->vblank_mask;

	assert_spin_locked(&list_lock);

	list_for_each_entry(irq, &priv->irq_list, node)
		irqmask |= irq->irqmask;

	DBG("irqmask=%08x", irqmask);

	dispc_write_irqenable(irqmask);
	dispc_read_irqenable();        /* flush posted write */
}
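
/*
 * Illustrative sketch (hypothetical helper, not part of the code above):
 * as the comment on omap_irq_update() states, callers are expected to hold
 * a dispc runtime PM reference and list_lock around any change to
 * priv->irq_list followed by omap_irq_update(). The function name and the
 * exact locking flavour below are assumptions for illustration only.
 */
static void example_irq_register(struct drm_device *dev,
		struct omap_drm_irq *irq)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;

	dispc_runtime_get();			/* keep dispc clocked */
	spin_lock_irqsave(&list_lock, flags);	/* protects priv->irq_list */

	list_add(&irq->node, &priv->irq_list);
	omap_irq_update(dev);			/* reprogram the enable mask */

	spin_unlock_irqrestore(&list_lock, flags);
	dispc_runtime_put();
}
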
/* Called from dss.c. Note that we don't touch clocks here,
 * but we presume they are on because we got an IRQ. However,
 * an irq handler may turn the clocks off, so they may no
 * longer be running later in the function. */
static irqreturn_t omap_dispc_irq_handler(int irq, void *arg)
{
	int i;
	u32 irqstatus, irqenable;
	u32 handledirqs = 0;
	u32 unhandled_errors;
	struct omap_dispc_isr_data *isr_data;
	struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];

	spin_lock(&dispc_compat.irq_lock);

	irqstatus = dispc_read_irqstatus();
	irqenable = dispc_read_irqenable();

	/* IRQ is not for us */
	if (!(irqstatus & irqenable)) {
		spin_unlock(&dispc_compat.irq_lock);
		return IRQ_NONE;
	}

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spin_lock(&dispc_compat.irq_stats_lock);
	dispc_compat.irq_stats.irq_count++;
	dss_collect_irq_stats(irqstatus, dispc_compat.irq_stats.irqs);
	spin_unlock(&dispc_compat.irq_stats_lock);
#endif

	print_irq_status(irqstatus);

	/* Ack the interrupt. Do it here before clocks are possibly turned
	 * off */
	dispc_clear_irqstatus(irqstatus);
	/* flush posted write */
	dispc_read_irqstatus();

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(registered_isr, dispc_compat.registered_isr,
			sizeof(registered_isr));

	spin_unlock(&dispc_compat.irq_lock);

	/* Dispatch to every registered ISR whose mask matches a pending bit */
	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
		isr_data = &registered_isr[i];

		if (!isr_data->isr)
			continue;

		if (isr_data->mask & irqstatus) {
			isr_data->isr(isr_data->arg, irqstatus);
			handledirqs |= isr_data->mask;
		}
	}

	spin_lock(&dispc_compat.irq_lock);

	/* Error bits no ISR handled: collect them, mask them out so they
	 * stop firing, and defer to the error worker in process context. */
	unhandled_errors = irqstatus & ~handledirqs &
			dispc_compat.irq_error_mask;

	if (unhandled_errors) {
		dispc_compat.error_irqs |= unhandled_errors;

		dispc_compat.irq_error_mask &= ~unhandled_errors;
		_omap_dispc_set_irqs();

		schedule_work(&dispc_compat.error_work);
	}

	spin_unlock(&dispc_compat.irq_lock);

	return IRQ_HANDLED;
}
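
/*
 * Illustrative sketch of the consumer side: a callback matching the
 * isr_data->isr(arg, irqstatus) call in the handler above, plus a
 * simplified registration helper that fills a free slot in
 * dispc_compat.registered_isr[] under irq_lock. The real
 * omap_dispc_register_isr() does more (e.g. duplicate checking); the
 * names and details below are simplified assumptions.
 */
static void example_vsync_isr(void *arg, u32 mask)
{
	struct completion *compl = arg;

	complete(compl);	/* wake whoever waits for the next vsync */
}

static int example_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
{
	struct omap_dispc_isr_data *isr_data;
	unsigned long flags;
	int i, ret = -EBUSY;

	spin_lock_irqsave(&dispc_compat.irq_lock, flags);

	/* take the first free slot in the table the irq handler copies */
	for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
		isr_data = &dispc_compat.registered_isr[i];
		if (isr_data->isr)
			continue;

		isr_data->isr = isr;
		isr_data->arg = arg;
		isr_data->mask = mask;
		ret = 0;
		break;
	}

	if (!ret)
		_omap_dispc_set_irqs();	/* enable the newly requested bits */

	spin_unlock_irqrestore(&dispc_compat.irq_lock, flags);

	return ret;
}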