Example No. 1
/* Before calling this function, interrupts should be disabled and the IRQ
 * must be disabled at the GIC to avoid spurious interrupts. */
void gic_clear_spi_pending(unsigned int irq)
{
	u32 mask, val;
	WARN_ON(!irqs_disabled());
	spin_lock(&irq_controller_lock);
	mask = 1 << (gic_irq(irq) % 32);
	val = readl(gic_dist_base(irq) +
			GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
	/* warn if the interrupt is enabled */
	WARN_ON(val & mask);
	writel(mask, gic_dist_base(irq) +
			GIC_DIST_PENDING_CLEAR + (gic_irq(irq) / 32) * 4);
	spin_unlock(&irq_controller_lock);
}
Example No. 2
/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_ack_irq(unsigned int irq)
{
	spin_lock(&irq_controller_lock);

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
	{
		u32 mask = 1 << (irq % 32);

		/*
		 * Linux assumes that when we're done with an interrupt we need
		 * to unmask it, in the same way we need to unmask an interrupt
		 * when we first enable it.
		 *
		 * The GIC has a separate notion of "end of interrupt" to
		 * re-enable an interrupt after handling, in order to support
		 * hardware prioritisation.
		 *
		 * We can make the GIC behave in the way that Linux expects by
		 * making our "acknowledge" routine disable the interrupt, then
		 * mark it as complete.
		 */
		writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR +
		       (gic_irq(irq) / 32) * 4);
	}
#endif
	writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
	spin_unlock(&irq_controller_lock);
}
Example No. 3
File: irq.c Project: ryos36/xen-arm
void gic_unmask_irq(unsigned int irq)
{
        u32 mask = 1 << (irq % 32);

        spin_lock(&irq_controller_lock);
        mmio_writel(mask, gic_dist_base(irq) + ICDISER + (gic_irq(irq) / 32) * 4);
        spin_unlock(&irq_controller_lock);
}
Example No. 4
static void gic_unmask_irq(unsigned int irq)
{
    u32 mask = 1 << (irq % 32);

    spin_lock(&irq_controller_lock);
    writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
    spin_unlock(&irq_controller_lock);
}
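For concreteness: with interrupt 45, mask = 1 << (45 % 32) = 1 << 13 and the register offset is (45 / 32) * 4 = 4, so the write sets bit 13 of the second 32-bit GIC_DIST_ENABLE_SET register. Each enable register covers 32 interrupts with one bit apiece; GIC_DIST_ENABLE_CLEAR and GIC_DIST_PENDING_CLEAR in the other examples are indexed the same way.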
Example No. 5
static void gic_mask_ack_irq(unsigned int irq)
{
	u32 mask = 1 << (irq % 32);
	spin_lock(&irq_controller_lock);
	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR +
	       (gic_irq(irq) / 32) * 4);
	writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
	spin_unlock(&irq_controller_lock);
}
Example No. 6
static void gic_mask_irq(unsigned int irq)
{
    u32 mask = 1 << (irq % 32);

    spin_lock(&irq_controller_lock);
    writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
    spin_unlock(&irq_controller_lock);

    msm_mpm_enable_irq(irq, 0);
}
Example No. 7
static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (d->irq % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}
Example No. 8
static void gic_unmask_irq(unsigned int irq)
{
	u32 mask = 1 << (irq % 32);

	spin_lock(&irq_controller_lock);
	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
	spin_unlock(&irq_controller_lock);

#ifdef CONFIG_MSM_MPM
	msm_mpm_enable_irq(irq, 1);
#endif
}
Example No. 9
static int gic_set_type(unsigned int irq, unsigned int type)
{
	void __iomem *base = gic_dist_base(irq);
	unsigned int gicirq = gic_irq(irq);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	spin_lock(&irq_controller_lock);

	val = readl(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel(val, base + GIC_DIST_CONFIG + confoff);

	if (enabled)
		writel(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	spin_unlock(&irq_controller_lock);

	if ((type & IRQ_TYPE_EDGE_RISING) && gicirq > 31)
		__set_irq_handler_unlocked(irq, handle_edge_irq);

#ifdef CONFIG_MSM_MPM
	msm_mpm_set_irq_type(irq, type);
#endif

	return 0;
}
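For concreteness: with gicirq = 45 this computes confoff = (45 / 16) * 4 = 8 and confmask = 0x2 << ((45 % 16) * 2) = 0x2 << 26, i.e. it toggles the upper bit of the two-bit configuration field for interrupt 45 in the third GIC_DIST_CONFIG register, where a set bit selects edge-triggered and a clear bit level-sensitive behaviour.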
Example No. 10
File: irq.c Project: ryos36/xen-arm
void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
{
        void *reg = gic_dist_base(irq) + ICDIPTR + (gic_irq(irq) & ~3);
        unsigned int shift = (irq % 4) * 8;
        unsigned int cpu = first_cpu(mask_val);
        u32 val;

        spin_lock(&irq_controller_lock);
        irq_desc[irq].cpu = cpu;
        val = mmio_readl(reg) & ~(0xff << shift);
        val |= 1 << (cpu + shift);
        mmio_writel(val, reg);
        spin_unlock(&irq_controller_lock);
}
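The targets register (ICDIPTR here, GIC_DIST_TARGET in the Linux variants) holds one byte per interrupt, so (gic_irq(irq) & ~3) selects the word-aligned register containing that interrupt's byte, shift = (irq % 4) * 8 selects the byte lane within it, and the single bit written into that byte names the CPU interface that should receive the interrupt.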
Example No. 11
static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
{
	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
	unsigned int shift = (irq % 4) * 8;
	unsigned int cpu = cpumask_first(mask_val);
	u32 val;

	spin_lock(&irq_controller_lock);
	irq_desc[irq].cpu = cpu;
	val = readl(reg) & ~(0xff << shift);
	val |= 1 << (cpu + shift);
	writel(val, reg);
	spin_unlock(&irq_controller_lock);
}
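Callbacks in the older unsigned-int-irq style (e.g. Examples 2, 4, 6, 9 and 11) would normally be collected into an irq_chip descriptor; a minimal sketch against that generation of the genirq API, shown here only for orientation:

static struct irq_chip gic_chip = {
	.name		= "GIC",
	.ack		= gic_ack_irq,		/* Example No. 2 */
	.mask		= gic_mask_irq,		/* Example No. 6 */
	.unmask		= gic_unmask_irq,	/* Example No. 4 */
	.set_type	= gic_set_type,		/* Example No. 9 */
#ifdef CONFIG_SMP
	.set_affinity	= gic_set_cpu,		/* Example No. 11 */
#endif
};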
Example No. 12
static void gic_ack_irq(unsigned int irq)
{
    spin_lock(&irq_controller_lock);

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
    {
        u32 mask = 1 << (irq % 32);

        /* Disable the interrupt before signalling end of interrupt, so it
         * stays masked until the core unmasks it (see Example No. 2). */
        writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR +
               (gic_irq(irq) / 32) * 4);
    }
#endif
    writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
    spin_unlock(&irq_controller_lock);
}
Example No. 13
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
Example No. 14
static int gic_set_type(unsigned int irq, unsigned int type)
{
    void __iomem *base = gic_dist_base(irq);
    unsigned int gicirq = gic_irq(irq);
    u32 enablemask = 1 << (gicirq % 32);
    u32 enableoff = (gicirq / 32) * 4;
    u32 confmask = 0x2 << ((gicirq % 16) * 2);
    u32 confoff = (gicirq / 16) * 4;
    bool enabled = false;
    u32 val;

    /* Interrupt configuration for SGIs can't be changed */
    if (gicirq < 16)
        return -EINVAL;

    if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
        return -EINVAL;

    spin_lock(&irq_controller_lock);

    val = readl(base + GIC_DIST_CONFIG + confoff);
    if (type == IRQ_TYPE_LEVEL_HIGH)
        val &= ~confmask;
    else if (type == IRQ_TYPE_EDGE_RISING)
        val |= confmask;


    /*
     * As recommended by the spec, disable the interrupt before changing
     * the configuration.
     */
    if (readl(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
        writel(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
        enabled = true;
    }

    writel(val, base + GIC_DIST_CONFIG + confoff);

    if (enabled)
        writel(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

    spin_unlock(&irq_controller_lock);

    return 0;
}
Example No. 15
static int gic_set_affinity(struct vmm_host_irq *irq, 
			    const struct vmm_cpumask *mask_val,
			    bool force)
{
	virtual_addr_t reg;
	u32 shift = (irq->num % 4) * 8;
	u32 cpu = vmm_cpumask_first(mask_val);
	u32 val, mask, bit;

	if (cpu >= 8)
		return VMM_EINVALID;

	reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
	mask = 0xff << shift;
	bit = 1 << (cpu + shift);

	val = gic_read(reg) & ~mask;
	gic_write(val | bit, reg);

	return 0;
}
Example No. 16
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (d->irq % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	if (cpu >= 8 || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	bit = 1 << (cpu_logical_map(cpu) + shift);

	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
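The struct irq_data based variants (Examples 7, 13 and 16) target the current genirq interface instead; a rough sketch of the corresponding wiring (the mask and EOI callbacks are not shown in this collection, but follow the same pattern as their unsigned-int counterparts):

static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_unmask		= gic_unmask_irq,	/* Example No. 7 */
	.irq_set_type		= gic_set_type,		/* Example No. 13 */
	.irq_set_affinity	= gic_set_affinity,	/* Example No. 16 */
};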
Example No. 17
void gic_unmask_irq(struct vmm_host_irq *irq)
{
	gic_write(1 << (irq->num % 32), gic_dist_base(irq) +
		  GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
}