/*
 * Per-CPU initialization of a GIC instance: record the CPU interface
 * base, program the banked SGI/PPI state, and enable the interface.
 * Returns 0 on success, -1 for an invalid GIC instance number.
 */
int gic_cpu_init(u32 gic_nr, virtual_addr_t base)
{
	int off;

	/* Reject out-of-range GIC instance numbers. */
	if (gic_nr >= GIC_MAX_NR) {
		return -1;
	}

	gic_data[gic_nr].cpu_base = base;

	/*
	 * Banked per-CPU setup: mask all PPI interrupts (bits 16-31)
	 * and make sure all SGI interrupts (bits 0-15) are enabled.
	 */
	gic_write(0xffff0000,
		  gic_data[gic_nr].dist_base + GIC_DIST_ENABLE_CLEAR);
	gic_write(0x0000ffff,
		  gic_data[gic_nr].dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Program the priority of the 32 banked SGI/PPI lines;
	 * each 32-bit priority register covers four interrupts.
	 */
	for (off = 0; off < 32; off += 4) {
		gic_write(0xa0a0a0a0,
			  gic_data[gic_nr].dist_base + GIC_DIST_PRI + off);
	}

	/* Accept all priorities above 0xf0, then enable the CPU interface. */
	gic_write(0xf0, base + GIC_CPU_PRIMASK);
	gic_write(1, base + GIC_CPU_CTRL);

	return 0;
}
/* Signal end-of-interrupt for @d on its owning GIC's CPU interface. */
static void gic_eoi_irq(struct vmm_host_irq *d)
{
	struct gic_chip_data *chip = vmm_host_irq_get_chip_data(d);

	/* Priority drop: tell the CPU interface we are done with this IRQ. */
	gic_write(d->hwirq, chip->cpu_base + GICC_EOI);

	/*
	 * With split EOI/deactivate (EOImode set), the write above only
	 * drops priority; deactivate explicitly here unless the IRQ is
	 * marked as routed.
	 */
	if (chip->eoimode && !vmm_host_irq_is_routed(d)) {
		gic_write(d->hwirq, chip->cpu2_base + GICC2_DIR);
	}
}
/* Signal end-of-interrupt for @irq on its owning GIC's CPU interface. */
static void gic_eoi_irq(struct vmm_host_irq *irq)
{
	struct gic_chip_data *chip = vmm_host_irq_get_chip_data(irq);
	u32 hwirq = gic_irq(chip, irq);

	/* Priority drop on the CPU interface. */
	gic_write(hwirq, chip->cpu_base + GIC_CPU_EOI);

	/*
	 * When EOImode is set, EOI only drops priority; perform the
	 * deactivation separately unless the IRQ is marked as routed.
	 */
	if (chip->eoimode && !vmm_host_irq_is_routed(irq)) {
		gic_write(hwirq, chip->cpu2_base + GIC_CPU2_DIR);
	}
}
/*
 * Minimal per-CPU initialization: record the CPU interface base,
 * open the priority mask, and enable the interface.
 * Returns 0 on success, -1 for an invalid GIC instance number.
 */
int gic_cpu_init(u32 gic_nr, virtual_addr_t base)
{
	/* Bail out on an invalid GIC instance number. */
	if (gic_nr >= GIC_MAX_NR) {
		return -1;
	}

	gic_data[gic_nr].cpu_base = base;

	/* Accept all priorities above 0xf0, then enable the CPU interface. */
	gic_write(0xf0, base + GIC_CPU_PRIMASK);
	gic_write(1, base + GIC_CPU_CTRL);

	return 0;
}
/**
 * Signal end-of-interrupt for the given host IRQ on the given GIC.
 *
 * @gic_nr: GIC instance number (must be < GIC_MAX_NR)
 * @irq: host IRQ number (must be >= the instance's irq_offset)
 *
 * Returns 0 on success, -1 on an invalid GIC number or IRQ number.
 *
 * The stale commented-out mask/unmask writes that used to surround the
 * EOI have been removed; a local previously named gic_irq was renamed
 * to hwirq to avoid colliding with the gic_irq() helper name used
 * elsewhere in this driver.
 */
int gic_eoi_irq(u32 gic_nr, u32 irq)
{
	u32 hwirq;

	if (GIC_MAX_NR <= gic_nr) {
		return -1;
	}
	if (irq < gic_data[gic_nr].irq_offset) {
		return -1;
	}

	/* Translate the host IRQ number into the GIC-local interrupt ID. */
	hwirq = irq - gic_data[gic_nr].irq_offset;

	gic_write(hwirq, gic_data[gic_nr].cpu_base + GIC_CPU_EOI);

	return 0;
}
/*
 * Write the per-interrupt bit for @irq into the distributor register
 * bank that starts at @offset (e.g. enable-set / enable-clear).
 */
static void gic_poke_irq(struct gic_chip_data *gic, struct vmm_host_irq *irq,
			 u32 offset)
{
	u32 hwirq = gic_irq(gic, irq);

	/* These banks hold one bit per interrupt, 32 per 32-bit word. */
	gic_write(1U << (hwirq % 32),
		  gic->dist_base + offset + (hwirq / 32) * 4);
}
/*
 * Configure the trigger type of an interrupt in the distributor.
 *
 * Only VMM_IRQ_TYPE_LEVEL_HIGH and VMM_IRQ_TYPE_EDGE_RISING are
 * accepted; SGIs (IDs 0-15) have a fixed configuration.
 * Returns 0 on success or VMM_EINVALID on an unsupported request.
 */
static int gic_set_type(struct vmm_host_irq *irq, u32 type)
{
	struct gic_chip_data *gic = vmm_host_irq_get_chip_data(irq);
	virtual_addr_t base = gic->dist_base;
	u32 irq_no = gic_irq(gic, irq);
	/* Enable registers: one bit per interrupt, 32 per word. */
	u32 enablemask = 1 << (irq_no % 32);
	u32 enableoff = (irq_no / 32) * 4;
	/* Config registers: two bits per interrupt, 16 per word;
	 * the upper bit of each pair selects edge triggering. */
	u32 confmask = 0x2 << ((irq_no % 16) * 2);
	u32 confoff = (irq_no / 16) * 4;
	bool enabled = FALSE;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq_no < 16) {
		return VMM_EINVALID;
	}

	if (type != VMM_IRQ_TYPE_LEVEL_HIGH &&
	    type != VMM_IRQ_TYPE_EDGE_RISING) {
		return VMM_EINVALID;
	}

	/* Read-modify-write the two config bits for this interrupt. */
	val = gic_read(base + GIC_DIST_CONFIG + confoff);
	if (type == VMM_IRQ_TYPE_LEVEL_HIGH) {
		val &= ~confmask;
	} else if (type == VMM_IRQ_TYPE_EDGE_RISING) {
		val |= confmask;
	}

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (gic_read(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		gic_write(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = TRUE;
	}

	gic_write(val, base + GIC_DIST_CONFIG + confoff);

	/* Re-enable the interrupt only if we disabled it above. */
	if (enabled) {
		gic_write(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
	}

	return 0;
}
/**
 * Initialize the distributor of the given GIC instance.
 *
 * @gic_nr: GIC instance number (must be < GIC_MAX_NR)
 * @base: virtual address of the distributor register block
 * @irq_start: first host IRQ number served by this GIC
 *
 * Returns 0 on success, -1 on an invalid GIC number.
 */
int gic_dist_init(u32 gic_nr, virtual_addr_t base, u32 irq_start)
{
	unsigned int max_irq, i;
	u32 cpumask = 1 << 0;	/*smp_processor_id(); */

	if (GIC_MAX_NR <= gic_nr) {
		return -1;
	}

	/* Replicate the CPU mask into all four byte lanes of the
	 * target registers (one target byte per interrupt). */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	gic_data[gic_nr].dist_base = base;
	/*
	 * Align the IRQ offset down to a multiple of 32.
	 * NOTE(review): (irq_start - 1) wraps around if irq_start is 0 —
	 * presumably callers always pass irq_start >= 1; confirm.
	 */
	gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;

	/* Disable IRQ distribution while reconfiguring. */
	gic_write(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 */
	max_irq = gic_read(base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/*
	 * The GIC only supports up to 1020 interrupt sources.
	 * Limit this to either the architected maximum, or the
	 * platform maximum.
	 * NOTE(review): max() lets max_irq exceed 1020 whenever
	 * GIC_NR_IRQS > 1020, contradicting the comment above;
	 * min() may have been intended — confirm against callers.
	 */
	if (max_irq > max(1020, GIC_NR_IRQS))
		max_irq = max(1020, GIC_NR_IRQS);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 * (16 interrupts per config register.)
	 */
	for (i = 32; i < max_irq; i += 16)
		gic_write(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 * (4 interrupts per target register.)
	 */
	for (i = 32; i < max_irq; i += 4)
		gic_write(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all interrupts.
	 * (4 interrupts per priority register.)
	 */
	for (i = 0; i < max_irq; i += 4)
		gic_write(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.
	 * (32 interrupts per enable-clear register.)
	 */
	for (i = 0; i < max_irq; i += 32)
		gic_write(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/* Re-enable IRQ distribution. */
	gic_write(1, base + GIC_DIST_CTRL);

	return 0;
}
/* Per-CPU setup of the banked SGI/PPI state and the CPU interface. */
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	int off;

	/*
	 * Banked per-CPU state: mask every PPI (bits 16-31) and make
	 * sure every SGI (bits 0-15) is enabled.
	 */
	gic_write(0xffff0000, gic->dist_base + GICD_ENABLE_CLEAR);
	gic_write(0x0000ffff, gic->dist_base + GICD_ENABLE_SET);

	/*
	 * Priority for the 32 banked SGI/PPI lines; each priority
	 * register covers four interrupts.
	 */
	for (off = 0; off < 32; off += 4) {
		gic_write(0xa0a0a0a0, gic->dist_base + GICD_PRI + off);
	}

	/* Accept every priority above 0xf0. */
	gic_write(0xf0, gic->cpu_base + GICC_PRIMASK);

	/* Enable the CPU interface; bit 9 selects split EOI/deactivate. */
	if (gic->eoimode) {
		gic_write(1 | (1 << 9), gic->cpu_base + GICC_CTRL);
	} else {
		gic_write(1, gic->cpu_base + GICC_CTRL);
	}
}
/*
 * One-time distributor initialization for @gic: program trigger type,
 * target CPU and priority for all global interrupts, disable them,
 * register the host IRQ chip/handler for every IRQ this GIC serves,
 * and finally enable IRQ distribution.
 */
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask = 1 << vmm_smp_processor_id();
	virtual_addr_t base = gic->dist_base;

	/* Replicate the CPU mask into all four byte lanes of the
	 * target registers (one target byte per interrupt). */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Disable IRQ distribution */
	gic_write(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 * (16 interrupts per config register.)
	 */
	for (i = 32; i < gic->max_irqs; i += 16) {
		gic_write(0, base + GIC_DIST_CONFIG + i * 4 / 16);
	}

	/*
	 * Set all global interrupts to this CPU only.
	 * (4 interrupts per target register.)
	 */
	for (i = 32; i < gic->max_irqs; i += 4) {
		gic_write(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
	}

	/*
	 * Set priority on all interrupts.
	 * (4 interrupts per priority register.)
	 */
	for (i = 0; i < gic->max_irqs; i += 4) {
		gic_write(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
	}

	/*
	 * Disable all interrupts.
	 * (32 interrupts per enable-clear register.)
	 */
	for (i = 0; i < gic->max_irqs; i += 32) {
		gic_write(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
	}

	/*
	 * Setup the Host IRQ subsystem.
	 * Note: We handle all interrupts including SGIs and PPIs via C code.
	 * The Linux kernel handles pheripheral interrupts via C code and
	 * SGI/PPI via assembly code.
	 */
	for (i = gic->irq_start; i < (gic->irq_start + gic->max_irqs); i++) {
		vmm_host_irq_set_chip(i, &gic_chip);
		vmm_host_irq_set_chip_data(i, gic);
		vmm_host_irq_set_handler(i, vmm_handle_fast_eoi);
		/* Mark SGIs and PPIs as per-CPU IRQs */
		if (i < 32) {
			vmm_host_irq_mark_per_cpu(i);
		}
	}

	/* Enable IRQ distribution */
	gic_write(1, base + GIC_DIST_CTRL);
}
/* Send the software-generated interrupt @d to every CPU in @mask. */
static void gic_raise(struct vmm_host_irq *d, const struct vmm_cpumask *mask)
{
	unsigned long targets = *vmm_cpumask_bits(mask);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	arch_wmb();

	/* This always happens on GIC0 */
	gic_write((targets << 16) | d->hwirq,
		  gic_data[0].dist_base + GICD_SOFTINT);
}
/*
 * Mask (disable) the given host IRQ on the given GIC instance.
 * Returns 0 on success, -1 on an invalid GIC number or IRQ number.
 */
int gic_mask(u32 gic_nr, u32 irq)
{
	u32 bit = 1 << (irq % 32);
	u32 hwirq;

	if (gic_nr >= GIC_MAX_NR) {
		return -1;
	}
	if (irq < gic_data[gic_nr].irq_offset) {
		return -1;
	}

	/* Translate the host IRQ number into the GIC-local interrupt ID. */
	hwirq = irq - gic_data[gic_nr].irq_offset;

	/* One bit per interrupt in the enable-clear bank; write-1-to-clear. */
	gic_write(bit, gic_data[gic_nr].dist_base + GIC_DIST_ENABLE_CLEAR +
		  (hwirq / 32) * 4);

	return 0;
}
/*
 * Route @irq to the first CPU in @mask_val by rewriting its target
 * byte in the distributor. Returns 0 on success, VMM_EINVALID when
 * the selected CPU is outside the 8 targets the GIC supports.
 */
static int gic_set_affinity(struct vmm_host_irq *irq,
			    const struct vmm_cpumask *mask_val, bool force)
{
	u32 shift = (irq->num % 4) * 8;
	u32 cpu = vmm_cpumask_first(mask_val);
	virtual_addr_t reg;
	u32 cur, byte_mask, target_bit;

	/* The distributor only has target bits for CPUs 0-7. */
	if (cpu >= 8) {
		return VMM_EINVALID;
	}

	/* Each target register covers four interrupts, one byte each. */
	reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
	byte_mask = 0xff << shift;
	target_bit = 1 << (cpu + shift);

	/* Read-modify-write only the byte belonging to this interrupt. */
	cur = gic_read(reg) & ~byte_mask;
	gic_write(cur | target_bit, reg);

	return 0;
}
/*
 * Route @d to the first CPU in @mask_val by rewriting its target byte
 * in the distributor. Returns 0 on success, VMM_EINVALID when the
 * selected CPU is outside the 8 targets the GIC supports.
 */
static int gic_set_affinity(struct vmm_host_irq *d,
			    const struct vmm_cpumask *mask_val, bool force)
{
	struct gic_chip_data *chip = vmm_host_irq_get_chip_data(d);
	u32 shift = (d->hwirq % 4) * 8;
	u32 cpu = vmm_cpumask_first(mask_val);
	virtual_addr_t reg;
	u32 cur, byte_mask, target_bit;

	/* The distributor only has target bits for CPUs 0-7. */
	if (cpu >= 8) {
		return VMM_EINVALID;
	}

	/* Each target register covers four interrupts, one byte each. */
	reg = chip->dist_base + GICD_TARGET + (d->hwirq & ~3);
	byte_mask = 0xff << shift;
	target_bit = 1 << (cpu + shift);

	/* Read-modify-write only the byte belonging to this interrupt. */
	cur = gic_read(reg) & ~byte_mask;
	gic_write(cur | target_bit, reg);

	return 0;
}
/*
 * Write the per-interrupt bit for @d into the distributor register
 * bank that starts at @offset (e.g. enable-set / enable-clear).
 */
static void gic_poke_irq(struct gic_chip_data *gic, struct vmm_host_irq *d,
			 u32 offset)
{
	/* These banks hold one bit per interrupt, 32 per 32-bit word. */
	gic_write(1U << (d->hwirq % 32),
		  gic->dist_base + offset + (d->hwirq / 32) * 4);
}
/*
 * One-time distributor initialization: probe how many interrupts the
 * hardware supports, program trigger type, target CPU and priority for
 * all global interrupts, disable them, register the host IRQ chip for
 * every IRQ this GIC serves, then enable IRQ distribution.
 */
void __init gic_dist_init(struct gic_chip_data *gic, u32 irq_start)
{
	unsigned int max_irq, irq_limit, i;
	u32 cpumask = 1 << 0;	/* FIXME: smp_processor_id(); */
	virtual_addr_t base = gic->dist_base;

	/* Replicate the CPU mask into all four byte lanes of the
	 * target registers (one target byte per interrupt). */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Disable IRQ distribution */
	gic_write(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 */
	max_irq = gic_read(base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/*
	 * The GIC only supports up to 1020 interrupt sources.
	 * Limit this to either the architected maximum, or the
	 * platform maximum.
	 */
	if (max_irq > 1020) {
		max_irq = 1020;
	}

	/*
	 * Set all global interrupts to be level triggered, active low.
	 * (16 interrupts per config register.)
	 */
	for (i = 32; i < max_irq; i += 16) {
		gic_write(0, base + GIC_DIST_CONFIG + i * 4 / 16);
	}

	/*
	 * Set all global interrupts to this CPU only.
	 * (4 interrupts per target register.)
	 */
	for (i = 32; i < max_irq; i += 4) {
		gic_write(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
	}

	/*
	 * Set priority on all interrupts.
	 * (4 interrupts per priority register.)
	 */
	for (i = 0; i < max_irq; i += 4) {
		gic_write(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
	}

	/*
	 * Disable all interrupts.
	 * (32 interrupts per enable-clear register.)
	 */
	for (i = 0; i < max_irq; i += 32) {
		gic_write(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
	}

	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic->irq_offset + max_irq;
	if (irq_limit > GIC_NR_IRQS) {
		irq_limit = GIC_NR_IRQS;
	}

	/*
	 * Setup the Host IRQ subsystem.
	 */
	for (i = irq_start; i < irq_limit; i++) {
		vmm_host_irq_set_chip(i, &gic_chip);
		vmm_host_irq_set_chip_data(i, gic);
	}

	/* Enable IRQ distribution */
	gic_write(1, base + GIC_DIST_CTRL);
}
void gic_eoi_irq(struct vmm_host_irq *irq) { gic_write(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI); }
void gic_unmask_irq(struct vmm_host_irq *irq) { gic_write(1 << (irq->num % 32), gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4); }