int gic_dist_init(u32 gic_nr, virtual_addr_t base, u32 irq_start)
{
	unsigned int max_irq, i;
	u32 cpumask = 1 << 0; /* smp_processor_id(); */

	if (GIC_MAX_NR <= gic_nr) {
		return -1;
	}

	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	gic_data[gic_nr].dist_base = base;
	gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;

	gic_write(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 */
	max_irq = gic_read(base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/*
	 * The GIC only supports up to 1020 interrupt sources.
	 * Limit this to either the architected maximum, or the
	 * platform maximum.
	 */
	if (max_irq > max(1020, GIC_NR_IRQS))
		max_irq = max(1020, GIC_NR_IRQS);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < max_irq; i += 16)
		gic_write(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < max_irq; i += 4)
		gic_write(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all interrupts.
	 */
	for (i = 0; i < max_irq; i += 4)
		gic_write(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.
	 */
	for (i = 0; i < max_irq; i += 32)
		gic_write(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	gic_write(1, base + GIC_DIST_CTRL);

	return 0;
}
static int gic_peek_irq(struct gic_chip_data *gic, struct vmm_host_irq *irq,
			u32 offset)
{
	u32 irq_no = gic_irq(gic, irq);
	u32 mask = 1 << (irq_no % 32);

	return !!(gic_read(gic->dist_base + offset + (irq_no / 32) * 4) & mask);
}
static int gic_set_type(struct vmm_host_irq *irq, u32 type)
{
	struct gic_chip_data *gic = vmm_host_irq_get_chip_data(irq);
	virtual_addr_t base = gic->dist_base;
	u32 irq_no = gic_irq(gic, irq);
	u32 enablemask = 1 << (irq_no % 32);
	u32 enableoff = (irq_no / 32) * 4;
	u32 confmask = 0x2 << ((irq_no % 16) * 2);
	u32 confoff = (irq_no / 16) * 4;
	bool enabled = FALSE;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq_no < 16) {
		return VMM_EINVALID;
	}

	if (type != VMM_IRQ_TYPE_LEVEL_HIGH &&
	    type != VMM_IRQ_TYPE_EDGE_RISING) {
		return VMM_EINVALID;
	}

	val = gic_read(base + GIC_DIST_CONFIG + confoff);
	if (type == VMM_IRQ_TYPE_LEVEL_HIGH) {
		val &= ~confmask;
	} else if (type == VMM_IRQ_TYPE_EDGE_RISING) {
		val |= confmask;
	}

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (gic_read(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		gic_write(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = TRUE;
	}

	gic_write(val, base + GIC_DIST_CONFIG + confoff);

	if (enabled) {
		gic_write(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
	}

	return 0;
}
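/*
 * Illustrative sketch (not part of the driver): the distributor's banked
 * configuration registers (GICD_ICFGR) hold a 2-bit field per interrupt,
 * so 16 interrupts share each 32-bit register.  The confmask/confoff
 * arithmetic in gic_set_type() above follows directly from that layout.
 * This standalone program only demonstrates the arithmetic; the IRQ
 * numbers used are made-up examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned int irq_no;

	for (irq_no = 32; irq_no < 40; irq_no++) {
		/* Bit 1 of the 2-bit field selects edge (1) vs. level (0) */
		unsigned int confmask = 0x2 << ((irq_no % 16) * 2);
		/* Each 32-bit configuration register covers 16 interrupts */
		unsigned int confoff = (irq_no / 16) * 4;

		printf("irq %u: config register offset 0x%x, mask 0x%08x\n",
		       irq_no, confoff, confmask);
	}
	return 0;
}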
static u32 gic_active_irq(u32 cpu_irq_nr)
{
	u32 ret = gic_read(gic_data[0].cpu_base + GICC_INTACK) & 0x3FF;

	if (ret < 1021) {
		ret = vmm_host_irqdomain_find_mapping(gic_data[0].domain, ret);
	} else {
		ret = UINT_MAX;
	}

	return ret;
}
u32 gic_active_irq(u32 gic_nr)
{
	u32 ret;

	if (GIC_MAX_NR <= gic_nr) {
		return 0xFFFFFFFF;
	}

	ret = gic_read(gic_data[gic_nr].cpu_base + GIC_CPU_INTACK) & 0x3FF;
	ret += gic_data[gic_nr].irq_offset;

	return ret;
}
static u32 gic_active_irq(u32 cpu_irq_nr)
{
	u32 ret;

	ret = gic_read(gic_data[0].cpu_base + GIC_CPU_INTACK) & 0x3FF;
	if (ret < 1021) {
		ret += gic_data[0].irq_start;
	} else {
		ret = UINT_MAX;
	}

	return ret;
}
static int __init gic_init_bases(struct vmm_devtree_node *node,
				 u32 gic_nr, bool eoimode, u32 irq_start,
				 virtual_addr_t cpu_base,
				 virtual_addr_t cpu2_base,
				 virtual_addr_t dist_base)
{
	u32 max_irqs;
	struct gic_chip_data *gic;

	BUG_ON(gic_nr >= GIC_MAX_NR);

	gic = &gic_data[gic_nr];

	gic->eoimode = eoimode;

	/* For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	gic->hwirq_base = (gic_nr == 0) ? 16 : 32;

	gic->dist_base = dist_base;
	gic->cpu_base = cpu_base;
	gic->cpu2_base = cpu2_base;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	max_irqs = gic_read(gic->dist_base + GICD_CTR) & 0x1f;
	max_irqs = (max_irqs + 1) * 32;
	if (max_irqs > 1020)
		max_irqs = 1020;
	gic->max_irqs = max_irqs;

	gic->domain = vmm_host_irqdomain_add(node, (int)irq_start, max_irqs,
					     &gic_ops, gic);
	if (!gic->domain) {
		return VMM_EFAIL;
	}

	gic_dist_init(gic);
	gic_cpu_init(gic);

	return VMM_OK;
}
static vmm_irq_return_t gic_handle_cascade_irq(int irq, void *dev)
{
	struct gic_chip_data *gic = dev;
	u32 cascade_irq, gic_irq;

	gic_irq = gic_read(gic->cpu_base + GICC_INTACK) & 0x3FF;
	if (gic_irq == 1023) {
		return VMM_IRQ_NONE;
	}

	cascade_irq = vmm_host_irqdomain_find_mapping(gic->domain, gic_irq);
	if (likely((32 <= gic_irq) && (gic_irq <= 1020))) {
		vmm_host_generic_irq_exec(cascade_irq);
	}

	return VMM_IRQ_HANDLED;
}
static int __init gic_init_bases(u32 gic_nr, bool eoimode, u32 irq_start,
				 virtual_addr_t cpu_base,
				 virtual_addr_t cpu2_base,
				 virtual_addr_t dist_base)
{
	u32 max_irqs;
	struct gic_chip_data *gic;

	BUG_ON(gic_nr >= GIC_MAX_NR);

	gic = &gic_data[gic_nr];

	gic->eoimode = eoimode;
	gic->irq_start = irq_start;

	/* For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	gic->hwirq_base = (gic_nr == 0) ? 16 : 32;

	gic->dist_base = dist_base;
	gic->cpu_base = cpu_base;
	gic->cpu2_base = cpu2_base;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	max_irqs = gic_read(gic->dist_base + GIC_DIST_CTR) & 0x1f;
	max_irqs = (max_irqs + 1) * 32;
	if (max_irqs > 1020)
		max_irqs = 1020;

	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	BUG_ON((max_irqs + gic->irq_start) > CONFIG_HOST_IRQ_COUNT);

	gic->max_irqs = max_irqs;

	gic_dist_init(gic);
	gic_cpu_init(gic);

	return VMM_OK;
}
static int gic_set_affinity(struct vmm_host_irq *irq,
			    const struct vmm_cpumask *mask_val, bool force)
{
	virtual_addr_t reg;
	u32 shift = (irq->num % 4) * 8;
	u32 cpu = vmm_cpumask_first(mask_val);
	u32 val, mask, bit;

	if (cpu >= 8)
		return VMM_EINVALID;

	reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
	mask = 0xff << shift;
	bit = 1 << (cpu + shift);

	val = gic_read(reg) & ~mask;
	gic_write(val | bit, reg);

	return 0;
}
static int gic_set_affinity(struct vmm_host_irq *d,
			    const struct vmm_cpumask *mask_val, bool force)
{
	virtual_addr_t reg;
	u32 shift = (d->hwirq % 4) * 8;
	u32 cpu = vmm_cpumask_first(mask_val);
	u32 val, mask, bit;
	struct gic_chip_data *gic = vmm_host_irq_get_chip_data(d);

	if (cpu >= 8)
		return VMM_EINVALID;

	reg = gic->dist_base + GICD_TARGET + (d->hwirq & ~3);
	mask = 0xff << shift;
	bit = 1 << (cpu + shift);

	val = gic_read(reg) & ~mask;
	gic_write(val | bit, reg);

	return 0;
}
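/*
 * Illustrative sketch (not part of the driver): the distributor's target
 * registers hold one 8-bit CPU mask per interrupt, so 4 interrupts share
 * each 32-bit register.  gic_set_affinity() above derives the register
 * from (hwirq & ~3) and the byte position from (hwirq % 4) * 8.  The
 * 0x800 base offset used here (GICD_ITARGETSR) and the example hwirq/cpu
 * values are assumptions for demonstration only; the driver uses its own
 * GIC_DIST_TARGET / GICD_TARGET definitions.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hwirq = 45;	/* example hardware IRQ number */
	unsigned int cpu = 2;		/* example target CPU */

	unsigned int reg_off = 0x800 + (hwirq & ~3u);	/* assumed ITARGETSR base */
	unsigned int shift = (hwirq % 4) * 8;
	unsigned int mask = 0xffu << shift;		/* byte to clear */
	unsigned int bit = 1u << (cpu + shift);		/* new target bit */

	printf("hwirq %u -> distributor offset 0x%x, clear 0x%08x, set 0x%08x\n",
	       hwirq, reg_off, mask, bit);
	return 0;
}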
static int gic_peek_irq(struct gic_chip_data *gic, struct vmm_host_irq *d,
			u32 offset)
{
	u32 mask = 1 << (d->hwirq % 32);

	return !!(gic_read(gic->dist_base + offset + (d->hwirq / 32) * 4) & mask);
}
void __init gic_dist_init(struct gic_chip_data *gic, u32 irq_start)
{
	unsigned int max_irq, irq_limit, i;
	u32 cpumask = 1 << 0; /* FIXME: smp_processor_id(); */
	virtual_addr_t base = gic->dist_base;

	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Disable IRQ distribution */
	gic_write(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 */
	max_irq = gic_read(base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/*
	 * The GIC only supports up to 1020 interrupt sources.
	 * Limit this to either the architected maximum, or the
	 * platform maximum.
	 */
	if (max_irq > 1020) {
		max_irq = 1020;
	}

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < max_irq; i += 16) {
		gic_write(0, base + GIC_DIST_CONFIG + i * 4 / 16);
	}

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < max_irq; i += 4) {
		gic_write(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
	}

	/*
	 * Set priority on all interrupts.
	 */
	for (i = 0; i < max_irq; i += 4) {
		gic_write(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
	}

	/*
	 * Disable all interrupts.
	 */
	for (i = 0; i < max_irq; i += 32) {
		gic_write(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
	}

	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic->irq_offset + max_irq;
	if (irq_limit > GIC_NR_IRQS) {
		irq_limit = GIC_NR_IRQS;
	}

	/*
	 * Setup the Host IRQ subsystem.
	 */
	for (i = irq_start; i < irq_limit; i++) {
		vmm_host_irq_set_chip(i, &gic_chip);
		vmm_host_irq_set_chip_data(i, gic);
	}

	/* Enable IRQ distribution */
	gic_write(1, base + GIC_DIST_CTRL);
}