static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask = 1 << vmm_smp_processor_id();
	virtual_addr_t base = gic->dist_base;

	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Disable IRQ distribution */
	gic_write(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic->max_irqs; i += 16) {
		gic_write(0, base + GIC_DIST_CONFIG + i * 4 / 16);
	}

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic->max_irqs; i += 4) {
		gic_write(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
	}

	/*
	 * Set priority on all interrupts.
	 */
	for (i = 0; i < gic->max_irqs; i += 4) {
		gic_write(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
	}

	/*
	 * Disable all interrupts.
	 */
	for (i = 0; i < gic->max_irqs; i += 32) {
		gic_write(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
	}

	/*
	 * Setup the Host IRQ subsystem.
	 * Note: We handle all interrupts including SGIs and PPIs via C code.
	 * The Linux kernel handles peripheral interrupts via C code and
	 * SGI/PPI via assembly code.
	 */
	for (i = gic->irq_start; i < (gic->irq_start + gic->max_irqs); i++) {
		vmm_host_irq_set_chip(i, &gic_chip);
		vmm_host_irq_set_chip_data(i, gic);
		vmm_host_irq_set_handler(i, vmm_handle_fast_eoi);
		/* Mark SGIs and PPIs as per-CPU IRQs */
		if (i < 32) {
			vmm_host_irq_mark_per_cpu(i);
		}
	}

	/* Enable IRQ distribution */
	gic_write(1, base + GIC_DIST_CTRL);
}
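/*
 * Illustration only (not part of the driver): the per-IRQ strides used in
 * gic_dist_init() follow from the GICv2 distributor register layout.
 * GIC_DIST_CONFIG (GICD_ICFGRn) packs 2 configuration bits per interrupt,
 * i.e. 16 IRQs per 32-bit register; GIC_DIST_TARGET (GICD_ITARGETSRn) and
 * GIC_DIST_PRI (GICD_IPRIORITYRn) pack one byte per interrupt, i.e. 4 IRQs
 * per register; GIC_DIST_ENABLE_CLEAR (GICD_ICENABLERn) packs 1 bit per
 * interrupt, i.e. 32 IRQs per register. With 4-byte registers, the byte
 * offset of the register covering interrupt i is (i / irqs_per_reg) * 4,
 * which is what "i * 4 / irqs_per_reg" computes for i aligned to the loop
 * stride. The helper names below are hypothetical and only restate that
 * arithmetic.
 */
static inline u32 gic_dist_cfg_offset(u32 irq)    { return (irq / 16) * 4; }
static inline u32 gic_dist_target_offset(u32 irq) { return (irq / 4) * 4; }
static inline u32 gic_dist_enable_offset(u32 irq) { return (irq / 32) * 4; }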
int __init intc_init(physical_addr_t base, u32 nrirq)
{
	u32 i, tmp;

	intc_base = vmm_host_iomap(base, 0x1000);
	intc_nrirq = nrirq;

	tmp = intc_read(INTC_SYSCONFIG);
	tmp |= INTC_SYSCONFIG_SOFTRST_M;	/* soft reset */
	intc_write(INTC_SYSCONFIG, tmp);

	/* Wait for reset to complete */
	while (!(intc_read(INTC_SYSSTATUS) & INTC_SYSSTATUS_RESETDONE_M))
		;

	/* Enable autoidle */
	intc_write(INTC_SYSCONFIG, INTC_SYSCONFIG_AUTOIDLE_M);

	/*
	 * Setup the Host IRQ subsystem.
	 */
	for (i = 0; i < intc_nrirq; i++) {
		vmm_host_irq_set_chip(i, &intc_chip);
		vmm_host_irq_set_handler(i, vmm_handle_fast_eoi);
	}

	/* Set active IRQ callback */
	vmm_host_irq_set_active_callback(intc_active_irq);

	return VMM_OK;
}
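/*
 * Illustration only: a hypothetical call into intc_init() from board init
 * code. The values below assume an OMAP3-style MPU INTC (96 interrupt
 * lines at physical address 0x48200000); the actual base address and line
 * count come from the board support code or device tree, and the function
 * name here is made up for the example.
 */
static int __init board_intc_init_example(void)
{
	return intc_init(0x48200000, 96);
}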
void __init fpga_irq_init(void *base, const char *name, u32 irq_start,
			  u32 parent_irq, u32 valid,
			  struct vmm_devtree_node *node)
{
	struct fpga_irq_data *f;
	int i;

	if (fpga_irq_id >= array_size(fpga_irq_devices)) {
		vmm_printf("%s: too few FPGA IRQ controllers, "
			   "increase CONFIG_VERSATILE_FPGA_IRQ_NR\n",
			   __func__);
		return;
	}

	f = &fpga_irq_devices[fpga_irq_id];
	f->irq_start = irq_start;
	f->node = node;
	f->base = base;
	f->chip.name = name;
	f->chip.irq_ack = fpga_irq_mask;
	f->chip.irq_mask = fpga_irq_mask;
	f->chip.irq_unmask = fpga_irq_unmask;
	f->valid = valid;
	f->used_irqs = 0;

	if (parent_irq != 0xFFFFFFFF) {
		fpga_cascade_irq(f, name, parent_irq);
	} else {
		vmm_host_irq_set_active_callback(fpga_active_irq);
	}

	/* This will allocate all valid descriptors in the linear case */
	for (i = 0; i < 32; i++) {
		if (!(valid & (1 << i))) {
			continue;
		}

		vmm_host_irq_set_chip(f->irq_start + i, &f->chip);
		vmm_host_irq_set_chip_data(f->irq_start + i, f);
		vmm_host_irq_set_handler(f->irq_start + i,
					 vmm_handle_level_irq);
		f->used_irqs++;
	}

	fpga_irq_id++;
}
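/*
 * Illustration only: a hypothetical registration of one FPGA IRQ
 * controller. The mapped base, first host IRQ, cascade parent IRQ and
 * valid-source mask below are made-up placeholder values; real ones come
 * from the board code or its device tree node. Passing 0xFFFFFFFF as
 * parent_irq instead would make the controller primary (no cascade), as
 * handled in fpga_irq_init() above.
 */
static void __init board_fpga_sic_init_example(void *mapped_base,
					       struct vmm_devtree_node *node)
{
	/* 21 sources starting at host IRQ 64, cascaded from parent IRQ 31 */
	fpga_irq_init(mapped_base, "SIC", 64, 31, 0x001fffff, node);
}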
void __init gic_dist_init(struct gic_chip_data *gic, u32 irq_start)
{
	unsigned int max_irq, irq_limit, i;
	u32 cpumask = 1 << 0; /* FIXME: smp_processor_id(); */
	virtual_addr_t base = gic->dist_base;

	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Disable IRQ distribution */
	gic_write(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 */
	max_irq = gic_read(base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/*
	 * The GIC only supports up to 1020 interrupt sources.
	 * Limit this to either the architected maximum, or the
	 * platform maximum.
	 */
	if (max_irq > 1020) {
		max_irq = 1020;
	}

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < max_irq; i += 16) {
		gic_write(0, base + GIC_DIST_CONFIG + i * 4 / 16);
	}

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < max_irq; i += 4) {
		gic_write(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
	}

	/*
	 * Set priority on all interrupts.
	 */
	for (i = 0; i < max_irq; i += 4) {
		gic_write(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
	}

	/*
	 * Disable all interrupts.
	 */
	for (i = 0; i < max_irq; i += 32) {
		gic_write(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
	}

	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic->irq_offset + max_irq;
	if (irq_limit > GIC_NR_IRQS) {
		irq_limit = GIC_NR_IRQS;
	}

	/*
	 * Setup the Host IRQ subsystem.
	 */
	for (i = irq_start; i < irq_limit; i++) {
		vmm_host_irq_set_chip(i, &gic_chip);
		vmm_host_irq_set_chip_data(i, gic);
	}

	/* Enable IRQ distribution */
	gic_write(1, base + GIC_DIST_CTRL);
}
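/*
 * Illustration only: a hypothetical helper restating the probe above.
 * GIC_DIST_CTR (GICD_TYPER) reports ITLinesNumber in its low 5 bits, and
 * the supported line count is 32 * (ITLinesNumber + 1), capped at the
 * architected maximum of 1020. For example, ITLinesNumber == 2 gives
 * (2 + 1) * 32 = 96 lines.
 */
static u32 gic_dist_max_irqs_example(virtual_addr_t dist_base)
{
	u32 lines = ((gic_read(dist_base + GIC_DIST_CTR) & 0x1f) + 1) * 32;

	return (lines > 1020) ? 1020 : lines;
}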