/*
 * Initialize the interrupt controller (INTC).
 *
 * @base:  physical base address of the INTC register block
 * @nrirq: number of host IRQ lines managed by this controller
 *
 * Maps the register window, soft-resets the controller, and hooks
 * every IRQ into the host IRQ subsystem with fast-EOI handling.
 *
 * Returns VMM_OK on success, VMM_ENODEV if the register window
 * cannot be mapped.
 */
int __init intc_init(physical_addr_t base, u32 nrirq)
{
	u32 i, tmp;

	intc_base = vmm_host_iomap(base, 0x1000);
	if (!intc_base) {
		/* Without a register mapping we must not touch the
		 * controller at all. */
		return VMM_ENODEV;
	}
	intc_nrirq = nrirq;

	/* Soft reset the controller */
	tmp = intc_read(INTC_SYSCONFIG);
	tmp |= INTC_SYSCONFIG_SOFTRST_M;
	intc_write(INTC_SYSCONFIG, tmp);

	/* Wait for reset to complete.
	 * NOTE(review): this poll has no timeout; a hung controller
	 * stalls boot here. Presumably acceptable at early init. */
	while (!(intc_read(INTC_SYSSTATUS) & INTC_SYSSTATUS_RESETDONE_M))
		;

	/* Enable autoidle (note: this write replaces, not ORs, SYSCONFIG) */
	intc_write(INTC_SYSCONFIG, INTC_SYSCONFIG_AUTOIDLE_M);

	/* Setup the Host IRQ subsystem: register chip ops and the
	 * fast-EOI flow handler for every line we own. */
	for (i = 0; i < intc_nrirq; i++) {
		vmm_host_irq_set_chip(i, &intc_chip);
		vmm_host_irq_set_handler(i, vmm_handle_fast_eoi);
	}

	/* Set active IRQ callback */
	vmm_host_irq_set_active_callback(intc_active_irq);

	return VMM_OK;
}
/*
 * Probe and initialize one GIC instance from its devicetree node.
 *
 * @node:    devicetree node of this GIC (must be non-NULL)
 * @parent:  parent interrupt controller node, or NULL if this GIC is
 *           the root controller
 * @eoimode: EOI mode flag passed through to gic_init_bases()
 *
 * Returns VMM_OK on success or a negative VMM_E* error code.
 *
 * NOTE(review): mapping failures for the dist/cpu regions only WARN and
 * fall through (mirroring the warn-and-continue style of the Linux GIC
 * driver) — only the cpu2 fall-back path returns errors.
 */
static int __init gic_devtree_init(struct vmm_devtree_node *node,
				   struct vmm_devtree_node *parent,
				   bool eoimode)
{
	int rc;
	u32 irq, irq_start = 0;
	physical_size_t cpu_sz;
	virtual_addr_t cpu_base;
	virtual_addr_t cpu2_base;
	virtual_addr_t dist_base;

	if (WARN_ON(!node)) {
		return VMM_ENODEV;
	}

	/* Map the distributor (reg index 0) and CPU interface (reg index 1) */
	rc = vmm_devtree_request_regmap(node, &dist_base, 0, "GIC Dist");
	WARN(rc, "unable to map gic dist registers\n");

	rc = vmm_devtree_request_regmap(node, &cpu_base, 1, "GIC CPU");
	WARN(rc, "unable to map gic cpu registers\n");

	/* Optional secondary CPU interface region (reg index 4). If the
	 * devicetree does not provide it, derive it from the size of the
	 * primary CPU interface region — presumably the 64KiB vs 4KiB
	 * register stride variants of the GIC CPU interface (TODO confirm
	 * against the GIC integration used by supported boards). */
	rc = vmm_devtree_request_regmap(node, &cpu2_base, 4, "GIC CPU2");
	if (rc) {
		rc = vmm_devtree_regsize(node, &cpu_sz, 1);
		if (rc) {
			return rc;
		}
		if (cpu_sz >= 0x20000) {
			cpu2_base = cpu_base + 0x10000;
		} else if (cpu_sz >= 0x2000) {
			cpu2_base = cpu_base + 0x1000;
		} else {
			/* No secondary interface available */
			cpu2_base = 0x0;
		}
	}

	/* Optional "irq_start" property; default to 0 when absent */
	if (vmm_devtree_read_u32(node, "irq_start", &irq_start)) {
		irq_start = 0;
	}

	rc = gic_init_bases(node, gic_cnt, eoimode, irq_start,
			    cpu_base, cpu2_base, dist_base);
	if (rc) {
		return rc;
	}

	if (parent) {
		/* Cascaded GIC: route through the parent controller.
		 * 1020 is the fall-back parent IRQ when the devicetree
		 * does not specify "parent_irq". */
		if (vmm_devtree_read_u32(node, "parent_irq", &irq)) {
			irq = 1020;
		}
		gic_cascade_irq(gic_cnt, irq);
	} else {
		/* Root GIC: it supplies the active-IRQ callback */
		vmm_host_irq_set_active_callback(gic_active_irq);
	}

	/* Count initialized GIC instances (module-level state) */
	gic_cnt++;

	return VMM_OK;
}
/*
 * Register one Versatile FPGA interrupt controller instance.
 *
 * @base:       virtual base address of the controller registers
 * @name:       chip name reported to the host IRQ subsystem
 * @irq_start:  first host IRQ number served by this controller
 * @parent_irq: cascade parent IRQ, or 0xFFFFFFFF if this controller
 *              is the root (then it installs the active-IRQ callback)
 * @valid:      bitmask of the 32 lines that are actually wired up
 * @node:       devicetree node of this controller (stored, may be NULL)
 *
 * Silently returns (with a diagnostic) when the static device table
 * is full.
 */
void __init fpga_irq_init(void *base, const char *name, u32 irq_start,
			  u32 parent_irq, u32 valid,
			  struct vmm_devtree_node *node)
{
	struct fpga_irq_data *f;
	u32 i;

	if (fpga_irq_id >= array_size(fpga_irq_devices)) {
		vmm_printf("%s: too few FPGA IRQ controllers, "
			   "increase CONFIG_VERSATILE_FPGA_IRQ_NR\n",
			   __func__);
		return;
	}

	f = &fpga_irq_devices[fpga_irq_id];
	f->irq_start = irq_start;
	f->node = node;
	f->base = base;
	f->chip.name = name;
	/* ack is the same operation as mask on this hardware */
	f->chip.irq_ack = fpga_irq_mask;
	f->chip.irq_mask = fpga_irq_mask;
	f->chip.irq_unmask = fpga_irq_unmask;
	f->valid = valid;
	f->used_irqs = 0;

	if (parent_irq != 0xFFFFFFFF) {
		/* Cascaded behind a parent controller */
		fpga_cascade_irq(f, name, parent_irq);
	} else {
		/* Root controller: supply the active-IRQ callback */
		vmm_host_irq_set_active_callback(fpga_active_irq);
	}

	/* This will allocate all valid descriptors in the linear case.
	 * Use an unsigned shift: (1 << 31) on a signed int is undefined
	 * behavior (CERT INT34-C). */
	for (i = 0; i < 32; i++) {
		if (!(valid & (1U << i))) {
			continue;
		}
		vmm_host_irq_set_chip(f->irq_start + i, &f->chip);
		vmm_host_irq_set_chip_data(f->irq_start + i, f);
		vmm_host_irq_set_handler(f->irq_start + i,
					 vmm_handle_level_irq);
		f->used_irqs++;
	}

	fpga_irq_id++;
}