/* * Initialize handlers for the set of interrupts caused by hardware errors * and power system events. */ void init_ras_IRQ(void) { struct device_node *np; unsigned int *ireg, len, i; if((np = find_path_device("/event-sources/internal-errors")) && (ireg = (unsigned int *)get_property(np, "open-pic-interrupt", &len))) { for(i=0; i<(len / sizeof(*ireg)); i++) { request_irq(virt_irq_create_mapping(*(ireg)) + NUM_8259_INTERRUPTS, &ras_error_interrupt, 0, "RAS_ERROR", NULL); ireg++; } } if((np = find_path_device("/event-sources/epow-events")) && (ireg = (unsigned int *)get_property(np, "open-pic-interrupt", &len))) { for(i=0; i<(len / sizeof(*ireg)); i++) { request_irq(virt_irq_create_mapping(*(ireg)) + NUM_8259_INTERRUPTS, &ras_epow_interrupt, 0, "RAS_EPOW", NULL); ireg++; } } }
/*
 * Walk the interrupt list stored in property @propname of node @np and
 * register @handler (under @name) for each entry.  Entries are strided by
 * the node's #interrupt-cells count.  The scan stops at the first mapping
 * or registration failure, which is reported via printk.
 */
static void request_ras_irqs(struct device_node *np, char *propname,
			     irqreturn_t (*handler)(int, void *, struct pt_regs *),
			     const char *name)
{
	unsigned int *intrs, proplen, count;
	int virq, cells, rc;

	intrs = (unsigned int *)get_property(np, propname, &proplen);
	if (intrs == NULL)
		return;

	/* Number of cells consumed per interrupt specifier. */
	cells = prom_n_intr_cells(np);
	count = proplen / (cells * sizeof(*intrs));

	while (count--) {
		virq = virt_irq_create_mapping(*intrs);
		if (virq == NO_IRQ) {
			printk(KERN_ERR "Unable to allocate interrupt "
			       "number for %s\n", np->full_name);
			return;
		}

		rc = request_irq(irq_offset_up(virq), handler, 0, name, NULL);
		if (rc) {
			printk(KERN_ERR "Unable to request interrupt %d for "
			       "%s\n", irq_offset_up(virq), np->full_name);
			return;
		}

		intrs += cells;
	}
}
/*
 * Initialize handlers for the set of interrupts caused by hardware errors
 * and power system events.
 *
 * Reads the "open-pic-interrupt" property of the /event-sources nodes,
 * maps each hardware interrupt to a virtual irq, and registers the
 * corresponding RAS handler.
 */
static int __init init_ras_IRQ(void)
{
	struct device_node *np;
	unsigned int *ireg, len, i;
	int virq;

	/* Internal (hardware) error event sources. */
	if ((np = of_find_node_by_path("/event-sources/internal-errors")) &&
	    (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
						 &len))) {
		for (i = 0; i < (len / sizeof(*ireg)); i++) {
			virq = virt_irq_create_mapping(*ireg);
			if (virq == NO_IRQ) {
				printk(KERN_ERR "Unable to allocate interrupt "
				       "number for %s\n", np->full_name);
				break;
			}
			/*
			 * BUG FIX: the request_irq() return value was
			 * previously ignored; report a failed registration.
			 */
			if (request_irq(irq_offset_up(virq),
					ras_error_interrupt, 0,
					"RAS_ERROR", NULL))
				printk(KERN_ERR "Unable to request interrupt "
				       "%d for %s\n", irq_offset_up(virq),
				       np->full_name);
			ireg++;
		}
	}
	of_node_put(np);

	/* Environmental/power warning (EPOW) event sources. */
	if ((np = of_find_node_by_path("/event-sources/epow-events")) &&
	    (ireg = (unsigned int *)get_property(np, "open-pic-interrupt",
						 &len))) {
		for (i = 0; i < (len / sizeof(*ireg)); i++) {
			virq = virt_irq_create_mapping(*ireg);
			if (virq == NO_IRQ) {
				/*
				 * BUG FIX: the message was split as
				 * "interrupt " " number", printing a double
				 * space; now matches the message above.
				 */
				printk(KERN_ERR "Unable to allocate interrupt "
				       "number for %s\n", np->full_name);
				break;
			}
			if (request_irq(irq_offset_up(virq),
					ras_epow_interrupt, 0,
					"RAS_EPOW", NULL))
				printk(KERN_ERR "Unable to request interrupt "
				       "%d for %s\n", irq_offset_up(virq),
				       np->full_name);
			ireg++;
		}
	}
	of_node_put(np);

	return 1;
}
/*
 * Find every "hvterm-protocol" vty node in the device tree and set up an
 * hvsi port structure for it (up to MAX_NR_HVSI_CONSOLES).  Registers the
 * hvsi console driver if at least one port was found.
 */
static int __init hvsi_console_init(void)
{
	struct device_node *vty;

	hvsi_wait = poll_for_state; /* no irqs yet; must poll */

	/* search device tree for vty nodes */
	for (vty = of_find_compatible_node(NULL, "serial", "hvterm-protocol");
	     vty != NULL;
	     vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) {
		struct hvsi_struct *hp;
		uint32_t *vtermno;
		uint32_t *irq;

		vtermno = (uint32_t *)get_property(vty, "reg", NULL);
		irq = (uint32_t *)get_property(vty, "interrupts", NULL);
		if (!vtermno || !irq)
			continue; /* node lacks required properties; skip it */

		if (hvsi_count >= MAX_NR_HVSI_CONSOLES) {
			/* drop the iterator's reference before bailing out */
			of_node_put(vty);
			break;
		}

		hp = &hvsi_ports[hvsi_count];
		INIT_WORK(&hp->writer, hvsi_write_worker, hp);
		INIT_WORK(&hp->handshaker, hvsi_handshaker, hp);
		init_waitqueue_head(&hp->emptyq);
		init_waitqueue_head(&hp->stateq);
		spin_lock_init(&hp->lock);
		hp->index = hvsi_count;
		hp->inbuf_end = hp->inbuf;
		hp->state = HVSI_CLOSED;
		hp->vtermno = *vtermno;
		hp->virq = virt_irq_create_mapping(irq[0]);
		if (hp->virq == NO_IRQ) {
			/*
			 * BUG FIX: print the hardware interrupt number that
			 * failed to map (irq[0]) rather than hp->virq, which
			 * is NO_IRQ here and carries no information.
			 */
			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
				__FUNCTION__, irq[0]);
			continue;
		} else
			hp->virq = irq_offset_up(hp->virq);

		hvsi_count++;
	}

	if (hvsi_count)
		register_console(&hvsi_con_driver);

	return 0;
}
/*
 * Probe and initialize the XICS interrupt presentation controller.
 *
 * Steps, in order:
 *   1. Look up the RTAS tokens used to manipulate interrupt sources.
 *   2. Walk the "PowerPC-External-Interrupt-Presentation" nodes to collect
 *      the per-CPU presentation register addresses/sizes into inodes[].
 *   3. Find the boot CPU's interrupt server numbers.
 *   4. Locate the ISA interrupt controller and map its cascade interrupt.
 *   5. Map the per-CPU presentation areas and install irq_desc handlers.
 *   6. Enable interrupt delivery, hook up the 8259 cascade and (SMP) IPIs.
 *
 * NOTE(review): failures of required firmware properties hang the machine
 * with `while (1);` — presumably deliberate, since the platform cannot take
 * interrupts at all without XICS; confirm before changing.
 */
void xics_init_IRQ( void )
{
	int i;
	unsigned long intr_size = 0;
	struct device_node *np;
	uint *ireg, ilen, indx=0;

	/* RTAS tokens for reading/writing XIVEs and masking sources. */
	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_off = rtas_token("ibm,int-off");

	np = find_type_devices("PowerPC-External-Interrupt-Presentation");
	if (!np) {
		printk(KERN_WARNING "Can't find Interrupt Presentation\n");
		udbg_printf("Can't find Interrupt Presentation\n");
		while (1);
	}
nextnode:
	ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		indx = *ireg;
	}

	ireg = (uint *)get_property(np, "reg", &ilen);
	if (!ireg) {
		printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
		udbg_printf("Can't find Interrupt Reg Property\n");
		while (1);
	}

	/*
	 * "reg" is a list of (addr, size) pairs of 64-bit values encoded as
	 * big-endian 32-bit cell pairs: high word first, then low word.
	 */
	while (ilen) {
		inodes[indx].addr = (unsigned long long)*ireg++ << 32;
		ilen -= sizeof(uint);
		inodes[indx].addr |= *ireg++;
		ilen -= sizeof(uint);
		inodes[indx].size = (unsigned long long)*ireg++ << 32;
		ilen -= sizeof(uint);
		inodes[indx].size |= *ireg++;
		ilen -= sizeof(uint);
		indx++;
		if (indx >= NR_CPUS) break;
	}

	/* More presentation nodes may follow; keep filling inodes[]. */
	np = np->next;
	if ((indx < NR_CPUS) && np) goto nextnode;

	/* Find the server numbers for the boot cpu. */
	for (np = find_type_devices("cpu"); np; np = np->next) {
		ireg = (uint *)get_property(np, "reg", &ilen);
		if (ireg && ireg[0] == hard_smp_processor_id()) {
			ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
			i = ilen / sizeof(int);
			if (ireg && i > 0) {
				default_server = ireg[0];
				default_distrib_server = ireg[i-1]; /* take last element */
			}
			break;
		}
	}

	/* Fall-back presentation area: the first node collected above. */
	intr_base = inodes[0].addr;
	intr_size = (ulong)inodes[0].size;

	np = find_type_devices("interrupt-controller");
	if (!np) {
		/* No ISA bridge: mark the 8259 cascade as absent. */
		printk(KERN_WARNING "xics: no ISA Interrupt Controller\n");
		xics_irq_8259_cascade = -1;
	} else {
		ireg = (uint *) get_property(np, "interrupts", 0);
		if (!ireg) {
			printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
			udbg_printf("Can't find ISA Interrupts Property\n");
			while (1);
		}
		xics_irq_8259_cascade_real = *ireg;
		xics_irq_8259_cascade = virt_irq_create_mapping(xics_irq_8259_cascade_real);
	}

	if (naca->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
		/* Map each CPU's own presentation registers, uncached. */
		for (i = 0; i < naca->processorCount; ++i) {
			xics_info.per_cpu[i] =
				__ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr,
					  (ulong)inodes[get_hard_smp_processor_id(i)].size,
					  _PAGE_NO_CACHE);
		}
#else
		xics_info.per_cpu[0] = __ioremap((ulong)intr_base, intr_size, _PAGE_NO_CACHE);
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_PSERIES
	/* actually iSeries does not use any of xics...but it has link dependencies
	 * for now, except this new one... */
	} else if (naca->platform == PLATFORM_PSERIES_LPAR) {
		/* LPAR uses hypervisor calls instead of direct MMIO. */
		ops = &pSeriesLP_ops;
#endif
	}

	/* irqs 0-15 route through the cascaded 8259; the rest are XICS. */
	xics_8259_pic.enable = i8259_pic.enable;
	xics_8259_pic.disable = i8259_pic.disable;
	for (i = 0; i < 16; ++i)
		irq_desc[i].handler = &xics_8259_pic;
	for (; i < NR_IRQS; ++i)
		irq_desc[i].handler = &xics_pic;

	/* Set CPPR to 0xff (most favored) so all interrupts are accepted. */
	ops->cppr_info(0, 0xff);
	iosync();

	if (xics_irq_8259_cascade != -1) {
		if (request_irq(xics_irq_8259_cascade + XICS_IRQ_OFFSET, no_action,
				0, "8259 cascade", 0))
			printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
		i8259_init();
	}

#ifdef CONFIG_SMP
	/* The IPI uses a fixed identity real<->virtual irq mapping. */
	real_irq_to_virt_map[XICS_IPI] = virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
	request_irq(XICS_IPI + XICS_IRQ_OFFSET, xics_ipi_action, 0, "IPI", 0);
	irq_desc[XICS_IPI+XICS_IRQ_OFFSET].status |= IRQ_PER_CPU;
#endif
}