/*
 * Common functions
 */

/*
 * Dispatch one incoming inter-processor interrupt (IPI) message on the
 * receiving CPU.  Bumps the global ipi_recv counter for every message,
 * then routes by message type; unrecognized messages are logged.
 */
void smp_message_recv(int msg, struct pt_regs *regs)
{
	atomic_inc(&ipi_recv);

	if (msg == PPC_MSG_CALL_FUNCTION) {
		/* Run the function registered by smp_call_function(). */
		smp_call_function_interrupt();
	} else if (msg == PPC_MSG_RESCHEDULE) {
		/* Just mark this CPU as needing a reschedule. */
		set_need_resched();
	} else if (msg == PPC_MSG_INVALIDATE_TLB) {
		/* Flush the whole TLB on this CPU. */
		_tlbia();
#ifdef CONFIG_XMON
	} else if (msg == PPC_MSG_XMON_BREAK) {
		/* Drop this CPU into the xmon debugger. */
		xmon(regs);
#endif /* CONFIG_XMON */
	} else {
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
	}
}
/*
 * Dispatch one incoming inter-processor interrupt (IPI) message on the
 * receiving CPU (variant with KDB and crash-dump hooks).
 *
 * NOTE(review): unlike other variants of this function, there is no
 * PPC_MSG_INVALIDATE_TLB case here -- such a message would fall through
 * to the "unknown msg" default.  Confirm this platform never sends it.
 */
void smp_message_recv(int msg, struct pt_regs *regs)
{
	/* Count every IPI received (exported statistic). */
	atomic_inc(&ipi_recv);
	switch( msg ) {
	case PPC_MSG_CALL_FUNCTION:
#ifdef CONFIG_KDB
		/* Save this CPU's register frame so kdb can inspect it. */
		kdb_smp_regs[smp_processor_id()]=regs;
#endif
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* Direct field write (pre-set_need_resched() API). */
		current->need_resched = 1;
		break;
#ifdef CONFIG_XMON
	case PPC_MSG_XMON_BREAK:
		/* ToDo: need a nmi way to handle this. Soft disable? */
#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
		/* A pending crash-dump request takes priority over xmon;
		 * note the bare "else" below binds across the #endif. */
		if (dump_ipi_function_ptr) {
			printk(KERN_ALERT "got dump ipi...\n");
			dump_ipi_function_ptr(regs);
		} else
#endif
			xmon(regs);
		break;
#endif /* CONFIG_XMON */
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
/*
 * Dispatch one incoming inter-processor interrupt (IPI) message on the
 * receiving CPU (variant with optional KDB debugger entry).
 *
 * Fix: CONFIG_XMON and CONFIG_KDB both provided a
 * "case PPC_MSG_XMON_BREAK:" label; with both options enabled the switch
 * had a duplicate case label and failed to compile.  The KDB case is now
 * compiled only when xmon is not built in, preserving the original
 * behavior of each single-debugger configuration.
 */
void smp_message_recv(int msg, struct pt_regs *regs)
{
	/* Count every IPI received (exported statistic). */
	atomic_inc(&ipi_recv);
	switch( msg ) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* Direct field write (pre-set_need_resched() API). */
		current->need_resched = 1;
		break;
#ifdef CONFIG_XMON
	case PPC_MSG_XMON_BREAK:
		/* Drop this CPU into the xmon debugger. */
		xmon(regs);
		break;
#endif /* CONFIG_XMON */
#if defined(CONFIG_KDB) && !defined(CONFIG_XMON)
	/* xmon takes precedence when both debuggers are configured;
	 * both share the PPC_MSG_XMON_BREAK message number. */
	case PPC_MSG_XMON_BREAK:
		/* This isn't finished yet, obviously -TAI */
		kdb(KDB_REASON_KEYBOARD,0, (kdb_eframe_t) regs);
		break;
#endif
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
void abort(void) { #ifdef CONFIG_XMON extern void xmon(void *); xmon(0); #endif machine_restart(NULL); }
/*
 * Main PowerMac interrupt demultiplexer.  Handles both OpenPIC-based
 * machines and the older pmac interrupt controller, plus the powersurge
 * IPI hack for the second CPU.
 *
 * NOTE(review): the "goto out" statements are compiled under __SMP__ but
 * the "out:" label under CONFIG_SMP -- these are normally defined
 * together, but verify, since a mismatch would not compile.
 */
void pmac_do_IRQ(struct pt_regs *regs, int cpu, int isfake)
{
	int irq;
	unsigned long bits = 0;

#ifdef __SMP__
	/* IPI's are a hack on the powersurge -- Cort */
	if ( cpu != 0 ) {
		if (!isfake) {
#ifdef CONFIG_XMON
			/* Debug hook: enter xmon on the 2nd CPU when armed. */
			static int xmon_2nd;
			if (xmon_2nd)
				xmon(regs);
#endif
			pmac_smp_message_recv();
			goto out;
		}
		/* could be here due to a do_fake_interrupt call but we don't
		   mess with the controller from the second cpu -- Cort */
		goto out;
	}

	{
		/* Spin (bounded) until no other CPU holds the global irq lock;
		 * complain loudly if we somehow hold it ourselves. */
		unsigned int loops = MAXCOUNT;
		while (test_bit(0, &global_irq_lock)) {
			if (smp_processor_id() == global_irq_holder) {
				printk("uh oh, interrupt while we hold global irq lock!\n");
#ifdef CONFIG_XMON
				xmon(0);
#endif
				break;
			}
			if (loops-- == 0) {
				printk("do_IRQ waiting for irq lock (holder=%d)\n", global_irq_holder);
#ifdef CONFIG_XMON
				xmon(0);
#endif
			}
		}
	}
#endif /* __SMP__ */

	/* Yeah, I know, this could be a separate do_IRQ function */
	if (has_openpic) {
		irq = openpic_irq(0);
		if (irq == OPENPIC_VEC_SPURIOUS) {
			/* Spurious interrupts should never be ack'ed */
			ppc_spurious_interrupts++;
		} else {
			/* Can this happen ? (comes from CHRP code) */
			if (irq < 0) {
				printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
				       irq, regs->nip);
				ppc_spurious_interrupts++;
			} else {
				/* Edge-triggered irqs are EOI'd before dispatch,
				 * level-triggered ones after the handler ran. */
				if (!irq_desc[irq].level)
					openpic_eoi(0);
				ppc_irq_dispatch_handler( regs, irq );
				if (irq_desc[irq].level)
					openpic_eoi(0);
			}
		}
		return;
	}

	/* Non-OpenPIC path: scan the controller flag registers (merged with
	 * software "lost" interrupts) 32 bits at a time, highest word first,
	 * and pick the highest pending irq via count-leading-zeros. */
	for (irq = max_real_irqs - 1; irq > 0; irq -= 32) {
		int i = irq >> 5;
		bits = ld_le32(&pmac_irq_hw[i]->flag)
			| ppc_lost_interrupts[i];
		if (bits == 0)
			continue;
		irq -= cntlzw(bits);
		break;
	}

	if (irq < 0) {
		printk(KERN_DEBUG "Bogus interrupt %d from PC = %lx\n",
		       irq, regs->nip);
		ppc_spurious_interrupts++;
	} else {
		ppc_irq_dispatch_handler( regs, irq );
	}
#ifdef CONFIG_SMP
 out:
#endif /* CONFIG_SMP */
}

/* This routine will fix some missing interrupt values in the device tree
 * on the gatwick mac-io controller used by some PowerBooks
 *
 * Interrupt descriptors missing from the tree are handed out of the
 * static gatwick_int_pool; irq_base is the number offset of the second
 * (gatwick) controller's interrupts.
 *
 * NOTE(review): the overflow check below compares count against a
 * hard-coded 10 rather than GATWICK_IRQ_POOL_SIZE -- confirm they match.
 */
static void __init pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
{
	struct device_node *node;
	int count;

	memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
	node = gw->child;
	count = 0;
	while(node) {
		/* Fix SCC */
		if (strcasecmp(node->name, "escc") == 0)
			if (node->child) {
				/* Take pool entries only if the node doesn't
				 * already have room for 3 interrupts. */
				if (node->child->n_intrs < 3) {
					node->child->intrs = &gatwick_int_pool[count];
					count += 3;
				}
				node->child->n_intrs = 3;
				node->child->intrs[0].line = 15+irq_base;
				node->child->intrs[1].line = 4+irq_base;
				node->child->intrs[2].line = 5+irq_base;
				printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
					node->child->intrs[0].line,
					node->child->intrs[1].line,
					node->child->intrs[2].line);
			}
		/* Fix media-bay & left SWIM */
		if (strcasecmp(node->name, "media-bay") == 0) {
			struct device_node* ya_node;

			if (node->n_intrs == 0)
				node->intrs = &gatwick_int_pool[count++];
			node->n_intrs = 1;
			node->intrs[0].line = 29+irq_base;
			printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
					node->intrs[0].line);

			/* Fix the media-bay's children (floppy, ata4). */
			ya_node = node->child;
			while(ya_node) {
				if (strcasecmp(ya_node->name, "floppy") == 0) {
					if (ya_node->n_intrs < 2) {
						ya_node->intrs = &gatwick_int_pool[count];
						count += 2;
					}
					ya_node->n_intrs = 2;
					ya_node->intrs[0].line = 19+irq_base;
					ya_node->intrs[1].line = 1+irq_base;
					printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
						ya_node->intrs[0].line, ya_node->intrs[1].line);
				}
				if (strcasecmp(ya_node->name, "ata4") == 0) {
					if (ya_node->n_intrs < 2) {
						ya_node->intrs = &gatwick_int_pool[count];
						count += 2;
					}
					ya_node->n_intrs = 2;
					ya_node->intrs[0].line = 14+irq_base;
					ya_node->intrs[1].line = 3+irq_base;
					printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
						ya_node->intrs[0].line, ya_node->intrs[1].line);
				}
				ya_node = ya_node->sibling;
			}
		}
		node = node->sibling;
	}
	/* Diagnose (after the fact) running past the static pool. */
	if (count > 10) {
		printk("WARNING !! Gatwick interrupt pool overflow\n");
		printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
		printk(" requested = %d\n", count);
	}
}

/*
 * The PowerBook 3400/2400/3500 can have a combo ethernet/modem
 * card which includes an ohare chip that acts as a second interrupt
 * controller. If we find this second ohare, set it up and fix the
 * interrupt value in the device tree for the ethernet chip.
 */
static void __init enable_second_ohare(void)
{
	unsigned char bus, devfn;
	unsigned short cmd;
	unsigned long addr;
	int second_irq;
	struct device_node *irqctrler = find_devices("pci106b,7");
	struct device_node *ether;

	/* Bail out quietly when there is no second ohare. */
	if (irqctrler == NULL || irqctrler->n_addrs <= 0)
		return;
	/* Map its registers; the irq flag/mask block sits at offset 0x20. */
	addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
	pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
	max_irqs = 64;
	if (pci_device_loc(irqctrler, &bus, &devfn) == 0) {
		/* Enable memory space and bus mastering; it has no I/O BAR. */
		pmac_pcibios_read_config_word(bus, devfn, PCI_COMMAND, &cmd);
		cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
		cmd &= ~PCI_COMMAND_IO;
		pmac_pcibios_write_config_word(bus, devfn, PCI_COMMAND, cmd);
	}
	second_irq = irqctrler->intrs[0].line;
	printk(KERN_INFO "irq: secondary controller on irq %d\n", second_irq);
	/* Cascade the second controller into the primary one. */
	request_irq(second_irq, gatwick_action, SA_INTERRUPT,
		    "interrupt cascade", 0 );
	/* Fix interrupt for the modem/ethernet combo controller. The number
	   in the device tree (27) is bogus (correct for the ethernet-only
	   board but not the combo ethernet/modem board). The real interrupt
	   is 28 on the second controller -> 28+32 = 60. */
	ether = find_devices("pci1011,14");
	if (ether && ether->n_intrs > 0) {
		ether->intrs[0].line = 60;
		printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
		       ether->intrs[0].line);
	}
}
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 *
 * NOTE(review): on timeout this bails out and NULLs call_data while
 * remote CPUs may still be about to dereference it -- presumably an
 * accepted race in this era of the code; verify before reuse.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus = smp_num_cpus-1;
	int timeout;

	/* Nothing to do on a uniprocessor system. */
	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	/* Serialize callers: only one call_data may be published at a time.
	 * call_data must be visible before the IPI is sent. */
	spin_lock_bh(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION, 0, 0);

	/* Wait for response */
	timeout = 8000000;
	while (atomic_read(&data.started) != cpus) {
		/* HMT_low() drops SMT thread priority while spinning. */
		HMT_low();
		if (--timeout == 0) {
			printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
			       smp_processor_id(), atomic_read(&data.started));
#ifdef CONFIG_XMON
			xmon(0);
#endif
#ifdef CONFIG_KDB
			kdb(KDB_REASON_CALL,0, (kdb_eframe_t) 0);
#endif
#ifdef CONFIG_PPC_ISERIES
			HvCall_terminateMachineSrc();
#endif
			goto out;
		}
		barrier();
		udelay(1);
	}

	/* Optionally wait for every CPU to finish running func as well. */
	if (wait) {
		timeout = 1000000;
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (--timeout == 0) {
				printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
				       smp_processor_id(), atomic_read(&data.finished),
				       atomic_read(&data.started));
#ifdef CONFIG_PPC_ISERIES
				HvCall_terminateMachineSrc();
#endif
				goto out;
			}
			barrier();
			udelay(1);
		}
	}
	ret = 0;
 out:
	call_data = NULL;
	HMT_medium();
	spin_unlock_bh(&call_lock);
	return ret;
}