/*
 * Handle a Titan (RM9000x2) mailbox interrupt on the current CPU.
 *
 * Reads this CPU's mailbox status register, writes the same bits back to
 * acknowledge them, then dispatches: bit 0x2 -> cross-CPU function calls,
 * bit 0x4 -> scheduler IPI.  Only CPUs 0 and 1 have mailboxes; any other
 * CPU is ignored.
 */
void titan_mailbox_irq(void)
{
	int me = smp_processor_id();
	unsigned long pending;

	if (me == 0) {
		pending = OCD_READ(RM9000x2_OCD_INTP0STATUS3);
		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR3, pending);

		if (pending & 0x2)
			smp_call_function_interrupt();
		if (pending & 0x4)
			scheduler_ipi();
	} else if (me == 1) {
		pending = OCD_READ(RM9000x2_OCD_INTP1STATUS3);
		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR3, pending);

		if (pending & 0x2)
			smp_call_function_interrupt();
		if (pending & 0x4)
			scheduler_ipi();
	}
}
/*
 * Common functions
 */

/*
 * Dispatch a received inter-processor interrupt (IPI) message on this CPU.
 *
 * @msg:  one of the PPC_MSG_* message codes delivered by the sender.
 * @regs: interrupt register frame, passed through to the debugger hook.
 *
 * Increments the global ipi_recv counter, then acts on the message:
 * run pending cross-CPU function calls, request a reschedule, flush the
 * TLB, or (when CONFIG_XMON is enabled) drop into the xmon debugger.
 * Unknown messages are logged and otherwise ignored.
 */
void smp_message_recv(int msg, struct pt_regs *regs)
{
	atomic_inc(&ipi_recv);	/* account every IPI received on this CPU */
	switch( msg ) {
	case PPC_MSG_CALL_FUNCTION:
		/* Run the function queued by smp_call_function() on the sender. */
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* Just flag the need to reschedule; the exit path acts on it. */
		set_need_resched();
		break;
	case PPC_MSG_INVALIDATE_TLB:
		/* Invalidate the entire TLB on this CPU. */
		_tlbia();
		break;
#ifdef CONFIG_XMON
	case PPC_MSG_XMON_BREAK:
		/* Enter the xmon kernel debugger with the interrupted context. */
		xmon(regs);
		break;
#endif /* CONFIG_XMON */
	default:
		/* Unrecognized message code: log it, but do not crash. */
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
/*
 * Dispatch a received inter-processor interrupt (IPI) message on this CPU.
 *
 * @msg: one of the PPC_MSG_* message codes delivered by the sender.
 *
 * Handles cross-CPU function calls, reschedule requests, and debugger
 * break requests.  Note the deliberate control flow in the debugger case:
 * a registered crash handler takes priority; failing that, the kernel
 * debugger (if configured) is entered; if neither is available the case
 * falls through to the unknown-message path.
 */
void smp_message_recv(int msg)
{
	switch(msg) {
	case PPC_MSG_CALL_FUNCTION:
		/* Run the function queued by smp_call_function() on the sender. */
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
	case PPC_MSG_DEBUGGER_BREAK:
		/*
		 * A crash handler registered at kexec/crash time wins over
		 * the interactive debugger; the break only happens when the
		 * pointer is set, otherwise control continues below.
		 */
		if (crash_ipi_function_ptr) {
			crash_ipi_function_ptr(get_irq_regs());
			break;
		}
#ifdef CONFIG_DEBUGGER
		debugger_ipi(get_irq_regs());
		break;
#endif /* CONFIG_DEBUGGER */
		/* FALLTHROUGH */
	default:
		/* Unrecognized (or unhandleable) message: log and ignore. */
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
/*
 * Dispatch a received inter-processor interrupt (IPI) message on this CPU.
 *
 * @msg:  one of the PPC_MSG_* message codes delivered by the sender.
 * @regs: interrupt register frame, saved for KDB and passed to the
 *        dump/xmon hooks.
 *
 * Increments the global ipi_recv counter, then acts on the message.
 * When both CONFIG_DUMP (or CONFIG_DUMP_MODULE) and CONFIG_XMON are in
 * play, a registered crash-dump handler takes priority over xmon for the
 * break message.
 */
void smp_message_recv(int msg, struct pt_regs *regs)
{
	atomic_inc(&ipi_recv);	/* account every IPI received on this CPU */
	switch( msg ) {
	case PPC_MSG_CALL_FUNCTION:
#ifdef CONFIG_KDB
		/* Record this CPU's register frame so KDB can inspect it. */
		kdb_smp_regs[smp_processor_id()]=regs;
#endif
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* Flag a reschedule; the interrupt exit path acts on it. */
		current->need_resched = 1;
		break;
#ifdef CONFIG_XMON
	case PPC_MSG_XMON_BREAK:
		/* ToDo: need a nmi way to handle this.  Soft disable? */
#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
		/* A registered dump handler pre-empts the xmon debugger. */
		if (dump_ipi_function_ptr) {
			printk(KERN_ALERT "got dump ipi...\n");
			dump_ipi_function_ptr(regs);
		} else
#endif
			xmon(regs);
		break;
#endif /* CONFIG_XMON */
	default:
		/* Unrecognized message code: log it, but do not crash. */
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
/*
 * IRQ handler for the call-function IPI: run the cross-CPU function
 * calls queued by the sending processor.  The irq number and cookie are
 * unused; the IPI itself is the whole message.
 */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}
/*
 * Dispatch a received inter-processor interrupt (IPI) message on this CPU.
 *
 * @msg:  one of the PPC_MSG_* message codes delivered by the sender.
 * @regs: interrupt register frame, passed to the debugger hook.
 *
 * Fix: the original emitted "case PPC_MSG_XMON_BREAK:" under both
 * CONFIG_XMON and CONFIG_KDB, so a kernel configured with both options
 * failed to compile with a duplicate case label.  Use #if/#elif so at
 * most one arm is emitted; behavior with only one option set is
 * unchanged, and XMON keeps priority when both are set.
 */
void smp_message_recv(int msg, struct pt_regs *regs)
{
	atomic_inc(&ipi_recv);	/* account every IPI received on this CPU */
	switch( msg ) {
	case PPC_MSG_CALL_FUNCTION:
		/* Run the function queued by smp_call_function() on the sender. */
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* Flag a reschedule; the interrupt exit path acts on it. */
		current->need_resched = 1;
		break;
#if defined(CONFIG_XMON)
	case PPC_MSG_XMON_BREAK:
		/* Enter the xmon kernel debugger with the interrupted context. */
		xmon(regs);
		break;
#elif defined(CONFIG_KDB)
	case PPC_MSG_XMON_BREAK:
		/* This isn't finished yet, obviously -TAI */
		kdb(KDB_REASON_KEYBOARD,0, (kdb_eframe_t) regs);
		break;
#endif
	default:
		/* Unrecognized message code: log it, but do not crash. */
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
/*
 * Send a call-function IPI to every CPU in @mask.
 *
 * If the calling CPU itself is in @mask, the pending call-function work
 * is run locally as well, with interrupts disabled to mimic interrupt
 * context (the vector path presumably does not self-deliver here —
 * NOTE(review): confirm against send_IPI_mask's handling of self).
 */
void smp_send_call_function_mask(const cpumask_t *mask)
{
	unsigned int self = smp_processor_id();

	send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	if ( !cpumask_test_cpu(self, mask) )
		return;

	/* Run our own portion of the work in interrupt-like context. */
	local_irq_disable();
	smp_call_function_interrupt();
	local_irq_enable();
}
// Handle interprocessor messages static irqreturn_t brcm_smp_call_interrupt(int irq, void *dev_id, struct pt_regs *regs) { /* SMP_CALL_FUNCTION */ smp_call_function_interrupt(); // we need to clear the interrupt... { register u32 temp; temp = read_c0_cause(); temp &= ~CAUSEF_IP0; write_c0_cause(temp); } return IRQ_HANDLED; }
/*
 * Send a call-function SGI to every CPU in @mask.
 *
 * The current CPU is explicitly excluded from the hardware SGI; if it
 * is part of @mask, its share of the work is run directly, with
 * interrupts disabled to mimic interrupt context.
 */
void smp_send_call_function_mask(const cpumask_t *mask)
{
	unsigned int self = smp_processor_id();
	cpumask_t others;

	/* Raise the SGI on every requested CPU except ourselves. */
	cpumask_andnot(&others, mask, cpumask_of(self));
	send_SGI_mask(&others, GIC_SGI_CALL_FUNCTION);

	/* Handle the local request inline rather than via the GIC. */
	if ( cpumask_test_cpu(self, mask) )
	{
		local_irq_disable();
		smp_call_function_interrupt();
		local_irq_enable();
	}
}
/*
 * Handle a Titan (RM9000x2) mailbox interrupt on the current CPU.
 *
 * Reads this CPU's mailbox status register, writes the same bits back to
 * acknowledge them, then runs pending cross-CPU function calls if bit
 * 0x2 was set.
 *
 * Fix: the original read `status` even when the CPU was neither 0 nor 1,
 * i.e. it tested an uninitialized variable — undefined behavior.  Only
 * CPUs 0 and 1 have mailboxes, so bail out for any other CPU before
 * `status` is ever used.
 */
asmlinkage void titan_mailbox_irq(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long status;

	if (cpu == 0) {
		status = OCD_READ(RM9000x2_OCD_INTP0STATUS3);
		OCD_WRITE(RM9000x2_OCD_INTP0CLEAR3, status);
	} else if (cpu == 1) {
		status = OCD_READ(RM9000x2_OCD_INTP1STATUS3);
		OCD_WRITE(RM9000x2_OCD_INTP1CLEAR3, status);
	} else {
		/* No mailbox for this CPU; status would be uninitialized. */
		return;
	}

	if (status & 0x2)
		smp_call_function_interrupt();
}
void titan_mailbox_irq(void) { int cpu = smp_processor_id(); unsigned long status; switch (cpu) { case 0: status = OCD_READ(RM9000x2_OCD_INTP0STATUS3); OCD_WRITE(RM9000x2_OCD_INTP0CLEAR3, status); if (status & 0x2) smp_call_function_interrupt(); <<<<<<< HEAD if (status & 0x4) scheduler_ipi(); ======= >>>>>>> 296c66da8a02d52243f45b80521febece5ed498a break;
static irqreturn_t mailbox_interrupt(int irq, void *dev_id) { const int coreid = cvmx_get_core_num(); uint64_t action; /* Load the mailbox register to figure out what we're supposed to do */ action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff; /* Clear the mailbox to clear the interrupt */ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) smp_call_function_interrupt(); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); /* Check if we've been told to flush the icache */ if (action & SMP_ICACHE_FLUSH) asm volatile ("synci 0($0)\n"); return IRQ_HANDLED; }
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { unsigned int cpu = smp_processor_id(); unsigned int cd_event = 0; unsigned long flags; spin_lock_irqsave(&ra_teststat_lock, flags); cd_event = (*( (volatile u32 *)(RALINK_TESTSTAT) )) & ((0x1UL) << cpu); if(cd_event) (*((volatile u32 *)(RALINK_TESTSTAT))) &= ~cd_event; spin_unlock_irqrestore(&ra_teststat_lock, flags); // FIXME!!! if(cd_event){ ra_percpu_event_handler(); } smp_call_function_interrupt(); return IRQ_HANDLED; }
/*
 * Call-function IPI handler: acknowledge the IPI line first, then run
 * the cross-CPU function calls queued by the sender.
 */
static irqreturn_t brcmstb_ipi_interrupt(int irq, void *dev_id)
{
	brcmstb_ack_ipi(irq);
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}
/*
 * Vector entry for the call-function IPI: ack the local APIC, bump the
 * IPI performance counter, and run the queued cross-CPU function calls.
 */
void call_function_interrupt(struct cpu_user_regs *regs)
{
	ack_APIC_irq();
	perfc_incr(ipis);
	smp_call_function_interrupt();
}