/*
 * Deliver @vector as an IPI to the CPUs selected by the destination
 * @mask in a single ICR transaction.  The ICR accesses are performed
 * with hardware interrupts masked.
 */
static inline void send_IPI_mask_bitmask(int mask, int vector)
{
	unsigned long icr_hi, icr_lo, flags;

	local_irq_save_hw(flags);

	/* The ICR must be idle before loading a new command. */
	apic_wait_icr_idle();

	/* Set up the destination field first... */
	icr_hi = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, icr_hi);

	/* ...then the low word; this write actually fires the IPI. */
	icr_lo = __prepare_ICR(0, vector);
	apic_write_around(APIC_ICR, icr_lo);

	local_irq_restore_hw(flags);
}
/*
 * Translate the local APIC ID of the executing processor into its
 * logical CPU number.  Hardware interrupts are masked across the
 * lookup so the caller cannot be moved to another CPU meanwhile.
 */
static int __ipipe_hard_cpuid(void)
{
	unsigned long flags;
	int id;

	local_irq_save_hw(flags);
	id = __ipipe_apicid_2_cpu[GET_APIC_ID(apic_read(APIC_ID))];
	local_irq_restore_hw(flags);

	return id;
}
static inline void send_IPI_mask_sequence(int mask, int vector) { unsigned long cfg, flags; unsigned int query_cpu, query_mask; /* * Hack. The clustered APIC addressing mode doesn't allow us to send * to an arbitrary mask, so I do a unicasts to each CPU instead. This * should be modified to do 1 message per cluster ID - mbligh */ local_irq_save_hw(flags); for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) { query_mask = 1 << query_cpu; if (query_mask & mask) { /* * Wait for idle. */ apic_wait_icr_idle(); /* * prepare target chip field */ if(clustered_apic_mode == CLUSTERED_APIC_XAPIC) cfg = __prepare_ICR2(cpu_to_physical_apicid(query_cpu)); else cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu)); apic_write_around(APIC_ICR2, cfg); /* * program the ICR */ cfg = __prepare_ICR(0, vector); /* * Send the IPI. The write to APIC_ICR fires this off. */ apic_write_around(APIC_ICR, cfg); } } local_irq_restore_hw(flags); }
int __ipipe_syscall_root(struct pt_regs *regs) { unsigned long flags; int ret; /* * This routine either returns: * 0 -- if the syscall is to be passed to Linux; * >0 -- if the syscall should not be passed to Linux, and no * tail work should be performed; * <0 -- if the syscall should not be passed to Linux but the * tail work has to be performed (for handling signals etc). */ if (!__ipipe_syscall_watched_p(current, regs->orig_ax) || !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL)) return 0; ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs); local_irq_save_hw(flags); if (current->ipipe_flags & PF_EVTRET) { current->ipipe_flags &= ~PF_EVTRET; __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs); } if (!ipipe_root_domain_p) return 1; /* * If allowed, sync pending VIRQs before _TIF_NEED_RESCHED is * tested. */ if (__ipipe_ipending_p(ipipe_root_cpudom_ptr())) __ipipe_sync_pipeline(); if (!ret) local_irq_restore_hw(flags); return -ret; }
/*
 * Send IPI @ipi to the CPUs in @cpumask.  A remote APIC IPI is used
 * for other CPUs; if the current CPU is part of the mask, the IRQ is
 * triggered locally through the pipeline instead.  Always returns 0.
 */
int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
{
	unsigned long flags;
	int cpu, self_targeted;

	local_irq_save_hw(flags);

	/* Hw irqs are off, so the CPU number cannot change under us. */
	cpu = ipipe_processor_id();
	self_targeted = cpu_isset(cpu, cpumask);
	cpu_clear(cpu, cpumask);

	if (!cpus_empty(cpumask))
		apic->send_IPI_mask(&cpumask, ipipe_apic_irq_vector(ipi));

	if (self_targeted)
		ipipe_trigger_irq(ipi);

	local_irq_restore_hw(flags);

	return 0;
}
/* * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline * just like if it has been actually received from a hw source. Also * works for virtual interrupts. */ int ipipe_trigger_irq(unsigned int irq) { struct pt_regs regs; unsigned long flags; #ifdef CONFIG_IPIPE_DEBUG if (irq >= IPIPE_NR_IRQS) return -EINVAL; if (ipipe_virtual_irq_p(irq)) { if (!test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)) return -EINVAL; } else if (irq_to_desc(irq) == NULL) return -EINVAL; #endif local_irq_save_hw(flags); regs.flags = flags; regs.orig_ax = irq; /* Positive value - IRQ won't be acked */ regs.cs = __KERNEL_CS; __ipipe_handle_irq(®s); local_irq_restore_hw(flags); return 1; }