/*
 * Handle an IPI sent to this processor.
 */
intrmask_t
smp_handle_ipi(struct trapframe *frame)
{
	cpumask_t cpumask;		/* This CPU's mask */
	u_int ipi, ipi_bitmap;

	/* Atomically fetch and clear all IPIs posted to this CPU. */
	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	cpumask = PCPU_GET(cpumask);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);
	while (ipi_bitmap) {
		/*
		 * Isolate the lowest set bit.
		 */
		ipi = ipi_bitmap & ~(ipi_bitmap - 1);
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_INVLTLB:
			CTR0(KTR_SMP, "IPI_INVLTLB");
			break;

		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP, so there is
			 * no need to handle it separately in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
			atomic_set_int(&stopped_cpus, cpumask);

			/* Spin until we are told to resume. */
			while ((started_cpus & cpumask) == 0)
				;
			atomic_clear_int(&started_cpus, cpumask);
			atomic_clear_int(&stopped_cpus, cpumask);
			break;
		}
	}
	return (CR_INT_IPI);
}
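/*
 * For context, a minimal sketch of the sending side that pairs with the
 * handler above: post the IPI bit into the target CPU's pending_ipis word
 * and raise the hardware interrupt. This is a sketch, not the stock
 * ipi_selected(); platform_ipi_send() is an assumed per-platform hook for
 * actually triggering the interrupt on the target.
 */
static void
ipi_selected_sketch(cpumask_t cpus, int ipi)
{
	struct pcpu *pc;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if ((cpus & pc->pc_cpumask) != 0) {
			cpus &= ~pc->pc_cpumask;
			/* Post the bit that smp_handle_ipi() will consume. */
			atomic_set_int(&pc->pc_pending_ipis, ipi);
			platform_ipi_send(pc->pc_cpuid);	/* assumed hook */
		}
	}
}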
/*
 * IPI handler: spill this CPU's high FP register state back to the
 * owning thread's PCB so another CPU can claim it.
 */
int
ia64_highfp_save_ipi(void)
{
	struct thread *td;

	mtx_lock_spin(&ia64_highfp_mtx);
	td = PCPU_GET(fpcurthread);
	if (td != NULL) {
		KASSERT(td->td_pcb->pcb_fpcpu == pcpup,
		    ("td->td_pcb->pcb_fpcpu != pcpup"));
		save_high_fp(&td->td_pcb->pcb_high_fp);
		/* Make the thread's next high-FP use fault (DFH set). */
		td->td_frame->tf_special.psr |= IA64_PSR_DFH;
		td->td_pcb->pcb_fpcpu = NULL;
		PCPU_SET(fpcurthread, NULL);
	}
	/* Notify any waiter that this CPU no longer owns high FP state. */
	wakeup(PCPU_PTR(fpcurthread));
	mtx_unlock_spin(&ia64_highfp_mtx);
	return ((td != NULL) ? 1 : 0);
}
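/*
 * A hedged sketch (not the stock ia64 code) of the requesting side: force
 * the CPU that currently owns a thread's high-FP registers to spill them
 * via the handler above. ipi_send() and IPI_HIGH_FP are assumed names for
 * the delivery primitive and vector; the real code sleeps and relies on
 * the wakeup() above, but a spin wait keeps this sketch self-contained.
 */
static void
highfp_save_remote_sketch(struct pcpu *cpu, struct thread *td)
{
	ipi_send(cpu, IPI_HIGH_FP);	/* assumed IPI primitive */
	/* The handler clears pcb_fpcpu once the state has been saved. */
	while (td->td_pcb->pcb_fpcpu == cpu)
		cpu_spinwait();
}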
void
smp_init_secondary(u_int32_t cpuid)
{

	if (cpuid >= MAXCPU)
		panic("cpu id exceeds MAXCPU");

	/* TLB init: flush everything, then re-reserve the wired entries. */
	R4K_SetWIRED(0);
	R4K_TLBFlush(num_tlbentries);
	R4K_SetWIRED(VMWIRED_ENTRIES);
	MachSetPID(0);

	Mips_SyncCache();

	/* Mask interrupts, then wait for the BSP to release the APs. */
	mips_cp0_status_write(0);
	while (!aps_ready)
		;

	mips_sync();
	mips_sync();

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* The last AP to arrive declares SMP up and running. */
	if (smp_cpus == mp_ncpus) {
		smp_started = 1;
		smp_active = 1;
	}

	mtx_unlock_spin(&ap_boot_mtx);

	while (smp_started == 0)
		;	/* nothing */

	/* Enable interrupts. */
	mips_cp0_status_write(SR_INT_ENAB);

	/* OK, now grab sched_lock and enter the scheduler. */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin-lock-using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* Kick off the clock on this CPU. */
	mips_start_timer();

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
}
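/*
 * For context, a sketch of the BSP-side release that the aps_ready and
 * smp_started spins above pair with. It is modeled on the usual
 * release_aps() SYSINIT pattern; the names here are assumed rather than
 * quoted from this port.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;
	/* Let the APs fall out of their aps_ready spin... */
	atomic_store_rel_int(&aps_ready, 1);
	/* ...and wait until the last one sets smp_started. */
	while (smp_started == 0)
		;
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);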