/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
void
signotify(struct proc *p)
{
	aston(p);		/* set the AST so the signal is handled on return to userland */
#ifdef MULTIPROCESSOR
	/* If the process is running on another CPU, poke it so it notices the AST. */
	if (p->p_cpu != curcpu() && p->p_cpu != NULL)
		x86_send_ipi(p->p_cpu, X86_IPI_NOP);
#endif
}
/*
 * Ask the given CPU to stop for ddb: if it is a different, present CPU that
 * is not already stopped, mark it CI_DDB_SHOULDSTOP and send it a DDB IPI.
 * The flag is set under ddb_mp_mutex, which is released before the IPI is sent.
 */
void
db_stopcpu(int cpu)
{
	mtx_enter(&ddb_mp_mutex);
	if (cpu != cpu_number() && cpu_info[cpu] != NULL &&
	    cpu_info[cpu]->ci_ddb_paused != CI_DDB_STOPPED) {
		cpu_info[cpu]->ci_ddb_paused = CI_DDB_SHOULDSTOP;
		mtx_leave(&ddb_mp_mutex);
		x86_send_ipi(cpu_info[cpu], X86_IPI_DDB);
	} else {
		mtx_leave(&ddb_mp_mutex);
	}
}
/*
 * Save l's FPU state, which may be on this processor or another processor.
 * It may take some time, so we avoid disabling preemption where possible.
 * Caller must know that the target LWP is stopped, otherwise this routine
 * may race against it.
 */
void
fpusave_lwp(struct lwp *l, bool save)
{
	struct cpu_info *oci;
	struct pcb *pcb;
	int s, spins, ticks;

	spins = 0;
	ticks = hardclock_ticks;
	for (;;) {
		s = splhigh();
		pcb = lwp_getpcb(l);
		oci = pcb->pcb_fpcpu;
		if (oci == NULL) {
			/* State is not resident on any CPU; nothing to save. */
			splx(s);
			break;
		}
		if (oci == curcpu()) {
			/* State is resident on this CPU; save it directly. */
			KASSERT(oci->ci_fpcurlwp == l);
			fpusave_cpu(save);
			splx(s);
			break;
		}
		splx(s);

		/* State is resident on another CPU; ask it to flush. */
#ifdef XEN
		if (xen_send_ipi(oci, XEN_IPI_SYNCH_FPU) != 0) {
			panic("xen_send_ipi(%s, XEN_IPI_SYNCH_FPU) failed.",
			    cpu_name(oci));
		}
#else /* XEN */
		x86_send_ipi(oci, X86_IPI_SYNCH_FPU);
#endif
		/* Spin until the remote CPU flushes, or a clock tick passes. */
		while (pcb->pcb_fpcpu == oci &&
		    ticks == hardclock_ticks) {
			x86_pause();
			spins++;
		}
		if (spins > 100000000) {
			panic("fpusave_lwp: did not");
		}
	}

	if (!save) {
		/* Ensure we restart with a clean slate. */
		l->l_md.md_flags &= ~MDL_USEDFPU;
	}
}
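/*
 * Hedged illustration, not kernel code: a minimal user-space sketch of the
 * request-and-spin handshake that fpusave_lwp() relies on, written with C11
 * atomics and POSIX threads.  Every name below (fpu_owner, save_requested,
 * remote_owner, request_remote_save) is hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int fpu_owner;		/* 1 while the remote thread "owns" the state */
static atomic_bool save_requested;	/* stands in for the SYNCH_FPU IPI */

/* Remote thread: once asked, "save" the state and drop ownership. */
static void *
remote_owner(void *arg)
{
	(void)arg;
	while (!atomic_load(&save_requested))
		;				/* wait for the request */
	/* ... save the state here ... */
	atomic_store(&fpu_owner, 0);		/* like clearing pcb->pcb_fpcpu */
	return NULL;
}

/* Requesting thread: post the request, then spin until ownership is dropped. */
static void
request_remote_save(void)
{
	atomic_store(&save_requested, true);	/* stands in for x86_send_ipi() */
	while (atomic_load(&fpu_owner) != 0)
		;				/* the kernel adds x86_pause() and a timeout */
}

int
main(void)
{
	pthread_t t;

	atomic_store(&fpu_owner, 1);
	pthread_create(&t, NULL, remote_owner, NULL);
	request_remote_save();
	pthread_join(t, NULL);
	printf("remote state flushed\n");
	return 0;
}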
int
db_enter_ddb(void)
{
	int i;

	mtx_enter(&ddb_mp_mutex);

	/* If we are first in, grab ddb and stop all other CPUs */
	if (ddb_state == DDB_STATE_NOT_RUNNING) {
		ddb_active_cpu = cpu_number();
		ddb_state = DDB_STATE_RUNNING;
		curcpu()->ci_ddb_paused = CI_DDB_INDDB;
		mtx_leave(&ddb_mp_mutex);
		for (i = 0; i < MAXCPUS; i++) {
			if (cpu_info[i] != NULL && i != cpu_number() &&
			    cpu_info[i]->ci_ddb_paused != CI_DDB_STOPPED) {
				cpu_info[i]->ci_ddb_paused = CI_DDB_SHOULDSTOP;
				x86_send_ipi(cpu_info[i], X86_IPI_DDB);
			}
		}
		return (1);
	}

	/* Leaving ddb completely.  Start all other CPUs and return 0 */
	if (ddb_active_cpu == cpu_number() && ddb_state == DDB_STATE_EXITING) {
		for (i = 0; i < MAXCPUS; i++) {
			if (cpu_info[i] != NULL) {
				cpu_info[i]->ci_ddb_paused = CI_DDB_RUNNING;
			}
		}
		mtx_leave(&ddb_mp_mutex);
		return (0);
	}

	/*
	 * We're switching to another CPU.  db_ddbproc_cmd() has made sure
	 * it is waiting for ddb, we just have to set ddb_active_cpu.
	 */
	if (ddb_active_cpu == cpu_number() && db_switch_cpu) {
		curcpu()->ci_ddb_paused = CI_DDB_SHOULDSTOP;
		db_switch_cpu = 0;
		ddb_active_cpu = db_switch_to_cpu;
		cpu_info[db_switch_to_cpu]->ci_ddb_paused = CI_DDB_ENTERDDB;
	}

	/* Wait until we should enter ddb or resume */
	while (ddb_active_cpu != cpu_number() &&
	    curcpu()->ci_ddb_paused != CI_DDB_RUNNING) {
		if (curcpu()->ci_ddb_paused == CI_DDB_SHOULDSTOP)
			curcpu()->ci_ddb_paused = CI_DDB_STOPPED;
		mtx_leave(&ddb_mp_mutex);

		/* Busy wait without locking, we'll confirm with lock later */
		while (ddb_active_cpu != cpu_number() &&
		    curcpu()->ci_ddb_paused != CI_DDB_RUNNING)
			CPU_BUSY_CYCLE();

		mtx_enter(&ddb_mp_mutex);
	}

	/* Either enter ddb or exit */
	if (ddb_active_cpu == cpu_number() && ddb_state == DDB_STATE_RUNNING) {
		curcpu()->ci_ddb_paused = CI_DDB_INDDB;
		mtx_leave(&ddb_mp_mutex);
		return (1);
	} else {
		mtx_leave(&ddb_mp_mutex);
		return (0);
	}
}
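/*
 * Hedged illustration, not kernel code: the "busy wait without the lock,
 * then re-check under the lock" pattern from db_enter_ddb()'s wait loop,
 * sketched with POSIX threads and C11 atomics.  The names (run_state,
 * state_mtx, wait_until_released, release) are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t state_mtx = PTHREAD_MUTEX_INITIALIZER;
static atomic_int run_state;	/* 0 = keep waiting, 1 = released */

static void
wait_until_released(void)
{
	pthread_mutex_lock(&state_mtx);
	while (atomic_load(&run_state) != 1) {
		/* Drop the lock so whoever releases us can make progress. */
		pthread_mutex_unlock(&state_mtx);
		while (atomic_load(&run_state) != 1)
			;	/* unlocked spin; the kernel uses CPU_BUSY_CYCLE() */
		/* Retake the lock and re-check before acting on the state. */
		pthread_mutex_lock(&state_mtx);
	}
	pthread_mutex_unlock(&state_mtx);
}

static void *
release(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&state_mtx);
	atomic_store(&run_state, 1);
	pthread_mutex_unlock(&state_mtx);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, release, NULL);
	wait_until_released();
	pthread_join(t, NULL);
	return 0;
}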