RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2) { #if __FreeBSD_version >= 900000 cpuset_t Mask; #elif __FreeBSD_version >= 700000 cpumask_t Mask; #endif RTMPARGS Args; /* Will panic if no rendezvousing cpus, so make sure the cpu is online. */ if (!RTMpIsCpuOnline(idCpu)) return VERR_CPU_NOT_FOUND; Args.pfnWorker = pfnWorker; Args.pvUser1 = pvUser1; Args.pvUser2 = pvUser2; Args.idCpu = idCpu; Args.cHits = 0; #if __FreeBSD_version >= 700000 # if __FreeBSD_version >= 900000 CPU_SETOF(idCpu, &Mask); # else Mask = (cpumask_t)1 << idCpu; # endif smp_rendezvous_cpus(Mask, NULL, rtmpOnSpecificFreeBSDWrapper, smp_no_rendezvous_barrier, &Args); #else smp_rendezvous(NULL, rtmpOnSpecificFreeBSDWrapper, NULL, &Args); #endif return Args.cHits == 1 ? VINF_SUCCESS : VERR_CPU_NOT_FOUND; }
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2) { /* Will panic if no rendezvousing cpus, so check up front. */ if (RTMpGetOnlineCount() > 1) { #if __FreeBSD_version >= 900000 cpuset_t Mask; #elif __FreeBSD_version >= 700000 cpumask_t Mask; #endif RTMPARGS Args; Args.pfnWorker = pfnWorker; Args.pvUser1 = pvUser1; Args.pvUser2 = pvUser2; Args.idCpu = RTMpCpuId(); Args.cHits = 0; #if __FreeBSD_version >= 700000 # if __FreeBSD_version >= 900000 Mask = all_cpus; CPU_CLR(curcpu, &Mask); # else Mask = ~(cpumask_t)curcpu; # endif smp_rendezvous_cpus(Mask, NULL, rtmpOnOthersFreeBSDWrapper, smp_no_rendezvous_barrier, &Args); #else smp_rendezvous(NULL, rtmpOnOthersFreeBSDWrapper, NULL, &Args); #endif } return VINF_SUCCESS; }
/*
 * Rendezvous on every CPU: thin convenience wrapper that forwards to
 * smp_rendezvous_cpus() with the all_cpus set.
 */
void
smp_rendezvous(void (*setup_func)(void *),
               void (*action_func)(void *),
               void (*teardown_func)(void *),
               void *arg)
{
	smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
}
/*
 * Cyclic backend cross-call: run func(param) on the CPU identified by c
 * by rendezvousing with that single CPU only (no setup/teardown barriers).
 */
static void
xcall(cyb_arg_t arg __unused, cpu_t *c, cyc_func_t func, void *param)
{
	cpuset_t set;

	CPU_SETOF(c->cpuid, &set);
	smp_rendezvous_cpus(set, smp_no_rendevous_barrier, func,
	    smp_no_rendevous_barrier, param);
}
/*
 * Cyclic backend cross-call (cpumask_t variant): run func(param) on the
 * CPU identified by c.
 */
static void
xcall(cyb_arg_t arg, cpu_t *c, cyc_func_t func, void *param)
{
	/*
	 * If the target CPU is the current one, just call the
	 * function. This covers the non-SMP case.
	 */
	if (c == &solaris_cpu[curcpu])
		(*func)(param);
	else
		/*
		 * Fix: the old code shifted the int literal 1 and cast the
		 * result ((cpumask_t)(1 << cpuid)), which is undefined for
		 * cpuid >= 31 and yields a wrong mask on wide cpumask_t
		 * kernels.  Cast before shifting so the shift happens in
		 * the full cpumask_t width.
		 */
		smp_rendezvous_cpus((cpumask_t)1 << c->cpuid, NULL, func,
		    smp_no_rendevous_barrier, param);
}
/*
 * DTrace cross-call: invoke func(arg) on one CPU, or on every CPU when
 * cpu == DTRACE_CPUALL, via an SMP rendezvous with no barriers.
 */
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
	cpuset_t set;

	if (cpu == DTRACE_CPUALL)
		set = all_cpus;
	else
		CPU_SETOF(cpu, &set);

	smp_rendezvous_cpus(set, smp_no_rendevous_barrier, func,
	    smp_no_rendevous_barrier, arg);
}
/*
 * _rm_wlock: acquire the write side of a read-mostly lock.
 *
 * Takes the underlying sleep lock (sx or mtx, per LO_SLEEPABLE), then
 * revokes the per-CPU read tokens: every CPU not already in rm_writecpus
 * is IPI'd (via smp_rendezvous_cpus) to run rm_cleanIPI, which moves any
 * active readers on that CPU onto rm_activeReaders.  Finally it blocks on
 * a turnstile for each remaining active reader until all have drained.
 */
void _rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	/* Debugger/panic path: don't touch locks once the scheduler stopped. */
	if (SCHEDULER_STOPPED())
		return;

	/* Exclusive-writer lock; flavor chosen at init time. */
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back */
		/* readcpus = CPUs that may hold a read token
		   (all_cpus AND NOT rm_writecpus). */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier, rm);

#else
		/* Uniprocessor: run the cleanup handler directly. */
		rm_cleanIPI(rm);
#endif

		/* Wait, one turnstile block at a time, for every reader the
		   IPI handler queued on rm_activeReaders to drop the lock. */
		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}
/**
 * Pokes (IPIs) the specified CPU by rendezvousing with it alone and
 * running rtmpFreeBSDPokeCallback there.
 *
 * @returns VINF_SUCCESS on success, VERR_CPU_NOT_FOUND if the CPU is
 *          offline (smp_rendezvous panics on an empty rendezvous set).
 * @param   idCpu   The CPU to poke.
 *
 * NOTE(review): the `# if` block below has no enclosing
 * `#if __FreeBSD_version >= 700000` (unlike RTMpOnSpecific/RTMpOnOthers),
 * so on pre-7.0 kernels `Mask` is never declared and this function would
 * not compile — presumably this file is only built on >= 7.0; confirm
 * against the build configuration.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if __FreeBSD_version >= 900000
    cpuset_t    Mask;
#elif __FreeBSD_version >= 700000
    cpumask_t   Mask;
#endif
    /* Will panic if no rendezvousing cpus, so make sure the cpu is online. */
    if (!RTMpIsCpuOnline(idCpu))
        return VERR_CPU_NOT_FOUND;

# if __FreeBSD_version >= 900000
    CPU_SETOF(idCpu, &Mask);
# else
    /* Pre-9.x: cpumask_t is a plain integer bit mask. */
    Mask = (cpumask_t)1 << idCpu;
# endif
    smp_rendezvous_cpus(Mask, NULL, rtmpFreeBSDPokeCallback, smp_no_rendezvous_barrier, NULL);
    return VINF_SUCCESS;
}