/*
 * Disable a lockstat probe: clear its probemap slot, unpatch the probe
 * sites, and return.  (Apple variant — no thread-quiesce step is needed
 * here; see the comment inside the scan loop.)
 */
/*ARGSUSED*/
static void
lockstat_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id) /* __APPLE__ */
	lockstat_probe_t *probe = parg;
	int ix;

	ASSERT(lockstat_probemap[probe->lsp_probe]);

	/*
	 * Clear the probe id before unpatching the probe sites, then make
	 * the store visible to other CPUs.
	 */
	lockstat_probemap[probe->lsp_probe] = 0;
	lockstat_hot_patch(FALSE, probe->lsp_probe);
	membar_producer();

	/*
	 * Scan for any probe that is still enabled.  If one is found we
	 * don't need to deal with waiting for all threads to leave the
	 * lockstat critical sections — just return.
	 */
	for (ix = 0; ix < LS_NPROBES; ix++) {
		if (lockstat_probemap[ix] != 0)
			return;
	}
}
/*
 * Enable a lockstat probe: publish its DTrace id in the probemap, then
 * hot-patch the probe sites so the probe starts firing.  Always
 * succeeds (returns 0).  (Apple variant.)
 */
/*ARGSUSED*/
static int
lockstat_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg) /* __APPLE__ */
	lockstat_probe_t *probe = parg;

	ASSERT(!lockstat_probemap[probe->lsp_probe]);

	/*
	 * Publish the id before patching the probe sites so that a probe
	 * which fires immediately after patching sees a valid id; the
	 * barrier orders the store ahead of the patch.
	 */
	lockstat_probemap[probe->lsp_probe] = id;
	membar_producer();

	lockstat_hot_patch(TRUE, probe->lsp_probe);
	membar_producer();

	return 0;
}
/*
 * Disable a lockstat probe: clear its probemap slot, unpatch the probe
 * sites, and — if this was the last enabled probe — wait until no
 * thread is still inside the lockstat critical sections.
 */
/*ARGSUSED*/
static void
lockstat_disable(void *arg, dtrace_id_t id, void *parg)
{
	lockstat_probe_t *probe = parg;
	int ix;

	ASSERT(lockstat_probemap[probe->lsp_probe]);

	/*
	 * Clear the id first, unpatch the sites, then force the stores
	 * out so no new thread enters the lockstat code path.
	 */
	lockstat_probemap[probe->lsp_probe] = 0;
	lockstat_hot_patch();
	membar_producer();

	/*
	 * If any probe remains enabled, there is no need to wait for
	 * threads to drain out of the lockstat critical sections — we
	 * can simply return.
	 */
	for (ix = 0; ix < LS_NPROBES; ix++) {
		if (lockstat_probemap[ix] != 0)
			return;
	}

	/*
	 * The delay() here isn't as cheesy as you might think: we must
	 * yield the CPU between checks of lockstat_active_threads()
	 * rather than busy-loop in the kernel.  The do..while (as
	 * opposed to a plain while) is deliberate and subtle.  The
	 * barrier above guarantees no thread enters the lockstat code
	 * from this point on, but a thread could already be executing
	 * lockstat code without our knowledge if the update to its
	 * t_lockstat field hasn't cleared its CPU's store buffer.
	 * Delaying for one clock tick guarantees that either (1) the
	 * thread has ample time to finish, or (2) it is preempted and
	 * must grab and release a dispatcher lock, which flushes that
	 * CPU's store buffer.  Either way we're covered.
	 */
	do {
		delay(1);
	} while (lockstat_active_threads());
}
/*
 * Enable a lockstat probe: publish its DTrace id in the probemap,
 * hot-patch the probe sites, and exercise the test mutex so the patch
 * is verified immediately.  Always succeeds (returns 0).
 */
/*ARGSUSED*/
static int
lockstat_enable(void *arg, dtrace_id_t id, void *parg)
{
	lockstat_probe_t *probe = parg;

	ASSERT(!lockstat_probemap[probe->lsp_probe]);

	/*
	 * The id must be globally visible before the probe sites are
	 * patched, so a probe that fires right away sees a valid id.
	 */
	lockstat_probemap[probe->lsp_probe] = id;
	membar_producer();

	lockstat_hot_patch();
	membar_producer();

	/*
	 * Immediately generate a record for the lockstat_test mutex to
	 * verify that the mutex hot-patch code worked as expected.
	 */
	mutex_enter(&lockstat_test);
	mutex_exit(&lockstat_test);

	return (0);
}